2 changes: 1 addition & 1 deletion .github/workflows/python-test.yml
@@ -20,5 +20,5 @@ jobs:
        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
    - name: Test with pytest
      run: |
        python -m pytest tests/
        python -m pytest -v -m fast tests/

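The workflow change above makes CI run only the tests marked `fast`, in verbose mode, instead of the whole suite. The same selection can be reproduced locally through pytest's public pytest.main API; the snippet below is a minimal sketch for illustration, not part of this diff:

# Illustrative sketch (not part of this PR): run the CI selection locally.
# pytest.main accepts the same arguments as the pytest command line and
# returns an exit code suitable for sys.exit.
import sys

import pytest

if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-m", "fast", "tests/"]))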
3 changes: 3 additions & 0 deletions pytest.ini
@@ -0,0 +1,3 @@
[pytest]
markers =
    fast: mark test as a fast test
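Registering the marker in pytest.ini silences PytestUnknownMarkWarning for @pytest.mark.fast and is required for runs with --strict-markers. A minimal sketch of how the registered marker behaves, using hypothetical tests that are not part of this PR:

import pytest

@pytest.mark.fast
def test_small_case():
    # Selected by `pytest -m fast`; deselected by `pytest -m "not fast"`.
    assert sorted([3, 1, 2]) == [1, 2, 3]

def test_large_case():
    # Unmarked, so the CI command `python -m pytest -v -m fast tests/` skips it.
    assert sum(range(10_000)) == 49_995_000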
121 changes: 93 additions & 28 deletions tests/test_algorithms.py
@@ -8,47 +8,64 @@
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier

problem_generators = [construct_murder_problem(), construct_examtt_simple(6, 3, 2, 10), construct_nurse_rostering()]
# Reduced set of problem generators for fast tests
fast_problem_generators = [construct_murder_problem()] # Keep only the smallest problem

benchmarks = []
instance, oracle = construct_murder_problem()
benchmarks.append({"instance": instance, "oracle": oracle})
instance, oracle = construct_examtt_simple(6, 3, 2, 10)
benchmarks.append({"instance": instance, "oracle": oracle})
instance, oracle = construct_nurse_rostering()
benchmarks.append({"instance": instance, "oracle": oracle})
problem_generators = [construct_murder_problem(), construct_examtt_simple(6, 3, 2, 10), construct_nurse_rostering()]

classifiers = [DecisionTreeClassifier(), RandomForestClassifier()]
algorithms = [ca.QuAcq(), ca.MQuAcq(), ca.MQuAcq2(), ca.GQuAcq(), ca.PQuAcq()]
fast_tests_algorithms = [ca.QuAcq(), ca.MQuAcq(), ca.MQuAcq2()]

def _generate_benchmarks():
    for generator in problem_generators:
def _generate_fast_benchmarks():
    for generator in fast_problem_generators:
        yield tuple(generator)


def _generate_base_inputs():
    combs = product(_generate_benchmarks(), algorithms)
def _generate_benchmarks():
    for generator in problem_generators:
        yield tuple(generator)

def _generate_base_inputs(fast=False):
    if fast:
        combs = product(_generate_fast_benchmarks(), fast_tests_algorithms) # Use fewer inputs for fast tests
    else:
        combs = product(_generate_benchmarks(), algorithms)
    for comb in combs:
        yield comb


def _generate_proba_inputs():
    combs = product(_generate_benchmarks(), algorithms, classifiers)
def _generate_proba_inputs(fast=False):
    if fast:
        combs = product(_generate_fast_benchmarks(), fast_tests_algorithms, [DecisionTreeClassifier()]) # Use minimal combinations
    else:
        combs = product(_generate_benchmarks(), algorithms, classifiers)
    for comb in combs:
        yield comb


class TestAlgorithms:

    @pytest.mark.parametrize(("bench", "algorithm"), _generate_base_inputs(), ids=str)
    @pytest.mark.parametrize(
        ("bench", "algorithm"),
        [
            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_base_inputs(fast=True)],
            *[pytest.param(*inputs) for inputs in _generate_base_inputs(fast=False)]
        ]
    )
    def test_base_algorithms(self, bench, algorithm):
        (instance, oracle) = bench
        ca_system = algorithm
        learned_instance = ca_system.learn(instance=instance, oracle=oracle)
        assert len(learned_instance.cl) > 0
        assert learned_instance.get_cpmpy_model().solve()

    @pytest.mark.parametrize(("bench", "inner_alg"), _generate_base_inputs(), ids=str)
    @pytest.mark.parametrize(
        ("bench", "inner_alg"),
        [
            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_base_inputs(fast=True)],
            *[pytest.param(*inputs) for inputs in _generate_base_inputs(fast=False)]
        ]
    )
    def test_growacq(self, bench, inner_alg):
        env = ca.ActiveCAEnv()
        (instance, oracle) = bench
@@ -57,7 +74,13 @@ def test_growacq(self, bench, inner_alg):
        assert len(learned_instance.cl) > 0
        assert learned_instance.get_cpmpy_model().solve()

    @pytest.mark.parametrize(("bench", "algorithm", "classifier"), _generate_proba_inputs(), ids=str)
    @pytest.mark.parametrize(
        ("bench", "algorithm", "classifier"),
        [
            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_proba_inputs(fast=True)],
            *[pytest.param(*inputs) for inputs in _generate_proba_inputs(fast=False)]
        ]
    )
    def test_proba(self, bench, algorithm, classifier):
        env = ca.ProbaActiveCAEnv(classifier=classifier)
        (instance, oracle) = bench
@@ -67,7 +90,13 @@ def test_proba(self, bench, algorithm, classifier):
        assert len(learned_instance.cl) > 0
        assert learned_instance.get_cpmpy_model().solve()

    @pytest.mark.parametrize(("bench", "inner_alg", "classifier"), _generate_proba_inputs(), ids=str)
    @pytest.mark.parametrize(
        ("bench", "inner_alg", "classifier"),
        [
            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_proba_inputs(fast=True)],
            *[pytest.param(*inputs) for inputs in _generate_proba_inputs(fast=False)]
        ]
    )
    def test_proba_growacq(self, bench, inner_alg, classifier):
        env = ca.ProbaActiveCAEnv(classifier=classifier)
        (instance, oracle) = bench
@@ -76,7 +105,13 @@ def test_proba_growacq(self, bench, inner_alg, classifier):
        assert len(learned_instance.cl) > 0
        assert learned_instance.get_cpmpy_model().solve()

    @pytest.mark.parametrize(("bench", "algorithm"), _generate_base_inputs(), ids=str)
    @pytest.mark.parametrize(
        ("bench", "algorithm"),
        [
            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_base_inputs(fast=True)],
            *[pytest.param(*inputs) for inputs in _generate_base_inputs(fast=False)]
        ]
    )
    def test_base_algorithms_with_initial_cl(self, bench, algorithm):
        (instance, oracle) = bench
        # Create a copy of the instance to avoid modifying the original
@@ -89,10 +124,16 @@ def test_base_algorithms_with_initial_cl(self, bench, algorithm):

        ca_system = algorithm
        learned_instance = ca_system.learn(instance=instance, oracle=oracle)
        assert len(learned_instance.cl) == initial_cl_size*2
        assert ca_system.env.metrics.converged
        assert learned_instance.get_cpmpy_model().solve()

    @pytest.mark.parametrize(("bench", "algorithm", "classifier"), _generate_proba_inputs(), ids=str)
    @pytest.mark.parametrize(
        ("bench", "algorithm", "classifier"),
        [
            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_proba_inputs(fast=True)],
            *[pytest.param(*inputs) for inputs in _generate_proba_inputs(fast=False)]
        ]
    )
    def test_proba_with_initial_cl(self, bench, algorithm, classifier):
        env = ca.ProbaActiveCAEnv(classifier=classifier)
        (instance, oracle) = bench
@@ -107,10 +148,16 @@ def test_proba_with_initial_cl(self, bench, algorithm, classifier):
        ca_system = algorithm
        ca_system.env = env
        learned_instance = ca_system.learn(instance=instance, oracle=oracle)
        assert len(learned_instance.cl) == initial_cl_size*2
        assert ca_system.env.metrics.converged
        assert learned_instance.get_cpmpy_model().solve()

    @pytest.mark.parametrize(("bench", "algorithm"), _generate_base_inputs(), ids=str)
    @pytest.mark.parametrize(
        ("bench", "algorithm"),
        [
            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_base_inputs(fast=True)],
            *[pytest.param(*inputs) for inputs in _generate_base_inputs(fast=False)]
        ]
    )
    def test_base_algorithms_with_bias(self, bench, algorithm):
        (instance, oracle) = bench
        # Create a copy of the instance to avoid modifying the original
@@ -129,7 +176,13 @@ def test_base_algorithms_with_bias(self, bench, algorithm):
        assert len(learned_instance.cl) > 0
        assert learned_instance.get_cpmpy_model().solve()

    @pytest.mark.parametrize(("bench", "algorithm", "classifier"), _generate_proba_inputs(), ids=str)
    @pytest.mark.parametrize(
        ("bench", "algorithm", "classifier"),
        [
            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_proba_inputs(fast=True)],
            *[pytest.param(*inputs) for inputs in _generate_proba_inputs(fast=False)]
        ]
    )
    def test_proba_with_bias(self, bench, algorithm, classifier):
        env = ca.ProbaActiveCAEnv(classifier=classifier)
        (instance, oracle) = bench
@@ -150,7 +203,13 @@ def test_proba_with_bias(self, bench, algorithm, classifier):
        assert len(learned_instance.cl) > 0
        assert learned_instance.get_cpmpy_model().solve()

    @pytest.mark.parametrize(("bench", "inner_alg"), _generate_base_inputs(), ids=str)
    @pytest.mark.parametrize(
        ("bench", "inner_alg"),
        [
            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_base_inputs(fast=True)],
            *[pytest.param(*inputs) for inputs in _generate_base_inputs(fast=False)]
        ]
    )
    def test_growacq_with_initial_cl(self, bench, inner_alg):
        (instance, oracle) = bench
        # Create a copy of the instance to avoid modifying the original
@@ -163,10 +222,16 @@ def test_growacq_with_initial_cl(self, bench, inner_alg):

        ca_system = ca.GrowAcq(inner_algorithm=inner_alg)
        learned_instance = ca_system.learn(instance=instance, oracle=oracle)
        assert len(learned_instance.cl) == initial_cl_size*2
        assert ca_system.env.metrics.converged
        assert learned_instance.get_cpmpy_model().solve()

    @pytest.mark.parametrize(("bench", "inner_alg"), _generate_base_inputs(), ids=str)
    @pytest.mark.parametrize(
        ("bench", "inner_alg"),
        [
            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_base_inputs(fast=True)],
            *[pytest.param(*inputs) for inputs in _generate_base_inputs(fast=False)]
        ]
    )
    def test_growacq_with_bias(self, bench, inner_alg):
        (instance, oracle) = bench
        # Create a copy of the instance to avoid modifying the original
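Every parametrize decorator in the diff follows the same pattern: the fast combinations are wrapped in pytest.param(..., marks=pytest.mark.fast) and concatenated with the unmarked full combinations. The following self-contained sketch shows the mechanics with hypothetical stand-in values; the project's real inputs are the benchmarks and algorithms constructed above:

from itertools import product

import pytest

# Hypothetical stand-ins for the module's real lists
# (fast_problem_generators / problem_generators, fast_tests_algorithms / algorithms).
FAST_PROBLEMS = ["murder"]
ALL_PROBLEMS = ["murder", "examtt", "nurses"]
FAST_ALGOS = ["quacq"]
ALL_ALGOS = ["quacq", "pquacq"]

def _generate_inputs(fast=False):
    # Mirrors _generate_base_inputs: a smaller cross-product when fast=True.
    problems, algos = (FAST_PROBLEMS, FAST_ALGOS) if fast else (ALL_PROBLEMS, ALL_ALGOS)
    yield from product(problems, algos)

@pytest.mark.parametrize(
    ("problem", "algo"),
    [
        # Fast cases carry the marker, so `-m fast` selects exactly these...
        *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_inputs(fast=True)],
        # ...while an unfiltered run also collects the full, unmarked grid.
        *[pytest.param(*inputs) for inputs in _generate_inputs(fast=False)],
    ],
)
def test_pattern(problem, algo):
    assert problem in ALL_PROBLEMS and algo in ALL_ALGOS

Because the fast inputs are a subset of the full ones, an unfiltered run collects the fast combinations a second time as unmarked duplicates; the split pays off when CI filters with -m fast.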