diff --git a/.github/workflows/python-test.yml b/.github/workflows/python-test.yml
index e763f9c..9ade4a7 100644
--- a/.github/workflows/python-test.yml
+++ b/.github/workflows/python-test.yml
@@ -20,5 +20,5 @@ jobs:
         if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
     - name: Test with pytest
       run: |
-        python -m pytest tests/
+        python -m pytest -v -m fast tests/
\ No newline at end of file
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000..aad997f
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,3 @@
+[pytest]
+markers =
+    fast: mark test as a fast test
diff --git a/tests/test_algorithms.py b/tests/test_algorithms.py
index 7a29341..102b631 100644
--- a/tests/test_algorithms.py
+++ b/tests/test_algorithms.py
@@ -8,39 +8,50 @@ from sklearn.tree import DecisionTreeClassifier
 from sklearn.ensemble import RandomForestClassifier
 
-problem_generators = [construct_murder_problem(), construct_examtt_simple(6, 3, 2, 10), construct_nurse_rostering()]
+# Modify the problem generators for fast tests
+fast_problem_generators = [construct_murder_problem()] # Keep only the smallest problem
 
-benchmarks = []
-instance, oracle = construct_murder_problem()
-benchmarks.append({"instance": instance, "oracle": oracle})
-instance, oracle = construct_examtt_simple(6, 3, 2, 10)
-benchmarks.append({"instance": instance, "oracle": oracle})
-instance, oracle = construct_nurse_rostering()
-benchmarks.append({"instance": instance, "oracle": oracle})
+problem_generators = [construct_murder_problem(), construct_examtt_simple(6, 3, 2, 10), construct_nurse_rostering()]
 
 classifiers = [DecisionTreeClassifier(), RandomForestClassifier()]
 
 algorithms = [ca.QuAcq(), ca.MQuAcq(), ca.MQuAcq2(), ca.GQuAcq(), ca.PQuAcq()]
+fast_tests_algorithms = [ca.QuAcq(), ca.MQuAcq(), ca.MQuAcq2()]
 
 
-def _generate_benchmarks():
-    for generator in problem_generators:
+def _generate_fast_benchmarks():
+    for generator in fast_problem_generators:
         yield tuple(generator)
 
-
-def _generate_base_inputs():
-    combs = product(_generate_benchmarks(), algorithms)
+def _generate_benchmarks():
+    for generator in problem_generators:
+        yield tuple(generator)
+
+def _generate_base_inputs(fast=False):
+    if fast:
+        combs = product(_generate_fast_benchmarks(), fast_tests_algorithms) # Use fewer inputs for fast tests
+    else:
+        combs = product(_generate_benchmarks(), algorithms)
     for comb in combs:
         yield comb
 
 
-def _generate_proba_inputs():
-    combs = product(_generate_benchmarks(), algorithms, classifiers)
+def _generate_proba_inputs(fast=False):
+    if fast:
+        combs = product(_generate_fast_benchmarks(), fast_tests_algorithms, [DecisionTreeClassifier()]) # Use minimal combinations
+    else:
+        combs = product(_generate_benchmarks(), algorithms, classifiers)
     for comb in combs:
         yield comb
 
 
 class TestAlgorithms:
 
-    @pytest.mark.parametrize(("bench", "algorithm"), _generate_base_inputs(), ids=str)
+    @pytest.mark.parametrize(
+        ("bench", "algorithm"),
+        [
+            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_base_inputs(fast=True)],
+            *[pytest.param(*inputs) for inputs in _generate_base_inputs(fast=False)]
+        ]
+    )
     def test_base_algorithms(self, bench, algorithm):
         (instance, oracle) = bench
         ca_system = algorithm
@@ -48,7 +59,13 @@ def test_base_algorithms(self, bench, algorithm):
         assert len(learned_instance.cl) > 0
         assert learned_instance.get_cpmpy_model().solve()
 
-    @pytest.mark.parametrize(("bench", "inner_alg"), _generate_base_inputs(), ids=str)
+    @pytest.mark.parametrize(
+        ("bench", "inner_alg"),
+        [
+            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_base_inputs(fast=True)],
+            *[pytest.param(*inputs) for inputs in _generate_base_inputs(fast=False)]
+        ]
+    )
     def test_growacq(self, bench, inner_alg):
         env = ca.ActiveCAEnv()
         (instance, oracle) = bench
@@ -57,7 +74,13 @@ def test_growacq(self, bench, inner_alg):
         assert len(learned_instance.cl) > 0
         assert learned_instance.get_cpmpy_model().solve()
 
-    @pytest.mark.parametrize(("bench", "algorithm", "classifier"), _generate_proba_inputs(), ids=str)
+    @pytest.mark.parametrize(
+        ("bench", "algorithm", "classifier"),
+        [
+            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_proba_inputs(fast=True)],
+            *[pytest.param(*inputs) for inputs in _generate_proba_inputs(fast=False)]
+        ]
+    )
     def test_proba(self, bench, algorithm, classifier):
         env = ca.ProbaActiveCAEnv(classifier=classifier)
         (instance, oracle) = bench
@@ -67,7 +90,13 @@ def test_proba(self, bench, algorithm, classifier):
         assert len(learned_instance.cl) > 0
         assert learned_instance.get_cpmpy_model().solve()
 
-    @pytest.mark.parametrize(("bench", "inner_alg", "classifier"), _generate_proba_inputs(), ids=str)
+    @pytest.mark.parametrize(
+        ("bench", "inner_alg", "classifier"),
+        [
+            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_proba_inputs(fast=True)],
+            *[pytest.param(*inputs) for inputs in _generate_proba_inputs(fast=False)]
+        ]
+    )
     def test_proba_growacq(self, bench, inner_alg, classifier):
         env = ca.ProbaActiveCAEnv(classifier=classifier)
         (instance, oracle) = bench
@@ -76,7 +105,13 @@ def test_proba_growacq(self, bench, inner_alg, classifier):
         assert len(learned_instance.cl) > 0
         assert learned_instance.get_cpmpy_model().solve()
 
-    @pytest.mark.parametrize(("bench", "algorithm"), _generate_base_inputs(), ids=str)
+    @pytest.mark.parametrize(
+        ("bench", "algorithm"),
+        [
+            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_base_inputs(fast=True)],
+            *[pytest.param(*inputs) for inputs in _generate_base_inputs(fast=False)]
+        ]
+    )
     def test_base_algorithms_with_initial_cl(self, bench, algorithm):
         (instance, oracle) = bench
         # Create a copy of the instance to avoid modifying the original
@@ -89,10 +124,16 @@ def test_base_algorithms_with_initial_cl(self, bench, algorithm):
 
         ca_system = algorithm
         learned_instance = ca_system.learn(instance=instance, oracle=oracle)
-        assert len(learned_instance.cl) == initial_cl_size*2
+        assert ca_system.env.metrics.converged
         assert learned_instance.get_cpmpy_model().solve()
 
-    @pytest.mark.parametrize(("bench", "algorithm", "classifier"), _generate_proba_inputs(), ids=str)
+    @pytest.mark.parametrize(
+        ("bench", "algorithm", "classifier"),
+        [
+            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_proba_inputs(fast=True)],
+            *[pytest.param(*inputs) for inputs in _generate_proba_inputs(fast=False)]
+        ]
+    )
     def test_proba_with_initial_cl(self, bench, algorithm, classifier):
         env = ca.ProbaActiveCAEnv(classifier=classifier)
         (instance, oracle) = bench
@@ -107,10 +148,16 @@ def test_proba_with_initial_cl(self, bench, algorithm, classifier):
         ca_system = algorithm
         ca_system.env = env
         learned_instance = ca_system.learn(instance=instance, oracle=oracle)
-        assert len(learned_instance.cl) == initial_cl_size*2
+        assert ca_system.env.metrics.converged
         assert learned_instance.get_cpmpy_model().solve()
 
-    @pytest.mark.parametrize(("bench", "algorithm"), _generate_base_inputs(), ids=str)
+    @pytest.mark.parametrize(
+        ("bench", "algorithm"),
+        [
+            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_base_inputs(fast=True)],
+            *[pytest.param(*inputs) for inputs in _generate_base_inputs(fast=False)]
+        ]
+    )
     def test_base_algorithms_with_bias(self, bench, algorithm):
         (instance, oracle) = bench
         # Create a copy of the instance to avoid modifying the original
@@ -129,7 +176,13 @@ def test_base_algorithms_with_bias(self, bench, algorithm):
         assert len(learned_instance.cl) > 0
         assert learned_instance.get_cpmpy_model().solve()
 
-    @pytest.mark.parametrize(("bench", "algorithm", "classifier"), _generate_proba_inputs(), ids=str)
+    @pytest.mark.parametrize(
+        ("bench", "algorithm", "classifier"),
+        [
+            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_proba_inputs(fast=True)],
+            *[pytest.param(*inputs) for inputs in _generate_proba_inputs(fast=False)]
+        ]
+    )
     def test_proba_with_bias(self, bench, algorithm, classifier):
         env = ca.ProbaActiveCAEnv(classifier=classifier)
         (instance, oracle) = bench
@@ -150,7 +203,13 @@ def test_proba_with_bias(self, bench, algorithm, classifier):
         assert len(learned_instance.cl) > 0
         assert learned_instance.get_cpmpy_model().solve()
 
-    @pytest.mark.parametrize(("bench", "inner_alg"), _generate_base_inputs(), ids=str)
+    @pytest.mark.parametrize(
+        ("bench", "inner_alg"),
+        [
+            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_base_inputs(fast=True)],
+            *[pytest.param(*inputs) for inputs in _generate_base_inputs(fast=False)]
+        ]
+    )
     def test_growacq_with_initial_cl(self, bench, inner_alg):
         (instance, oracle) = bench
         # Create a copy of the instance to avoid modifying the original
@@ -163,10 +222,16 @@ def test_growacq_with_initial_cl(self, bench, inner_alg):
 
         ca_system = ca.GrowAcq(inner_algorithm=inner_alg)
         learned_instance = ca_system.learn(instance=instance, oracle=oracle)
-        assert len(learned_instance.cl) == initial_cl_size*2
+        assert ca_system.env.metrics.converged
        assert learned_instance.get_cpmpy_model().solve()
 
-    @pytest.mark.parametrize(("bench", "inner_alg"), _generate_base_inputs(), ids=str)
+    @pytest.mark.parametrize(
+        ("bench", "inner_alg"),
+        [
+            *[pytest.param(*inputs, marks=pytest.mark.fast) for inputs in _generate_base_inputs(fast=True)],
+            *[pytest.param(*inputs) for inputs in _generate_base_inputs(fast=False)]
+        ]
+    )
     def test_growacq_with_bias(self, bench, inner_alg):
         (instance, oracle) = bench
         # Create a copy of the instance to avoid modifying the original
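
Note on running the suites locally: with the `fast` marker registered in pytest.ini above, marker selection works the usual pytest way. A sketch of the invocations (only the first command is taken verbatim from the workflow; the other two are standard pytest marker expressions, not part of this change):

    # quick subset only, exactly what the CI workflow now runs
    python -m pytest -v -m fast tests/

    # full parametrization (fast and slow benchmark/algorithm combinations)
    python -m pytest -v tests/

    # only the combinations not marked as fast
    python -m pytest -v -m "not fast" tests/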