From 23cae9c633bfbb9ea471f6399b19a19bd0c326cc Mon Sep 17 00:00:00 2001
From: Dilan Pathirana <59329744+dilpath@users.noreply.github.com>
Date: Wed, 8 Jan 2025 12:43:10 +0100
Subject: [PATCH] Deprecate `petab_yaml` properly; move pypesto test to pyPESTO (#145)

---
 petab_select/model_subspace.py           |  11 +-
 pyproject.toml                           |   4 -
 test/pypesto/__init__.py                 |   0
 test/pypesto/generate_expected_models.py |  90 ----------
 test/pypesto/regenerate_model_hashes.py  |  34 ----
 test/pypesto/test_pypesto.py             | 217 -----------------------
 6 files changed, 10 insertions(+), 346 deletions(-)
 delete mode 100644 test/pypesto/__init__.py
 delete mode 100644 test/pypesto/generate_expected_models.py
 delete mode 100644 test/pypesto/regenerate_model_hashes.py
 delete mode 100644 test/pypesto/test_pypesto.py

diff --git a/petab_select/model_subspace.py b/petab_select/model_subspace.py
index 92a67903..6b0b3cb9 100644
--- a/petab_select/model_subspace.py
+++ b/petab_select/model_subspace.py
@@ -733,7 +733,16 @@ def from_definition(
             The model subspace.
         """
         model_subspace_id = definition.pop(MODEL_SUBSPACE_ID)
-        petab_yaml = definition.pop(MODEL_SUBSPACE_PETAB_YAML)
+        if "petab_yaml" in definition:
+            petab_yaml = definition.pop("petab_yaml")
+            warnings.warn(
+                "Change the `petab_yaml` column to "
+                "`model_subspace_petab_yaml`, in the model space TSV.",
+                DeprecationWarning,
+                stacklevel=1,
+            )
+        else:
+            petab_yaml = definition.pop(MODEL_SUBSPACE_PETAB_YAML)
         parameters = {
             column_id: decompress_parameter_values(value)
             for column_id, value in definition.items()
diff --git a/pyproject.toml b/pyproject.toml
index 8e2f1f09..5462e7a1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -34,10 +34,6 @@ plot = [
 test = [
     "pytest >= 5.4.3",
     "pytest-cov >= 2.10.0",
-    "amici >= 0.11.25",
-    "fides >= 0.7.5",
-    "pypesto >= 0.5.4",
-    #"pypesto @ git+https://github.com/ICB-DCM/pyPESTO.git@select_mkstd#egg=pypesto",
     "tox >= 3.12.4",
 ]
 doc = [
diff --git a/test/pypesto/__init__.py b/test/pypesto/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/test/pypesto/generate_expected_models.py b/test/pypesto/generate_expected_models.py
deleted file mode 100644
index ca6e8b09..00000000
--- a/test/pypesto/generate_expected_models.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import os
-from pathlib import Path
-
-import fides
-import pypesto.engine
-import pypesto.optimize
-import pypesto.select
-
-import petab_select
-
-SKIP_TEST_CASES_WITH_PREEXISTING_EXPECTED_MODEL = False
-os.environ["AMICI_EXPERIMENTAL_SBML_NONCONST_CLS"] = "1"
-
-# Set to `[]` to test all
-test_cases = [
-    #'0001',
-    # "0003",
-    "0009",
-]
-
-# Do not use computationally-expensive test cases in CI
-skip_test_cases = [
-    # "0009",
-]
-
-test_cases_path = Path(__file__).resolve().parent.parent.parent / "test_cases"
-
-# Reduce runtime but with high reproducibility
-minimize_options = {
-    "n_starts": 24,
-    "optimizer": pypesto.optimize.FidesOptimizer(
-        verbose=0, hessian_update=fides.BFGS()
-    ),
-    "engine": pypesto.engine.MultiProcessEngine(),
-    "filename": None,
-    "progress_bar": False,
-}
-
-
-def objective_customizer(obj):
-    # obj.amici_solver.setAbsoluteTolerance(1e-17)
-    obj.amici_solver.setRelativeTolerance(1e-12)
-
-
-model_problem_options = {
-    "minimize_options": minimize_options,
-    "objective_customizer": objective_customizer,
-}
-
-
-for test_case_path in test_cases_path.glob("*"):
-    if test_cases and test_case_path.stem not in test_cases:
-        continue
-
-    if test_case_path.stem in skip_test_cases:
-        continue
-
-    expected_model_yaml = test_case_path / "expected.yaml"
-
-    if (
-        SKIP_TEST_CASES_WITH_PREEXISTING_EXPECTED_MODEL
-        and expected_model_yaml.is_file()
-    ):
-        # Skip test cases that already have an expected model.
-        continue
-    print(f"Running test case {test_case_path.stem}")
-
-    # Setup the pyPESTO model selector instance.
-    petab_select_problem = petab_select.Problem.from_yaml(
-        test_case_path / "petab_select_problem.yaml",
-    )
-    pypesto_select_problem = pypesto.select.Problem(
-        petab_select_problem=petab_select_problem
-    )
-
-    # Run the selection process until "exhausted".
-    pypesto_select_problem.select_to_completion(**model_problem_options)
-
-    # Get the best model
-    best_model = petab_select.analyze.get_best(
-        models=pypesto_select_problem.calibrated_models,
-        criterion=petab_select_problem.criterion,
-    )
-
-    # Generate the expected model.
-    best_model.to_yaml(expected_model_yaml)
-
-    # pypesto_select_problem.calibrated_models.to_yaml(
-    #     output_yaml="all_models.yaml",
-    # )
diff --git a/test/pypesto/regenerate_model_hashes.py b/test/pypesto/regenerate_model_hashes.py
deleted file mode 100644
index 95fdfb2a..00000000
--- a/test/pypesto/regenerate_model_hashes.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from pathlib import Path
-
-import yaml
-
-import petab_select
-from petab_select import (
-    MODEL_HASH,
-    MODEL_ID,
-    MODEL_SUBSPACE_ID,
-    MODEL_SUBSPACE_INDICES,
-    PREDECESSOR_MODEL_HASH,
-)
-
-test_cases_path = Path(__file__).resolve().parent.parent.parent / "test_cases"
-
-
-for test_case_path in test_cases_path.glob("*"):
-    petab_select_problem = petab_select.Problem.from_yaml(
-        test_case_path / "petab_select_problem.yaml",
-    )
-    expected_model_yaml = test_case_path / "expected.yaml"
-
-    with open(expected_model_yaml) as f:
-        model_dict = yaml.safe_load(f)
-
-    model = petab_select_problem.model_space.model_subspaces[
-        model_dict[MODEL_SUBSPACE_ID]
-    ].indices_to_model(model_dict[MODEL_SUBSPACE_INDICES])
-    model_dict[MODEL_ID] = str(model.model_id)
-    model_dict[MODEL_HASH] = str(model.get_hash())
-    model_dict[PREDECESSOR_MODEL_HASH] = None
-
-    with open(expected_model_yaml, "w") as f:
-        yaml.safe_dump(model_dict, f)
diff --git a/test/pypesto/test_pypesto.py b/test/pypesto/test_pypesto.py
deleted file mode 100644
index efaf10a4..00000000
--- a/test/pypesto/test_pypesto.py
+++ /dev/null
@@ -1,217 +0,0 @@
-import os
-import shlex
-import shutil
-import subprocess
-from pathlib import Path
-
-import numpy as np
-import pandas as pd
-import pypesto.engine
-import pypesto.optimize
-import pypesto.select
-import pytest
-import yaml
-
-import petab_select
-from petab_select import Model
-from petab_select.constants import (
-    CRITERIA,
-    ESTIMATED_PARAMETERS,
-    TERMINATE,
-)
-
-os.environ["AMICI_EXPERIMENTAL_SBML_NONCONST_CLS"] = "1"
-
-# Set to `[]` to test all
-test_cases = [
-    # '0001',
-    # '0006',
-    # '0002',
-    # '0008',
-    # "0009",
-]
-
-# Do not use computationally-expensive test cases in CI
-skip_test_cases = [
-    "0009",
-]
-
-test_cases_path = Path(__file__).resolve().parent.parent.parent / "test_cases"
-
-# Reduce runtime but with high reproducibility
-minimize_options = {
-    "n_starts": 10,
-    "engine": pypesto.engine.MultiProcessEngine(),
-    "filename": None,
-    "progress_bar": False,
-}
-
-
-def objective_customizer(obj):
-    # obj.amici_solver.setAbsoluteTolerance(1e-17)
-    obj.amici_solver.setRelativeTolerance(1e-12)
-
-
-model_problem_options = {
-    "minimize_options": minimize_options,
-    "objective_customizer": objective_customizer,
-}
-
-
-@pytest.mark.parametrize(
-    "test_case_path_stem",
-    sorted(
-        [test_case_path.stem for test_case_path in test_cases_path.glob("*")]
-    ),
-)
-def test_pypesto(test_case_path_stem):
-    """Run all test cases with pyPESTO."""
-    if test_cases and test_case_path_stem not in test_cases:
-        pytest.skip("Test excluded from subset selected for debugging.")
-
-    if test_case_path_stem in skip_test_cases:
-        pytest.skip("Test marked to be skipped.")
-
-    test_case_path = test_cases_path / test_case_path_stem
-    expected_model_yaml = test_case_path / "expected.yaml"
-    # Setup the pyPESTO model selector instance.
-    petab_select_problem = petab_select.Problem.from_yaml(
-        test_case_path / "petab_select_problem.yaml",
-    )
-    pypesto_select_problem = pypesto.select.Problem(
-        petab_select_problem=petab_select_problem
-    )
-
-    # Run the selection process until "exhausted".
-    pypesto_select_problem.select_to_completion(
-        model_problem_options=model_problem_options,
-    )
-
-    # Get the best model
-    best_model = petab_select.analyze.get_best(
-        models=pypesto_select_problem.calibrated_models,
-        criterion=petab_select_problem.criterion,
-        compare=petab_select_problem.compare,
-    )
-
-    # Load the expected model.
-    expected_model = Model.from_yaml(expected_model_yaml)
-
-    def get_series(model, dict_attribute) -> pd.Series:
-        return pd.Series(
-            getattr(model, dict_attribute),
-            dtype=np.float64,
-        ).sort_index()
-
-    # The estimated parameters and criteria values are as expected.
-    for dict_attribute in [CRITERIA, ESTIMATED_PARAMETERS]:
-        pd.testing.assert_series_equal(
-            get_series(expected_model, dict_attribute),
-            get_series(best_model, dict_attribute),
-            rtol=1e-2,
-        )
-    # FIXME ensure `current model criterion` trajectory also matches, in summary.tsv file,
-    # for test case 0009, after summary format is revised
-
-
-@pytest.mark.skipif(
-    os.getenv("GITHUB_ACTIONS") == "true",
-    reason="Too CPU heavy for CI.",
-)
-def test_famos_cli():
-    """Run test case 0009 with pyPESTO and the CLI interface."""
-    test_case_path = test_cases_path / "0009"
-    expected_model_yaml = test_case_path / "expected.yaml"
-    problem_yaml = test_case_path / "petab_select_problem.yaml"
-
-    problem = petab_select.Problem.from_yaml(problem_yaml)
-
-    # Setup working directory for intermediate files
-    work_dir = Path(__file__).parent / "output_famos_cli"
-    work_dir_str = str(work_dir)
-    if work_dir.exists():
-        shutil.rmtree(work_dir_str)
-    work_dir.mkdir(exist_ok=True, parents=True)
-
-    models_yamls = []
-    metadata_yaml = work_dir / "metadata.yaml"
-    state_dill = work_dir / "state.dill"
-    iteration = 0
-    while True:
-        iteration += 1
-        uncalibrated_models_yaml = (
-            work_dir / f"uncalibrated_models_{iteration}.yaml"
-        )
-        calibrated_models_yaml = (
-            work_dir / f"calibrated_models_{iteration}.yaml"
-        )
-        models_yaml = work_dir / f"models_{iteration}.yaml"
-        models_yamls.append(models_yaml)
-        # Start iteration
-        subprocess.run(  # noqa: S603
-            shlex.split(
-                f"""petab_select start_iteration
-                    --problem {problem_yaml}
-                    --state {state_dill}
-                    --output-uncalibrated-models {uncalibrated_models_yaml}
-                """
-            )
-        )
-        # Calibrate models
-        models = petab_select.Models.from_yaml(uncalibrated_models_yaml)
-        for model in models:
-            pypesto.select.ModelProblem(
-                model=model,
-                criterion=problem.criterion,
-                **model_problem_options,
-            )
-        models.to_yaml(filename=calibrated_models_yaml)
-        # End iteration
-        subprocess.run(  # noqa: S603
-            shlex.split(
-                f"""petab_select end_iteration
-                    --output-models {models_yaml}
-                    --output-metadata {metadata_yaml}
-                    --state {state_dill}
-                    --calibrated-models {calibrated_models_yaml}
-                """
-            )
-        )
-        with open(metadata_yaml) as f:
-            metadata = yaml.safe_load(f)
-        if metadata[TERMINATE]:
-            break
-
-    # Get the best model
-    models_yamls_arg = " ".join(
-        f"--models {models_yaml}" for models_yaml in models_yamls
-    )
-    subprocess.run(  # noqa: S603
-        shlex.split(
-            f"""petab_select get_best
-                --problem {problem_yaml}
-                {models_yamls_arg}
-                --output {work_dir / "best_model.yaml"}
-            """
-        )
-    )
-    best_model = petab_select.Model.from_yaml(work_dir / "best_model.yaml")

-    # Load the expected model.
-    expected_model = Model.from_yaml(expected_model_yaml)
-
-    def get_series(model, dict_attribute) -> pd.Series:
-        return pd.Series(
-            getattr(model, dict_attribute),
-            dtype=np.float64,
-        ).sort_index()
-
-    # The estimated parameters and criteria values are as expected.
-    for dict_attribute in [CRITERIA, ESTIMATED_PARAMETERS]:
-        pd.testing.assert_series_equal(
-            get_series(expected_model, dict_attribute),
-            get_series(best_model, dict_attribute),
-            rtol=1e-2,
-        )
-    # FIXME ensure `current model criterion` trajectory also matches, in summary.tsv file,
-    # after summary format is revised
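
Usage note (not part of the committed diff): after this change, model space tables should use the `model_subspace_petab_yaml` column header; a table that still uses the deprecated `petab_yaml` header keeps working but emits a `DeprecationWarning`. A minimal migration sketch, assuming a tab-separated model space table at an illustrative path `model_space.tsv`:

```python
import pandas as pd

# Load an existing model space table (path is illustrative).
model_space = pd.read_csv("model_space.tsv", sep="\t")

# Rename the deprecated column to the name now expected by petab_select.
model_space = model_space.rename(
    columns={"petab_yaml": "model_subspace_petab_yaml"}
)

# Write the updated table back, without the pandas index column.
model_space.to_csv("model_space.tsv", sep="\t", index=False)
```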