From 61b4f1ed2c71bee27928448b1112edd366c9a252 Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Tue, 13 Jul 2021 23:22:35 +0200
Subject: [PATCH 01/11] Update AMICI (#354)

... and remove cpputest-related build steps. CppUTest now replaced by gtest.

* git subrepo clone --branch=d6a076839dbeb55951c80d6f0d5ca7ade220ea22 --force git@github.com:AMICI-dev/AMICI.git deps/AMICI

subrepo:
  subdir:   "deps/AMICI"
  merged:   "d6a07683"
upstream:
  origin:   "git@github.com:AMICI-dev/AMICI.git"
  branch:   "d6a076839dbeb55951c80d6f0d5ca7ade220ea22"
  commit:   "d6a07683"
git-subrepo:
  version:  "0.4.3"
  origin:   "https://github.com/ingydotnet/git-subrepo"
  commit:   "be9f02a"

* Remove cpputest-related code
---
 .github/workflows/parpe_tests.yml | 6 +-
 README.md | 2 +-
 buildSuperMUC.sh | 13 -
 .../charliecloud/parpe_base/install_parpe.sh | 5 +-
 deps/AMICI/.github/workflows/test_install.yml | 4 -
 .../workflows/test_python_cplusplus.yml | 8 +-
 deps/AMICI/.gitignore | 11 +-
 deps/AMICI/.gitrepo | 6 +-
 deps/AMICI/CMakeLists.txt | 2 +-
 deps/AMICI/documentation/CI.md | 12 +-
 deps/AMICI/documentation/development.rst | 6 +-
 deps/AMICI/matlab/tests/testModels.m | 14 +-
 .../python/tests/test_pregenerated_models.py | 26 +-
 deps/AMICI/scripts/README.md | 9 +-
 deps/AMICI/scripts/buildAmici.sh | 23 +-
 deps/AMICI/scripts/buildCpputest.sh | 32 -
 deps/AMICI/scripts/buildDependencies.sh | 1 -
 deps/AMICI/scripts/buildXcode.sh | 10 +-
 deps/AMICI/scripts/run-cpp-tests.sh | 20 +
 deps/AMICI/scripts/run-cppcheck.sh | 15 +-
 deps/AMICI/scripts/run-cpputest.sh | 13 -
 deps/AMICI/scripts/run-valgrind-cpp.sh | 14 +-
 deps/AMICI/tests/README.md | 2 +-
 .../tests/{cpputest => cpp}/CMakeLists.txt | 45 +-
 deps/AMICI/tests/cpp/CMakeLists.txt.in | 15 +
 .../robertson => cpp/calvetti}/CMakeLists.txt | 10 +-
 .../{cpputest => cpp}/calvetti/tests1.cpp | 7 +-
 .../calvetti => cpp/dirac}/CMakeLists.txt | 10 +-
 .../tests/{cpputest => cpp}/dirac/tests1.cpp | 10 +-
 .../dirac => cpp/events}/CMakeLists.txt | 11 +-
 .../tests/{cpputest => cpp}/events/tests1.cpp | 11 +-
 .../{cpputest => cpp}/expectedResults.h5 | Bin
 .../jakstat_adjoint/CMakeLists.txt | 11 +-
 .../jakstat_adjoint/tests1.cpp | 27 +-
 .../cpp/jakstat_adjoint_o2/CMakeLists.txt | 21 +
 .../jakstat_adjoint_o2/tests1.cpp | 12 +-
 .../tests/cpp/nested_events/CMakeLists.txt | 21 +
 .../nested_events/tests1.cpp | 9 +-
 deps/AMICI/tests/cpp/neuron/CMakeLists.txt | 21 +
 .../tests/{cpputest => cpp}/neuron/tests1.cpp | 9 +-
 deps/AMICI/tests/cpp/neuron_o2/CMakeLists.txt | 21 +
 .../{cpputest => cpp}/neuron_o2/tests1.cpp | 7 +-
 deps/AMICI/tests/cpp/robertson/CMakeLists.txt | 21 +
 .../{cpputest => cpp}/robertson/tests1.cpp | 13 +-
 .../tests/cpp/steadystate/CMakeLists.txt | 21 +
 .../{cpputest => cpp}/steadystate/tests1.cpp | 93 +-
 .../tests/{cpputest => cpp}/testOptions.h5 | Bin
 .../tests/{cpputest => cpp}/testfunctions.cpp | 57 +-
 .../tests/{cpputest => cpp}/testfunctions.h | 5 +-
 deps/AMICI/tests/cpp/unittests/CMakeLists.txt | 27 +
 .../AMICI/tests/cpp/unittests/testExpData.cpp | 328 +++++++
 deps/AMICI/tests/cpp/unittests/testMisc.cpp | 597 ++++++++++++
 .../tests/cpp/unittests/testSerialization.cpp | 257 +++++
 .../tests/{cpputest => cpp}/wrapTestModels.m | 0
 .../tests/cpputest/events/CMakeLists.txt | 18 -
 .../jakstat_adjoint_o2/CMakeLists.txt | 18 -
 deps/AMICI/tests/cpputest/main.cpp | 10 -
 .../cpputest/nested_events/CMakeLists.txt | 18 -
 .../tests/cpputest/neuron/CMakeLists.txt | 18 -
 .../tests/cpputest/neuron_o2/CMakeLists.txt | 18 -
 .../tests/cpputest/steadystate/CMakeLists.txt | 18 -
 .../tests/cpputest/unittests/CMakeLists.txt | 27 -
.../AMICI/tests/cpputest/unittests/tests1.cpp | 911 ------------------ .../cpputest/unittests/testsSerialization.cpp | 258 ----- .../generateTestConfigurationForExamples.sh | 28 +- templates/CMakeLists.template.txt | 28 - 66 files changed, 1639 insertions(+), 1682 deletions(-) delete mode 100755 deps/AMICI/scripts/buildCpputest.sh create mode 100755 deps/AMICI/scripts/run-cpp-tests.sh delete mode 100755 deps/AMICI/scripts/run-cpputest.sh rename deps/AMICI/tests/{cpputest => cpp}/CMakeLists.txt (60%) create mode 100644 deps/AMICI/tests/cpp/CMakeLists.txt.in rename deps/AMICI/tests/{cpputest/robertson => cpp/calvetti}/CMakeLists.txt (67%) rename deps/AMICI/tests/{cpputest => cpp}/calvetti/tests1.cpp (50%) rename deps/AMICI/tests/{cpputest/calvetti => cpp/dirac}/CMakeLists.txt (67%) rename deps/AMICI/tests/{cpputest => cpp}/dirac/tests1.cpp (54%) rename deps/AMICI/tests/{cpputest/dirac => cpp/events}/CMakeLists.txt (54%) rename deps/AMICI/tests/{cpputest => cpp}/events/tests1.cpp (55%) rename deps/AMICI/tests/{cpputest => cpp}/expectedResults.h5 (100%) rename deps/AMICI/tests/{cpputest => cpp}/jakstat_adjoint/CMakeLists.txt (54%) rename deps/AMICI/tests/{cpputest => cpp}/jakstat_adjoint/tests1.cpp (84%) create mode 100644 deps/AMICI/tests/cpp/jakstat_adjoint_o2/CMakeLists.txt rename deps/AMICI/tests/{cpputest => cpp}/jakstat_adjoint_o2/tests1.cpp (53%) create mode 100644 deps/AMICI/tests/cpp/nested_events/CMakeLists.txt rename deps/AMICI/tests/{cpputest => cpp}/nested_events/tests1.cpp (55%) create mode 100644 deps/AMICI/tests/cpp/neuron/CMakeLists.txt rename deps/AMICI/tests/{cpputest => cpp}/neuron/tests1.cpp (62%) create mode 100644 deps/AMICI/tests/cpp/neuron_o2/CMakeLists.txt rename deps/AMICI/tests/{cpputest => cpp}/neuron_o2/tests1.cpp (56%) create mode 100644 deps/AMICI/tests/cpp/robertson/CMakeLists.txt rename deps/AMICI/tests/{cpputest => cpp}/robertson/tests1.cpp (63%) create mode 100644 deps/AMICI/tests/cpp/steadystate/CMakeLists.txt rename deps/AMICI/tests/{cpputest => cpp}/steadystate/tests1.cpp (64%) rename deps/AMICI/tests/{cpputest => cpp}/testOptions.h5 (100%) rename deps/AMICI/tests/{cpputest => cpp}/testfunctions.cpp (91%) rename deps/AMICI/tests/{cpputest => cpp}/testfunctions.h (98%) create mode 100644 deps/AMICI/tests/cpp/unittests/CMakeLists.txt create mode 100644 deps/AMICI/tests/cpp/unittests/testExpData.cpp create mode 100644 deps/AMICI/tests/cpp/unittests/testMisc.cpp create mode 100644 deps/AMICI/tests/cpp/unittests/testSerialization.cpp rename deps/AMICI/tests/{cpputest => cpp}/wrapTestModels.m (100%) delete mode 100644 deps/AMICI/tests/cpputest/events/CMakeLists.txt delete mode 100644 deps/AMICI/tests/cpputest/jakstat_adjoint_o2/CMakeLists.txt delete mode 100644 deps/AMICI/tests/cpputest/main.cpp delete mode 100644 deps/AMICI/tests/cpputest/nested_events/CMakeLists.txt delete mode 100644 deps/AMICI/tests/cpputest/neuron/CMakeLists.txt delete mode 100644 deps/AMICI/tests/cpputest/neuron_o2/CMakeLists.txt delete mode 100644 deps/AMICI/tests/cpputest/steadystate/CMakeLists.txt delete mode 100644 deps/AMICI/tests/cpputest/unittests/CMakeLists.txt delete mode 100644 deps/AMICI/tests/cpputest/unittests/tests1.cpp delete mode 100644 deps/AMICI/tests/cpputest/unittests/testsSerialization.cpp diff --git a/.github/workflows/parpe_tests.yml b/.github/workflows/parpe_tests.yml index 11f66db27..f843d6741 100644 --- a/.github/workflows/parpe_tests.yml +++ b/.github/workflows/parpe_tests.yml @@ -37,13 +37,9 @@ jobs: run: | cd $AMICI_PATH \ && scripts/buildSuiteSparse.sh \ - && 
scripts/buildSundials.sh \ - && scripts/buildCpputest.sh #&& scripts/buildAmici.sh + && scripts/buildSundials.sh - name: Install AMICI - # with tests: - # CPPUTEST_BUILD_DIR=${AMICI_PATH}/ThirdParty/cpputest-master/build/ - # -DCppUTest_DIR=${CPPUTEST_BUILD_DIR} run: | cmake \ -S ${AMICI_PATH} \ diff --git a/README.md b/README.md index 2080c9b31..0fe3d277d 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ On Debian-based systems, dependencies can be installed via: ```shell sudo apt-get install build-essential cmake cmake-curses-gui \ coinor-libipopt-dev curl gfortran \ - libblas-dev libboost-serialization-dev libceres-dev libcpputest-dev \ + libblas-dev libboost-serialization-dev libceres-dev \ libmpich-dev libhdf5-dev libpython3-dev python3-pip ``` diff --git a/buildSuperMUC.sh b/buildSuperMUC.sh index 1b551d62f..7f969b105 100755 --- a/buildSuperMUC.sh +++ b/buildSuperMUC.sh @@ -9,17 +9,6 @@ # result in "ld: cannot find -lpkgcfg_lib_IPOPT_iomp5-NOTFOUND" # -> use newer CMake -build_cpputest() { - #cpputest - #CPPUTEST_PATH=${amici_path}/ThirdParty/cpputest-master - #cd ${CPPUTEST_PATH} - # -DC++11=ON breaks compilation of some `_override` for no obvious reason - #cmake -DC++11=OFF -DCMAKE_INSTALL_PREFIX=`pwd` - #make ${make_opts} - #make install - : -} - build_amici() { amici_path=${parpe_root}/deps/AMICI @@ -69,7 +58,6 @@ build_3rd_party_deps() { # build_boost "${parpe_root}/ThirdParty/installIpopt.sh" #./installCeres.sh - #./installCpputest.sh # ceres_base=${parpe_root}/ThirdParty/ceres-solver-1.13.0/ # ceres_install_dir=${ceres_base}/build/install/ # if [[ -d ${ceres_base} ]]; then @@ -108,7 +96,6 @@ parpe_root=$(cd "${parpe_root}" && pwd) make_opts=${MAKEOPTS--j12} -build_cpputest build_3rd_party_deps build_amici build_parpe diff --git a/container/charliecloud/parpe_base/install_parpe.sh b/container/charliecloud/parpe_base/install_parpe.sh index 1a46ef8c0..470334030 100755 --- a/container/charliecloud/parpe_base/install_parpe.sh +++ b/container/charliecloud/parpe_base/install_parpe.sh @@ -17,15 +17,12 @@ export PARPE_BASE=$(pwd) export AMICI_PATH=${PARPE_BASE}/deps/AMICI/ cd "${AMICI_PATH}" \ && scripts/buildSuiteSparse.sh \ - && scripts/buildSundials.sh \ - && scripts/buildCpputest.sh #&& scripts/buildAmici.sh + && scripts/buildSundials.sh mkdir -p "${AMICI_PATH}"/build && cd "${AMICI_PATH}"/build -CPPUTEST_BUILD_DIR=${AMICI_PATH}/ThirdParty/cpputest-master/build/ cmake \ -DCMAKE_BUILD_TYPE=Debug \ -DENABLE_PYTHON=ON \ -DBUILD_TESTS=OFF \ - -DCppUTest_DIR="${CPPUTEST_BUILD_DIR}" \ .. 
&& make -j12 #- cd $PARPE_BASE/ThirdParty && ./installCeres.sh diff --git a/deps/AMICI/.github/workflows/test_install.yml b/deps/AMICI/.github/workflows/test_install.yml index b075d8d6f..b47ae6b09 100644 --- a/deps/AMICI/.github/workflows/test_install.yml +++ b/deps/AMICI/.github/workflows/test_install.yml @@ -33,10 +33,6 @@ jobs: run: | scripts/buildSundials.sh - - name: Build cpputest - run: | - scripts/buildCpputest.sh - - name: Build AMICI run: | scripts/buildAmici.sh diff --git a/deps/AMICI/.github/workflows/test_python_cplusplus.yml b/deps/AMICI/.github/workflows/test_python_cplusplus.yml index 2d26cd288..81d32223c 100644 --- a/deps/AMICI/.github/workflows/test_python_cplusplus.yml +++ b/deps/AMICI/.github/workflows/test_python_cplusplus.yml @@ -69,7 +69,7 @@ jobs: - name: C++ tests run: | - scripts/run-cpputest.sh + scripts/run-cpp-tests.sh - name: Install python package run: | @@ -85,7 +85,7 @@ jobs: --cov-report=xml:"${AMICI_DIR}/build/coverage_py.xml" \ --cov-append \ ${AMICI_DIR}/python/tests - + - name: example notebooks run: | scripts/runNotebook.sh python/examples/example_*/ @@ -96,7 +96,7 @@ jobs: - name: Codecov Python uses: codecov/codecov-action@v1 - with: + with: token: ${{ secrets.CODECOV_TOKEN }} file: ./build/coverage_py.xml flags: python @@ -163,4 +163,4 @@ jobs: - name: C++ tests run: | - scripts/run-cpputest.sh + scripts/run-cpp-tests.sh diff --git a/deps/AMICI/.gitignore b/deps/AMICI/.gitignore index 44053581e..4b17a5945 100644 --- a/deps/AMICI/.gitignore +++ b/deps/AMICI/.gitignore @@ -4,9 +4,9 @@ build/* build-debug/* build_xcode/* swig/python/build/* -tests/cpputest/build/* -tests/cpputest/build_xcode/* -tests/cpputest/Testing/* +tests/cpp/build/* +tests/cpp/build_xcode/* +tests/cpp/Testing/* doc-venv/* doc/* @@ -136,8 +136,8 @@ tests/test/* */tests/fricker_2010_apoptosis_amici/* */tests/explicit_amici/* */tests/fixed_initial_amici/* -tests/cpputest/writeResults.h5 -tests/cpputest/writeResults.h5.bak +tests/cpp/writeResults.h5 +tests/cpp/writeResults.h5.bak tests/sbml-test-suite/* tests/sbml-test-suite/ tests/sedml-test-suite/ @@ -171,7 +171,6 @@ AMICI_guide.pdf ThirdParty/bionetgen.tar.gz ThirdParty/BioNetGen-* -ThirdParty/cpputest-master* ThirdParty/doxygen/* ThirdParty/mtocpp-master* ThirdParty/sundials/build/* diff --git a/deps/AMICI/.gitrepo b/deps/AMICI/.gitrepo index 0fd9144bf..6ba515c24 100644 --- a/deps/AMICI/.gitrepo +++ b/deps/AMICI/.gitrepo @@ -5,8 +5,8 @@ ; [subrepo] remote = git@github.com:ICB-DCM/AMICI.git - branch = v0.11.18 - commit = bd3b89d3839e6518532d547e54e7fe66ef3cfec3 - parent = 98a0746a708be9e43c7daeb54ec38bcb6709f7d9 + branch = d6a076839dbeb55951c80d6f0d5ca7ade220ea22 + commit = d6a076839dbeb55951c80d6f0d5ca7ade220ea22 + parent = ea1b80659eaf6a8f734df09c7af64dd55c659d3f cmdver = 0.4.3 method = merge diff --git a/deps/AMICI/CMakeLists.txt b/deps/AMICI/CMakeLists.txt index 3e2a7f1f9..09d464352 100644 --- a/deps/AMICI/CMakeLists.txt +++ b/deps/AMICI/CMakeLists.txt @@ -268,7 +268,7 @@ if(BUILD_TESTS) if(ENABLE_HDF5) enable_testing() - add_subdirectory(tests/cpputest) + add_subdirectory(tests/cpp) else() message(WARNING "Cannot build tests with ENABLE_HDF5=OFF.") endif() diff --git a/deps/AMICI/documentation/CI.md b/deps/AMICI/documentation/CI.md index 93fd106dc..3fd72d99f 100644 --- a/deps/AMICI/documentation/CI.md +++ b/deps/AMICI/documentation/CI.md @@ -18,7 +18,7 @@ tests are integrated with CMake, see `make help` in the build directory. 
## C++ unit and integration tests To run C++ tests, build AMICI with `make` or `scripts/buildAll.sh`, -then run `scripts/run-cpputest.sh`. +then run `scripts/run-cpp-tests.sh`. ## Python unit and integration tests @@ -58,7 +58,7 @@ To execute the Matlab test suite, run `tests/testModels.m`. Many of our integration tests are model simulations. The simulation results obtained from the Python and C++ are compared to results saved in an HDF5 file -(`tests/cpputest/expectedResults.h5`). +(`tests/cpp/expectedResults.h5`). Settings and data for the test simulations are also specified in this file. **Note:** The C++ code for the models is included in the repository under @@ -73,7 +73,7 @@ the Matlab model import routines change. This is done with - tests/cpputest/wrapTestModels.m + tests/cpp/wrapTestModels.m **Note:** This is currently only possible from Matlab < R2018a. This should change as soon as 1) all second-order sensitivity code is ported to C++/Python, @@ -84,13 +84,13 @@ for Python. ### Regenerating expected results To update test results, run `make test` in the build directory, -replace `tests/cpputest/expectedResults.h5` by -`tests/cpputest/writeResults.h5.bak` +replace `tests/cpp/expectedResults.h5` by +`tests/cpp/writeResults.h5.bak` [ONLY DO THIS AFTER TRIPLE CHECKING CORRECTNESS OF RESULTS] Before replacing the test results, confirm that only expected datasets have changed, e.g. using - h5diff -v --relative 1e-8 tests/cpputest/expectedResults.h5 tests/cpputest/writeResults.h5.bak | less + h5diff -v --relative 1e-8 tests/cpp/expectedResults.h5 tests/cpp/writeResults.h5.bak | less ## Adding/Updating tests diff --git a/deps/AMICI/documentation/development.rst b/deps/AMICI/documentation/development.rst index 0e4899bd2..365da13c6 100644 --- a/deps/AMICI/documentation/development.rst +++ b/deps/AMICI/documentation/development.rst @@ -60,7 +60,7 @@ process described below: (our CI pipeline will do this for you) - When adding new functionality, please also provide test cases (see - ``tests/cpputest/`` and + ``tests/cpp/`` and `documentation/CI.md `__) - Write meaningful commit messages @@ -68,10 +68,10 @@ process described below: - Run all tests to ensure nothing was broken (`more details `__) - - Run ``scripts/buildAll.sh && scripts/run-cpputest.sh``. + - Run ``scripts/buildAll.sh && scripts/run-cpp-tests.sh``. - If you made changes to the Matlab or C++ code and have a Matlab - license, please also run ``tests/cpputest/wrapTestModels.m`` and + license, please also run ``tests/cpp/wrapTestModels.m`` and ``tests/testModels.m`` - If you made changes to the Python or C++ code, run diff --git a/deps/AMICI/matlab/tests/testModels.m b/deps/AMICI/matlab/tests/testModels.m index baf1c57bd..047f86daf 100644 --- a/deps/AMICI/matlab/tests/testModels.m +++ b/deps/AMICI/matlab/tests/testModels.m @@ -18,18 +18,18 @@ function testModels() model_dir = [fileparts(mfilename('fullpath')) '/../../models/']; cd(fileparts(mfilename('fullpath'))) - addpath(genpath('../../tests/cpputest')); + addpath(genpath('../../tests/cpp')); addpath(genpath('../examples')); % wrapTestModels() cd(fileparts(mfilename('fullpath'))) hdf5file = fullfile(fileparts(mfilename('fullpath')), ... 
- '../../tests/cpputest', 'expectedResults.h5'); - + '../../tests/cpp', 'expectedResults.h5'); + info = h5info(hdf5file); for imodel = 1:length(info.Groups) modelname = info.Groups(imodel).Name(2:end); - + if(~isempty(regexp(modelname,'^model_neuron'))) model_atol = 1e-9; model_rtol = 1e-4; @@ -42,18 +42,18 @@ function testModels() if(ismember(testname, ignoredTests)) continue end - + display(testname); [results,options,data,t,theta,kappa] = readDataFromHDF5(info.Groups(imodel).Groups(itest),hdf5file); - + % rebuild model old_path = addpath([model_dir modelname]); old_pwd = cd([model_dir modelname]); rebuild = str2func(['rebuild_' modelname]); rebuild(); cd(old_pwd); - + sol = getResults(modelname,options,data,t,theta,kappa); compareResults(sol,results); path(old_path); diff --git a/deps/AMICI/python/tests/test_pregenerated_models.py b/deps/AMICI/python/tests/test_pregenerated_models.py index 6bd63768b..470cc8ed8 100755 --- a/deps/AMICI/python/tests/test_pregenerated_models.py +++ b/deps/AMICI/python/tests/test_pregenerated_models.py @@ -3,19 +3,18 @@ """Run simulations with Matlab-AMICI pre-generated models and verify using saved expectations.""" -import h5py -import amici import os -from amici.gradient_check import check_derivatives, check_results -import pytest +from pathlib import Path +import amici +import h5py import numpy as np +import pytest +from amici.gradient_check import check_derivatives, check_results - -options_file = os.path.join(os.path.dirname(__file__), '..', '..', - 'tests', 'cpputest', 'testOptions.h5') -expected_results_file = os.path.join(os.path.dirname(__file__), '..', '..', - 'tests', 'cpputest', 'expectedResults.h5') +cpp_test_dir = Path(__file__).parents[2] / 'tests' / 'cpp' +options_file = str(cpp_test_dir / 'testOptions.h5') +expected_results_file = str(cpp_test_dir / 'expectedResults.h5') expected_results = h5py.File(expected_results_file, 'r') model_cases = [(sub_test, case) @@ -43,10 +42,9 @@ def test_pregenerated_model(sub_test, case): else: model_name = sub_test - model_swig_folder = \ - os.path.join(os.path.dirname(__file__), '..', '..', 'build', 'tests', - 'cpputest', f'external_{model_name}-prefix', - 'src', f'external_{model_name}-build', 'swig') + model_swig_folder = str(Path(__file__).parents[2] / 'build' / 'tests' + / 'cpp' / f'external_{model_name}-prefix' / 'src' + / f'external_{model_name}-build' / 'swig') test_model_module = amici.import_model_module( module_name=model_name, module_path=model_swig_folder) @@ -64,7 +62,7 @@ def test_pregenerated_model(sub_test, case): edata = None if 'data' in expected_results[sub_test][case].keys(): edata = amici.readSimulationExpData( - expected_results_file, + str(expected_results_file), f'/{sub_test}/{case}/data', model.get() ) rdata = amici.runAmiciSimulation(model, solver, diff --git a/deps/AMICI/scripts/README.md b/deps/AMICI/scripts/README.md index 5434d511b..67f64dca5 100644 --- a/deps/AMICI/scripts/README.md +++ b/deps/AMICI/scripts/README.md @@ -14,12 +14,7 @@ This directory contains a number of build, installation, and CI scripts. Download and build [BioNetGen](https://www.csb.pitt.edu/Faculty/Faeder/?page_id=409) (required for some tests) - -* `buildCpputest.sh` - - Download and build [CppUTest](https://cpputest.github.io/) - (required for C++ test suite) - + * `buildSuiteSparse.sh` Build [SuiteSparse](http://faculty.cse.tamu.edu/davis/suitesparse.html) @@ -64,7 +59,7 @@ This directory contains a number of build, installation, and CI scripts. 
Run static code analysis -* `run-cpputest.sh` +* `run-cpp-tests.sh` Run C++ unit and integration tests diff --git a/deps/AMICI/scripts/buildAmici.sh b/deps/AMICI/scripts/buildAmici.sh index 446fe1d8b..735e05fd3 100755 --- a/deps/AMICI/scripts/buildAmici.sh +++ b/deps/AMICI/scripts/buildAmici.sh @@ -2,7 +2,8 @@ # # Build libamici # -set -e +set -eou pipefail + cmake=${CMAKE:-cmake} make=${MAKE:-make} @@ -12,28 +13,26 @@ amici_build_dir="${amici_path}/build" mkdir -p "${amici_build_dir}" cd "${amici_build_dir}" -cpputest_build_dir="${amici_path}/ThirdParty/cpputest-master/build/" - -if [[ $TRAVIS = true ]] || [[ $GITHUB_ACTIONS = true ]] || [[ $ENABLE_AMICI_DEBUGGING = TRUE ]]; -then +if [ "${TRAVIS:-}" = true ] || + [ "${GITHUB_ACTIONS:-}" = true ] || + [ "${ENABLE_AMICI_DEBUGGING:-}" = TRUE ]; then # Running on CI server build_type="Debug" else build_type="RelWithDebInfo" fi -CppUTest_DIR=${cpputest_build_dir} \ - ${cmake} \ - -DCMAKE_CXX_FLAGS="-Wall -Wextra -Werror" \ - -DCMAKE_BUILD_TYPE=$build_type \ - -DPython3_EXECUTABLE="$(command -v python3)" .. +${cmake} \ + -DCMAKE_CXX_FLAGS="-Wall -Wextra -Werror" \ + -DCMAKE_BUILD_TYPE=$build_type \ + -DPython3_EXECUTABLE="$(command -v python3)" .. # build, with or without sonarcloud wrapper -if [[ "$CI_SONARCLOUD" == "TRUE" ]]; then +if [ "${CI_SONARCLOUD:-}" = "TRUE" ]; then build-wrapper-linux-x86-64 \ --out-dir "${amici_path}/bw-output" \ cmake --build . --parallel -elif [[ "$TRAVIS" == "true" ]]; then +elif [ "${TRAVIS:-}" = "true" ]; then cmake --build . ${make} python-sdist else diff --git a/deps/AMICI/scripts/buildCpputest.sh b/deps/AMICI/scripts/buildCpputest.sh deleted file mode 100755 index 1d4a8f468..000000000 --- a/deps/AMICI/scripts/buildCpputest.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# -# Build CppUTest -# -set -e - -script_path=$(dirname "$BASH_SOURCE") -amici_path=$(cd "$script_path/.." && pwd) - -cmake=${CMAKE:-cmake} -make=${MAKE:-make} - -# Cpputest -mkdir -p "${amici_path}/ThirdParty" -cd "${amici_path}/ThirdParty" -export CPPUTEST_BUILD_DIR="${amici_path}/ThirdParty/cpputest-master/" - -if [ ! -d "cpputest-master" ]; then - if [ ! -e "cpputest-master.zip" ]; then - wget -q -O cpputest-master.zip https://codeload.github.com/cpputest/cpputest/zip/master - fi - unzip -q cpputest-master.zip - #cd cpputest-master/ && ./autogen.sh && ./configure && make -fi - -cd cpputest-master -mkdir -p build -cd build -${cmake} -DTESTS=OFF -DBUILD_TESTING=OFF -DCMAKE_BUILD_TYPE=Release \ - -DC++11=ON -DMEMORY_LEAK_DETECTION=OFF .. 
-${make} -j4 - diff --git a/deps/AMICI/scripts/buildDependencies.sh b/deps/AMICI/scripts/buildDependencies.sh index 51a971a88..1caaf0eb4 100755 --- a/deps/AMICI/scripts/buildDependencies.sh +++ b/deps/AMICI/scripts/buildDependencies.sh @@ -9,5 +9,4 @@ script_path=$(cd "$script_path" && pwd) "${script_path}/buildSuiteSparse.sh" "${script_path}/buildSundials.sh" -"${script_path}/buildCpputest.sh" "${script_path}/buildBNGL.sh" diff --git a/deps/AMICI/scripts/buildXcode.sh b/deps/AMICI/scripts/buildXcode.sh index d78edde4b..97dfbf24e 100755 --- a/deps/AMICI/scripts/buildXcode.sh +++ b/deps/AMICI/scripts/buildXcode.sh @@ -12,18 +12,16 @@ CMAKE=${CMAKE:-cmake} ${AMICI_PATH}/scripts/buildSuiteSparse.sh ${AMICI_PATH}/scripts/buildSundials.sh ${AMICI_PATH}/scripts/buildAmici.sh -${AMICI_PATH}/scripts/buildCpputest.sh -cp ${AMICI_PATH}/tests/cpputest/expectedResults.h5 ./expectedResults.h5 +cp ${AMICI_PATH}/tests/cpp/expectedResults.h5 ./expectedResults.h5 mkdir -p ${AMICI_PATH}/build_xcode cd ${AMICI_PATH}/build_xcode -CPPUTEST_BUILD_DIR=${AMICI_PATH}/ThirdParty/cpputest-master/build/ -CppUTest_DIR=${CPPUTEST_BUILD_DIR} ${CMAKE} -G"Xcode" -DCMAKE_BUILD_TYPE=Debug .. +${CMAKE} -G"Xcode" -DCMAKE_BUILD_TYPE=Debug .. for model in steadystate robertson neuron neuron_o2 jakstat_adjoint jakstat_adjoint_o2 dirac events nested_events do - cp ${AMICI_PATH}/build/tests/cpputest/external_model_${model}-prefix/src/external_model_${model}-build/libmodel_${model}.a \ - ${AMICI_PATH}/build_xcode/tests/cpputest/external_model_${model}-prefix/src/external_model_${model}-build/libmodel_${model}.a + cp ${AMICI_PATH}/build/tests/cpp/external_model_${model}-prefix/src/external_model_${model}-build/libmodel_${model}.a \ + ${AMICI_PATH}/build_xcode/tests/cpp/external_model_${model}-prefix/src/external_model_${model}-build/libmodel_${model}.a done diff --git a/deps/AMICI/scripts/run-cpp-tests.sh b/deps/AMICI/scripts/run-cpp-tests.sh new file mode 100755 index 000000000..963ef3c51 --- /dev/null +++ b/deps/AMICI/scripts/run-cpp-tests.sh @@ -0,0 +1,20 @@ +#!/bin/bash +set -eou pipefail + +SCRIPT_PATH=$(dirname "$BASH_SOURCE") +AMICI_PATH=$(cd "$SCRIPT_PATH/.." && pwd) + +if [[ "${ENABLE_GCOV_COVERAGE:-}" == TRUE ]]; then + lcov --base-directory "${AMICI_PATH}" \ + --directory "${AMICI_PATH}/build/CMakeFiles/amici.dir/src" \ + --zerocounters -q +fi + +# run tests +cd "${AMICI_PATH}/build" + +ctest -V +ret=$? +if [[ $ret != 0 ]]; then exit $ret; fi +mv "${AMICI_PATH}/tests/cpp/writeResults.h5" \ + "${AMICI_PATH}/tests/cpp/writeResults.h5.bak" diff --git a/deps/AMICI/scripts/run-cppcheck.sh b/deps/AMICI/scripts/run-cppcheck.sh index 82018977e..57c669439 100755 --- a/deps/AMICI/scripts/run-cppcheck.sh +++ b/deps/AMICI/scripts/run-cppcheck.sh @@ -1,15 +1,18 @@ #!/bin/bash # Check test suite with valgrind -# Note: CppuTest memcheck should be disabled # Note: Consider using ctest -T memcheck instead +set -euo pipefail + SCRIPT_PATH=$(dirname "$BASH_SOURCE") AMICI_PATH=$(cd "$SCRIPT_PATH"/.. 
&& pwd) -cd ${AMICI_PATH} +cd "${AMICI_PATH}" -cppcheck -i"${AMICI_PATH}"/src/doc "${AMICI_PATH}"/src \ - -I$"{AMICI_PATH}"/include/ \ - --enable=style \ - --exitcode-suppressions="${AMICI_PATH}"/.cppcheck-exitcode-suppressions +cppcheck \ + "-i${AMICI_PATH}/src/doc" \ + "${AMICI_PATH}/src" \ + "-I${AMICI_PATH}/include/" \ + --enable=style \ + "--exitcode-suppressions=${AMICI_PATH}/.cppcheck-exitcode-suppressions" diff --git a/deps/AMICI/scripts/run-cpputest.sh b/deps/AMICI/scripts/run-cpputest.sh deleted file mode 100755 index 781a31d32..000000000 --- a/deps/AMICI/scripts/run-cpputest.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -SCRIPT_PATH=$(dirname $BASH_SOURCE) -AMICI_PATH=$(cd $SCRIPT_PATH/.. && pwd) - -if [[ "$ENABLE_GCOV_COVERAGE" == TRUE ]]; then lcov --base-directory ${AMICI_PATH} --directory ${AMICI_PATH}/build/CMakeFiles/amici.dir/src --zerocounters -q; fi - -# run tests -cd ${AMICI_PATH}/build - -ctest -V -ret=$? -if [[ $ret != 0 ]]; then exit $ret; fi -mv ${AMICI_PATH}/tests/cpputest/writeResults.h5 ${AMICI_PATH}/tests/cpputest/writeResults.h5.bak diff --git a/deps/AMICI/scripts/run-valgrind-cpp.sh b/deps/AMICI/scripts/run-valgrind-cpp.sh index af0daa95b..7d868d98e 100755 --- a/deps/AMICI/scripts/run-valgrind-cpp.sh +++ b/deps/AMICI/scripts/run-valgrind-cpp.sh @@ -1,19 +1,19 @@ #!/bin/bash # Check test suite with valgrind -# Note: CppuTest memcheck should be disabled # Note: Consider using ctest -T memcheck instead -SCRIPT_PATH=$(dirname $BASH_SOURCE) -AMICI_PATH=$(cd $SCRIPT_PATH/.. && pwd) +SCRIPT_PATH=$(dirname "$BASH_SOURCE") +AMICI_PATH=$(cd "$SCRIPT_PATH/.." && pwd) -set -e +set -eou pipefail # run tests -cd ${AMICI_PATH}/build/tests/cpputest/ +cd "${AMICI_PATH}/build/tests/cpp/" VALGRIND_OPTS="--leak-check=full --error-exitcode=1 --trace-children=yes --show-leak-kinds=definite" set -x for MODEL in $(ctest -N | grep "Test[ ]*#" | grep -v unittests | sed --regexp-extended 's/ *Test[ ]*#[0-9]+: model_(.*)_test/\1/') - do cd ${AMICI_PATH}/build/tests/cpputest/${MODEL}/ && valgrind ${VALGRIND_OPTS} ./model_${MODEL}_test + do cd "${AMICI_PATH}/build/tests/cpp/${MODEL}/" && valgrind ${VALGRIND_OPTS} "./model_${MODEL}_test" done -cd ${AMICI_PATH}/build/tests/cpputest/unittests/ && valgrind ${VALGRIND_OPTS} ./unittests +cd "${AMICI_PATH}/build/tests/cpp/unittests/" +valgrind ${VALGRIND_OPTS} ./unittests diff --git a/deps/AMICI/tests/README.md b/deps/AMICI/tests/README.md index 488d136f0..0758daf48 100644 --- a/deps/AMICI/tests/README.md +++ b/deps/AMICI/tests/README.md @@ -2,7 +2,7 @@ This directory contains: -- C++ unit tests and integration tests (`cpputest/`) +- C++ unit tests and integration tests (`cpp/`) - Scripts for running the SBML semantic test suite, exercising the Python interface - Scripts for running the PEtab test suite, exercising the Python interface diff --git a/deps/AMICI/tests/cpputest/CMakeLists.txt b/deps/AMICI/tests/cpp/CMakeLists.txt similarity index 60% rename from deps/AMICI/tests/cpputest/CMakeLists.txt rename to deps/AMICI/tests/cpp/CMakeLists.txt index fa441f324..6586fb3ec 100644 --- a/deps/AMICI/tests/cpputest/CMakeLists.txt +++ b/deps/AMICI/tests/cpp/CMakeLists.txt @@ -1,15 +1,43 @@ -project(amiciIntegrationTests) +# ------------------------------------------------------------------------------ +# Set up google test +# ------------------------------------------------------------------------------ + +# Download and unpack googletest at configure time +configure_file(CMakeLists.txt.in googletest-download/CMakeLists.txt) +execute_process( + COMMAND 
${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" . + RESULT_VARIABLE result + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download) +if(result) + message(FATAL_ERROR "CMake step for googletest failed: ${result}") +endif() +execute_process( + COMMAND ${CMAKE_COMMAND} --build . + RESULT_VARIABLE result + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download) +if(result) + message(FATAL_ERROR "Build step for googletest failed: ${result}") +endif() + +# Prevent overriding the parent project's compiler/linker settings on Windows +set(gtest_force_shared_crt + ON + CACHE BOOL "" FORCE) -find_package(CppUTest REQUIRED) -# because Cpputest doesn't seem to care about MEMORY_LEAK_DETECTION=OFF -add_definitions(-DD_MemoryLeakWarningPlugin_h) +# Add googletest directly to our build. This defines the gtest and gtest_main +# targets. +add_subdirectory(${CMAKE_CURRENT_BINARY_DIR}/googletest-src + ${CMAKE_CURRENT_BINARY_DIR}/googletest-build EXCLUDE_FROM_ALL) + +# ------------------------------------------------------------------------------ +# AMICI C++ tests +# ------------------------------------------------------------------------------ + +project(amiciIntegrationTests) # models depend on Upstream::amici add_library(Upstream::amici ALIAS amici) -set(CMAKE_CXX_FLAGS_OLD "${CMAKE_CXX_FLAGS}") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -include sstream -include functional") - # Amici testing library add_library(amici-testing testfunctions.cpp) target_compile_definitions(amici-testing @@ -19,11 +47,10 @@ target_compile_definitions(amici-testing ) target_include_directories(amici-testing PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} - PUBLIC ${CppUTest_INCLUDE_DIRS} ) target_link_libraries(amici-testing PUBLIC Upstream::amici - PUBLIC ${CppUTest_LIBRARIES} + PUBLIC gtest_main ) # Names of models for which tests are to be run diff --git a/deps/AMICI/tests/cpp/CMakeLists.txt.in b/deps/AMICI/tests/cpp/CMakeLists.txt.in new file mode 100644 index 000000000..6e80530a7 --- /dev/null +++ b/deps/AMICI/tests/cpp/CMakeLists.txt.in @@ -0,0 +1,15 @@ +cmake_minimum_required(VERSION 2.8.12) + +project(googletest-download NONE) + +include(ExternalProject) +ExternalProject_Add(googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG master + SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-src" + BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/googletest-build" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/deps/AMICI/tests/cpputest/robertson/CMakeLists.txt b/deps/AMICI/tests/cpp/calvetti/CMakeLists.txt similarity index 67% rename from deps/AMICI/tests/cpputest/robertson/CMakeLists.txt rename to deps/AMICI/tests/cpp/calvetti/CMakeLists.txt index 388fdf915..5f3db0773 100644 --- a/deps/AMICI/tests/cpputest/robertson/CMakeLists.txt +++ b/deps/AMICI/tests/cpp/calvetti/CMakeLists.txt @@ -2,18 +2,20 @@ get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) project(model_${MODEL_NAME}_test) set(SRC_LIST - ../main.cpp tests1.cpp ) -include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CppUTest_INCLUDE_DIRS}) - add_executable(${PROJECT_NAME} ${SRC_LIST}) + +target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) + add_dependencies(${PROJECT_NAME} external_model_${MODEL_NAME}) target_link_libraries(${PROJECT_NAME} amici-testing model_${MODEL_NAME} + gtest_main ) -add_test(NAME ${PROJECT_NAME} COMMAND ./${PROJECT_NAME} -c) +include(GoogleTest) +gtest_discover_tests(${PROJECT_NAME}) diff --git 
a/deps/AMICI/tests/cpputest/calvetti/tests1.cpp b/deps/AMICI/tests/cpp/calvetti/tests1.cpp similarity index 50% rename from deps/AMICI/tests/cpputest/calvetti/tests1.cpp rename to deps/AMICI/tests/cpp/calvetti/tests1.cpp index dfde30fd9..fb093dc5b 100644 --- a/deps/AMICI/tests/cpputest/calvetti/tests1.cpp +++ b/deps/AMICI/tests/cpp/calvetti/tests1.cpp @@ -1,13 +1,10 @@ -#include "CppUTest/TestHarness.h" -#include "CppUTestExt/MockSupport.h" - #include "wrapfunctions.h" #include #include -TEST_GROUP(groupCalvetti){}; +#include -TEST(groupCalvetti, testSimulation) +TEST(ExampleCalvetti, Simulation) { amici::simulateVerifyWrite("/model_calvetti/nosensi/"); } diff --git a/deps/AMICI/tests/cpputest/calvetti/CMakeLists.txt b/deps/AMICI/tests/cpp/dirac/CMakeLists.txt similarity index 67% rename from deps/AMICI/tests/cpputest/calvetti/CMakeLists.txt rename to deps/AMICI/tests/cpp/dirac/CMakeLists.txt index 388fdf915..5f3db0773 100644 --- a/deps/AMICI/tests/cpputest/calvetti/CMakeLists.txt +++ b/deps/AMICI/tests/cpp/dirac/CMakeLists.txt @@ -2,18 +2,20 @@ get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) project(model_${MODEL_NAME}_test) set(SRC_LIST - ../main.cpp tests1.cpp ) -include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CppUTest_INCLUDE_DIRS}) - add_executable(${PROJECT_NAME} ${SRC_LIST}) + +target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) + add_dependencies(${PROJECT_NAME} external_model_${MODEL_NAME}) target_link_libraries(${PROJECT_NAME} amici-testing model_${MODEL_NAME} + gtest_main ) -add_test(NAME ${PROJECT_NAME} COMMAND ./${PROJECT_NAME} -c) +include(GoogleTest) +gtest_discover_tests(${PROJECT_NAME}) diff --git a/deps/AMICI/tests/cpputest/dirac/tests1.cpp b/deps/AMICI/tests/cpp/dirac/tests1.cpp similarity index 54% rename from deps/AMICI/tests/cpputest/dirac/tests1.cpp rename to deps/AMICI/tests/cpp/dirac/tests1.cpp index 89d71f1f9..d1d79af7f 100644 --- a/deps/AMICI/tests/cpputest/dirac/tests1.cpp +++ b/deps/AMICI/tests/cpp/dirac/tests1.cpp @@ -1,19 +1,17 @@ -#include "CppUTest/TestHarness.h" -#include "CppUTestExt/MockSupport.h" - #include #include "wrapfunctions.h" #include -TEST_GROUP(groupDirac){}; +#include + -TEST(groupDirac, testSimulation) +TEST(ExampleDirac, Simulation) { amici::simulateVerifyWrite("/model_dirac/nosensi/"); } -TEST(groupDirac, testSensitivityForward) +TEST(ExampleDirac, SensitivityForward) { amici::simulateVerifyWrite("/model_dirac/sensiforward/"); } diff --git a/deps/AMICI/tests/cpputest/dirac/CMakeLists.txt b/deps/AMICI/tests/cpp/events/CMakeLists.txt similarity index 54% rename from deps/AMICI/tests/cpputest/dirac/CMakeLists.txt rename to deps/AMICI/tests/cpp/events/CMakeLists.txt index 99958ded6..5f3db0773 100644 --- a/deps/AMICI/tests/cpputest/dirac/CMakeLists.txt +++ b/deps/AMICI/tests/cpp/events/CMakeLists.txt @@ -2,17 +2,20 @@ get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) project(model_${MODEL_NAME}_test) set(SRC_LIST - ../main.cpp tests1.cpp ) -include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CppUTest_INCLUDE_DIRS}) - add_executable(${PROJECT_NAME} ${SRC_LIST}) +target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) + +add_dependencies(${PROJECT_NAME} external_model_${MODEL_NAME}) + target_link_libraries(${PROJECT_NAME} amici-testing model_${MODEL_NAME} + gtest_main ) -add_test(NAME ${PROJECT_NAME} COMMAND ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME} -c) +include(GoogleTest) +gtest_discover_tests(${PROJECT_NAME}) diff --git 
a/deps/AMICI/tests/cpputest/events/tests1.cpp b/deps/AMICI/tests/cpp/events/tests1.cpp similarity index 55% rename from deps/AMICI/tests/cpputest/events/tests1.cpp rename to deps/AMICI/tests/cpp/events/tests1.cpp index f5ee53a7f..a9b1ff844 100644 --- a/deps/AMICI/tests/cpputest/events/tests1.cpp +++ b/deps/AMICI/tests/cpp/events/tests1.cpp @@ -1,24 +1,21 @@ -#include "CppUTest/TestHarness.h" -#include "CppUTestExt/MockSupport.h" - #include "testfunctions.h" #include "wrapfunctions.h" #include -TEST_GROUP(groupEvents){}; +#include -TEST(groupEvents, testDefault) +TEST(ExampleEvents, Default) { amici::simulateWithDefaultOptions(); } -TEST(groupEvents, testSimulation) +TEST(ExampleEvents, Simulation) { amici::simulateVerifyWrite("/model_events/nosensi/"); } -TEST(groupEvents, testSensitivityForward) +TEST(ExampleEvents, SensitivityForward) { amici::simulateVerifyWrite("/model_events/sensiforward/"); } diff --git a/deps/AMICI/tests/cpputest/expectedResults.h5 b/deps/AMICI/tests/cpp/expectedResults.h5 similarity index 100% rename from deps/AMICI/tests/cpputest/expectedResults.h5 rename to deps/AMICI/tests/cpp/expectedResults.h5 diff --git a/deps/AMICI/tests/cpputest/jakstat_adjoint/CMakeLists.txt b/deps/AMICI/tests/cpp/jakstat_adjoint/CMakeLists.txt similarity index 54% rename from deps/AMICI/tests/cpputest/jakstat_adjoint/CMakeLists.txt rename to deps/AMICI/tests/cpp/jakstat_adjoint/CMakeLists.txt index 3197ed7a7..5f3db0773 100644 --- a/deps/AMICI/tests/cpputest/jakstat_adjoint/CMakeLists.txt +++ b/deps/AMICI/tests/cpp/jakstat_adjoint/CMakeLists.txt @@ -2,17 +2,20 @@ get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) project(model_${MODEL_NAME}_test) set(SRC_LIST - ../main.cpp tests1.cpp ) -include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CppUTest_INCLUDE_DIRS}) - add_executable(${PROJECT_NAME} ${SRC_LIST}) +target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) + +add_dependencies(${PROJECT_NAME} external_model_${MODEL_NAME}) + target_link_libraries(${PROJECT_NAME} amici-testing model_${MODEL_NAME} + gtest_main ) -add_test(NAME ${PROJECT_NAME} COMMAND ./${PROJECT_NAME} -c) +include(GoogleTest) +gtest_discover_tests(${PROJECT_NAME}) diff --git a/deps/AMICI/tests/cpputest/jakstat_adjoint/tests1.cpp b/deps/AMICI/tests/cpp/jakstat_adjoint/tests1.cpp similarity index 84% rename from deps/AMICI/tests/cpputest/jakstat_adjoint/tests1.cpp rename to deps/AMICI/tests/cpp/jakstat_adjoint/tests1.cpp index a69ed24a5..2ca15fdf6 100644 --- a/deps/AMICI/tests/cpputest/jakstat_adjoint/tests1.cpp +++ b/deps/AMICI/tests/cpp/jakstat_adjoint/tests1.cpp @@ -1,46 +1,43 @@ -#include "CppUTest/TestHarness.h" -#include "CppUTestExt/MockSupport.h" - #include "testfunctions.h" #include "wrapfunctions.h" #include -TEST_GROUP(groupJakstatAdjoint){}; +#include -TEST(groupJakstatAdjoint, testSimulation) +TEST(ExampleJakstatAdjoint, Simulation) { amici::simulateVerifyWrite("/model_jakstat_adjoint/nosensi/"); } -TEST(groupJakstatAdjoint, testSensitivityForward) +TEST(ExampleJakstatAdjoint, SensitivityForward) { amici::simulateVerifyWrite("/model_jakstat_adjoint/sensiforward/"); } -TEST(groupJakstatAdjoint, testSensitivityForwardLogParam) +TEST(ExampleJakstatAdjoint, SensitivityForwardLogParam) { amici::simulateVerifyWrite("/model_jakstat_adjoint/sensiforwardlogparam/"); } -TEST(groupJakstatAdjoint, testSensitivityAdjoint) +TEST(ExampleJakstatAdjoint, SensitivityAdjoint) { amici::simulateVerifyWrite("/model_jakstat_adjoint/sensiadjoint/"); } -TEST(groupJakstatAdjoint, 
testSensitivityForwardEmptySensInd) +TEST(ExampleJakstatAdjoint, SensitivityForwardEmptySensInd) { amici::simulateVerifyWrite( "/model_jakstat_adjoint/sensiforwardemptysensind/"); } -TEST(groupJakstatAdjoint, testSensitivityAdjointEmptySensInd) +TEST(ExampleJakstatAdjoint, SensitivityAdjointEmptySensInd) { amici::simulateVerifyWrite( "/model_jakstat_adjoint/sensiadjointemptysensind/"); } -IGNORE_TEST(groupJakstatAdjoint, testSensitivityAdjointUnusedNanOutputs) +TEST(ExampleJakstatAdjoint, DISABLED_SensitivityAdjointUnusedNanOutputs) { /* UN-IGNORE ONCE THIS MODEL HAS BEEN IMPORTED VIA PYTHON INTERFACE */ auto model = amici::generic_model::getModel(); @@ -71,10 +68,10 @@ IGNORE_TEST(groupJakstatAdjoint, testSensitivityAdjointUnusedNanOutputs) auto rdata = runAmiciSimulation(*solver, edata.get(), *model); for (int i = 0; i < model->nplist(); ++i) - CHECK_FALSE(std::isnan(rdata->sllh[i])); + ASSERT_FALSE(std::isnan(rdata->sllh[i])); } -TEST(groupJakstatAdjoint, testSensitivityReplicates) +TEST(ExampleJakstatAdjoint, SensitivityReplicates) { // Check that we can handle replicates correctly @@ -124,11 +121,11 @@ TEST(groupJakstatAdjoint, testSensitivityReplicates) solver->setSensitivityMethod(amici::SensitivityMethod::forward); auto rdata2 = runAmiciSimulation(*solver, &edata, *model); auto llh2 = rdata2->llh; - DOUBLES_EQUAL(2.0 * llh1, llh2, 1e-6); + ASSERT_NEAR(2.0 * llh1, llh2, 1e-6); // adjoint + replicates solver->setSensitivityMethod(amici::SensitivityMethod::adjoint); auto rdata3 = runAmiciSimulation(*solver, &edata, *model); auto llh3 = rdata3->llh; - DOUBLES_EQUAL(llh2, llh3, 1e-6); + ASSERT_NEAR(llh2, llh3, 1e-6); } diff --git a/deps/AMICI/tests/cpp/jakstat_adjoint_o2/CMakeLists.txt b/deps/AMICI/tests/cpp/jakstat_adjoint_o2/CMakeLists.txt new file mode 100644 index 000000000..5f3db0773 --- /dev/null +++ b/deps/AMICI/tests/cpp/jakstat_adjoint_o2/CMakeLists.txt @@ -0,0 +1,21 @@ +get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) +project(model_${MODEL_NAME}_test) + +set(SRC_LIST + tests1.cpp +) + +add_executable(${PROJECT_NAME} ${SRC_LIST}) + +target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) + +add_dependencies(${PROJECT_NAME} external_model_${MODEL_NAME}) + +target_link_libraries(${PROJECT_NAME} + amici-testing + model_${MODEL_NAME} + gtest_main +) + +include(GoogleTest) +gtest_discover_tests(${PROJECT_NAME}) diff --git a/deps/AMICI/tests/cpputest/jakstat_adjoint_o2/tests1.cpp b/deps/AMICI/tests/cpp/jakstat_adjoint_o2/tests1.cpp similarity index 53% rename from deps/AMICI/tests/cpputest/jakstat_adjoint_o2/tests1.cpp rename to deps/AMICI/tests/cpp/jakstat_adjoint_o2/tests1.cpp index 1331c0c36..b03b9da13 100644 --- a/deps/AMICI/tests/cpputest/jakstat_adjoint_o2/tests1.cpp +++ b/deps/AMICI/tests/cpp/jakstat_adjoint_o2/tests1.cpp @@ -1,24 +1,22 @@ -#include "CppUTest/TestHarness.h" -#include "CppUTestExt/MockSupport.h" - #include "testfunctions.h" #include "wrapfunctions.h" #include -TEST_GROUP(groupJakstatAdjointO2){}; +#include + -TEST(groupJakstatAdjointO2, testSensitivityForward2) +TEST(ExampleJakstatAdjointO2, SensitivityForward2) { amici::simulateVerifyWrite("/model_jakstat_adjoint/sensi2forward/"); } -TEST(groupJakstatAdjointO2, testSensitivityForward2LogParam) +TEST(ExampleJakstatAdjointO2, SensitivityForward2LogParam) { amici::simulateVerifyWrite("/model_jakstat_adjoint/sensi2forwardlogparam/"); } -TEST(groupJakstatAdjointO2, testSensitivityAdjoint2) +TEST(ExampleJakstatAdjointO2, SensitivityAdjoint2) { 
amici::simulateVerifyWrite("/model_jakstat_adjoint/sensi2adjoint/"); } diff --git a/deps/AMICI/tests/cpp/nested_events/CMakeLists.txt b/deps/AMICI/tests/cpp/nested_events/CMakeLists.txt new file mode 100644 index 000000000..5f3db0773 --- /dev/null +++ b/deps/AMICI/tests/cpp/nested_events/CMakeLists.txt @@ -0,0 +1,21 @@ +get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) +project(model_${MODEL_NAME}_test) + +set(SRC_LIST + tests1.cpp +) + +add_executable(${PROJECT_NAME} ${SRC_LIST}) + +target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) + +add_dependencies(${PROJECT_NAME} external_model_${MODEL_NAME}) + +target_link_libraries(${PROJECT_NAME} + amici-testing + model_${MODEL_NAME} + gtest_main +) + +include(GoogleTest) +gtest_discover_tests(${PROJECT_NAME}) diff --git a/deps/AMICI/tests/cpputest/nested_events/tests1.cpp b/deps/AMICI/tests/cpp/nested_events/tests1.cpp similarity index 55% rename from deps/AMICI/tests/cpputest/nested_events/tests1.cpp rename to deps/AMICI/tests/cpp/nested_events/tests1.cpp index 7208b713d..7a74f5465 100644 --- a/deps/AMICI/tests/cpputest/nested_events/tests1.cpp +++ b/deps/AMICI/tests/cpp/nested_events/tests1.cpp @@ -1,19 +1,16 @@ -#include "CppUTest/TestHarness.h" -#include "CppUTestExt/MockSupport.h" - #include "testfunctions.h" #include "wrapfunctions.h" #include -TEST_GROUP(groupEvents){}; +#include -TEST(groupEvents, testSimulation) +TEST(ExampleNestedEvents, Simulation) { amici::simulateVerifyWrite("/model_nested_events/nosensi/"); } -TEST(groupEvents, testSensitivityForward) +TEST(ExampleNestedEvents, SensitivityForward) { amici::simulateVerifyWrite("/model_nested_events/sensiforward/"); } diff --git a/deps/AMICI/tests/cpp/neuron/CMakeLists.txt b/deps/AMICI/tests/cpp/neuron/CMakeLists.txt new file mode 100644 index 000000000..5f3db0773 --- /dev/null +++ b/deps/AMICI/tests/cpp/neuron/CMakeLists.txt @@ -0,0 +1,21 @@ +get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) +project(model_${MODEL_NAME}_test) + +set(SRC_LIST + tests1.cpp +) + +add_executable(${PROJECT_NAME} ${SRC_LIST}) + +target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) + +add_dependencies(${PROJECT_NAME} external_model_${MODEL_NAME}) + +target_link_libraries(${PROJECT_NAME} + amici-testing + model_${MODEL_NAME} + gtest_main +) + +include(GoogleTest) +gtest_discover_tests(${PROJECT_NAME}) diff --git a/deps/AMICI/tests/cpputest/neuron/tests1.cpp b/deps/AMICI/tests/cpp/neuron/tests1.cpp similarity index 62% rename from deps/AMICI/tests/cpputest/neuron/tests1.cpp rename to deps/AMICI/tests/cpp/neuron/tests1.cpp index f5d645b7a..cfdc04ef9 100644 --- a/deps/AMICI/tests/cpputest/neuron/tests1.cpp +++ b/deps/AMICI/tests/cpp/neuron/tests1.cpp @@ -1,20 +1,17 @@ -#include "CppUTest/TestHarness.h" -#include "CppUTestExt/MockSupport.h" - #include "testfunctions.h" #include "wrapfunctions.h" #include -TEST_GROUP(groupNeuron){}; +#include -TEST(groupNeuron, testSimulation) +TEST(ExampleNeuron, Simulation) { amici::simulateVerifyWrite( "/model_neuron/nosensi/", 100 * TEST_ATOL, 100 * TEST_RTOL); } -TEST(groupNeuron, testSensitivityForward) +TEST(ExampleNeuron, SensitivityForward) { amici::simulateVerifyWrite( "/model_neuron/sensiforward/", 10 * TEST_ATOL, 10 * TEST_RTOL); diff --git a/deps/AMICI/tests/cpp/neuron_o2/CMakeLists.txt b/deps/AMICI/tests/cpp/neuron_o2/CMakeLists.txt new file mode 100644 index 000000000..5f3db0773 --- /dev/null +++ b/deps/AMICI/tests/cpp/neuron_o2/CMakeLists.txt @@ -0,0 +1,21 @@ 
+get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) +project(model_${MODEL_NAME}_test) + +set(SRC_LIST + tests1.cpp +) + +add_executable(${PROJECT_NAME} ${SRC_LIST}) + +target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) + +add_dependencies(${PROJECT_NAME} external_model_${MODEL_NAME}) + +target_link_libraries(${PROJECT_NAME} + amici-testing + model_${MODEL_NAME} + gtest_main +) + +include(GoogleTest) +gtest_discover_tests(${PROJECT_NAME}) diff --git a/deps/AMICI/tests/cpputest/neuron_o2/tests1.cpp b/deps/AMICI/tests/cpp/neuron_o2/tests1.cpp similarity index 56% rename from deps/AMICI/tests/cpputest/neuron_o2/tests1.cpp rename to deps/AMICI/tests/cpp/neuron_o2/tests1.cpp index 57b9ac47b..5be0c9ea5 100644 --- a/deps/AMICI/tests/cpputest/neuron_o2/tests1.cpp +++ b/deps/AMICI/tests/cpp/neuron_o2/tests1.cpp @@ -1,14 +1,11 @@ -#include "CppUTest/TestHarness.h" -#include "CppUTestExt/MockSupport.h" - #include "testfunctions.h" #include "wrapfunctions.h" #include -TEST_GROUP(groupNeuronO2){}; +#include -TEST(groupNeuronO2, testSensitivity2) +TEST(ExampleNeuronO2, Sensitivity2) { amici::simulateVerifyWrite( "/model_neuron/sensi2forward/", 10 * TEST_ATOL, 10 * TEST_RTOL); diff --git a/deps/AMICI/tests/cpp/robertson/CMakeLists.txt b/deps/AMICI/tests/cpp/robertson/CMakeLists.txt new file mode 100644 index 000000000..5f3db0773 --- /dev/null +++ b/deps/AMICI/tests/cpp/robertson/CMakeLists.txt @@ -0,0 +1,21 @@ +get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) +project(model_${MODEL_NAME}_test) + +set(SRC_LIST + tests1.cpp +) + +add_executable(${PROJECT_NAME} ${SRC_LIST}) + +target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) + +add_dependencies(${PROJECT_NAME} external_model_${MODEL_NAME}) + +target_link_libraries(${PROJECT_NAME} + amici-testing + model_${MODEL_NAME} + gtest_main +) + +include(GoogleTest) +gtest_discover_tests(${PROJECT_NAME}) diff --git a/deps/AMICI/tests/cpputest/robertson/tests1.cpp b/deps/AMICI/tests/cpp/robertson/tests1.cpp similarity index 63% rename from deps/AMICI/tests/cpputest/robertson/tests1.cpp rename to deps/AMICI/tests/cpp/robertson/tests1.cpp index 441eac0b1..3cb74ada3 100644 --- a/deps/AMICI/tests/cpputest/robertson/tests1.cpp +++ b/deps/AMICI/tests/cpp/robertson/tests1.cpp @@ -1,30 +1,27 @@ -#include "CppUTest/TestHarness.h" -#include "CppUTestExt/MockSupport.h" - #include "wrapfunctions.h" #include #include -TEST_GROUP(groupRobertson){}; +#include -TEST(groupRobertson, testSimulation) +TEST(ExampleRobertson, Simulation) { amici::simulateVerifyWrite("/model_robertson/nosensi/"); } -TEST(groupRobertson, testSensitivityForward) +TEST(ExampleRobertson, SensitivityForward) { amici::simulateVerifyWrite( "/model_robertson/sensiforward/", 1e6 * TEST_ATOL, 1e2 * TEST_RTOL); } -TEST(groupRobertson, testSensitivityForwardDense) +TEST(ExampleRobertson, SensitivityForwardDense) { amici::simulateVerifyWrite( "/model_robertson/sensiforwarddense/", 1e6 * TEST_ATOL, 1e2 * TEST_RTOL); } -TEST(groupRobertson, testSensitivityForwardSPBCG) +TEST(ExampleRobertson, SensitivityForwardSPBCG) { amici::simulateVerifyWrite( "/model_robertson/sensiforwardSPBCG/", 1e7 * TEST_ATOL, 1e2 * TEST_RTOL); diff --git a/deps/AMICI/tests/cpp/steadystate/CMakeLists.txt b/deps/AMICI/tests/cpp/steadystate/CMakeLists.txt new file mode 100644 index 000000000..5f3db0773 --- /dev/null +++ b/deps/AMICI/tests/cpp/steadystate/CMakeLists.txt @@ -0,0 +1,21 @@ +get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) 
+project(model_${MODEL_NAME}_test) + +set(SRC_LIST + tests1.cpp +) + +add_executable(${PROJECT_NAME} ${SRC_LIST}) + +target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) + +add_dependencies(${PROJECT_NAME} external_model_${MODEL_NAME}) + +target_link_libraries(${PROJECT_NAME} + amici-testing + model_${MODEL_NAME} + gtest_main +) + +include(GoogleTest) +gtest_discover_tests(${PROJECT_NAME}) diff --git a/deps/AMICI/tests/cpputest/steadystate/tests1.cpp b/deps/AMICI/tests/cpp/steadystate/tests1.cpp similarity index 64% rename from deps/AMICI/tests/cpputest/steadystate/tests1.cpp rename to deps/AMICI/tests/cpp/steadystate/tests1.cpp index 78be0a2ad..505f8f15e 100644 --- a/deps/AMICI/tests/cpputest/steadystate/tests1.cpp +++ b/deps/AMICI/tests/cpp/steadystate/tests1.cpp @@ -1,19 +1,16 @@ -#include "CppUTest/TestHarness.h" -#include "CppUTestExt/MockSupport.h" - #include "testfunctions.h" #include "wrapfunctions.h" #include -TEST_GROUP(groupSteadystate){}; +#include -TEST(groupSteadystate, testDefault) +TEST(ExampleSteadystate, Default) { amici::simulateWithDefaultOptions(); } -TEST(groupSteadystate, testModelFromHDF5) +TEST(ExampleSteadystate, ModelFromHDF5) { // Test reading some python-written options std::vector pExp{ 1, 0.5, 0.4, 2, 0.1 }; @@ -25,43 +22,43 @@ TEST(groupSteadystate, testModelFromHDF5) amici::checkEqualArray( kExp, model->getFixedParameters(), TEST_ATOL, TEST_RTOL, "k"); - CHECK_EQUAL(51, model->nt()); - CHECK_EQUAL(0.0, model->getTimepoint(0)); - CHECK_EQUAL(100.0, model->getTimepoint(model->nt() - 2)); - CHECK_EQUAL(INFINITY, model->getTimepoint(model->nt() - 1)); + ASSERT_EQ(51, model->nt()); + ASSERT_EQ(0.0, model->getTimepoint(0)); + ASSERT_EQ(100.0, model->getTimepoint(model->nt() - 2)); + ASSERT_EQ(INFINITY, model->getTimepoint(model->nt() - 1)); for (int i = 0; i < model->np(); ++i) { - CHECK_EQUAL(pExp[i], model->getUnscaledParameters()[i]); - CHECK_EQUAL(log10(pExp[i]), model->getParameters()[i]); + ASSERT_EQ(pExp[i], model->getUnscaledParameters()[i]); + ASSERT_EQ(log10(pExp[i]), model->getParameters()[i]); } } -TEST(groupSteadystate, testInequality) +TEST(ExampleSteadystate, Inequality) { auto modelA = amici::generic_model::getModel(); - auto modelB = std::unique_ptr(new amici::Model_Test()); + auto modelB = std::make_unique(); - CHECK_FALSE(*modelA == *modelB); + ASSERT_FALSE(*modelA == *modelB); } -TEST(groupSteadystate, testCopyModel) +TEST(ExampleSteadystate, CopyModel) { auto modelA = amici::generic_model::getModel(); auto modelB = std::unique_ptr(modelA->clone()); - CHECK_TRUE(*modelA == *modelB); + ASSERT_EQ(*modelA, *modelB); } -TEST(groupSteadystate, testCloneModel) +TEST(ExampleSteadystate, CloneModel) { auto modelA = amici::generic_model::getModel(); - auto modelB = std::unique_ptr( - new amici::model_model_steadystate::Model_model_steadystate()); + auto modelB = std::make_unique< + amici::model_model_steadystate::Model_model_steadystate>(); - CHECK_TRUE(*modelA == *modelB); + ASSERT_EQ(*modelA, *modelB); } -TEST(groupSteadystate, testExpDataFromReturnData) +TEST(ExampleSteadystate, ExpDataFromReturnData) { auto model = amici::generic_model::getModel(); auto solver = model->getSolver(); @@ -76,7 +73,7 @@ TEST(groupSteadystate, testExpDataFromReturnData) runAmiciSimulation(*solver, &edata, *model); } -TEST(groupSteadystate, testReuseSolver) +TEST(ExampleSteadystate, ReuseSolver) { auto model = amici::generic_model::getModel(); auto solver = model->getSolver(); @@ -90,7 +87,7 @@ TEST(groupSteadystate, testReuseSolver) 
runAmiciSimulation(*solver, nullptr, *model); } -TEST(groupSteadystate, testRethrow) +TEST(ExampleSteadystate, Rethrow) { auto model = amici::generic_model::getModel(); auto solver = model->getSolver(); @@ -109,12 +106,12 @@ TEST(groupSteadystate, testRethrow) runAmiciSimulation(*solver, nullptr, *model); // must throw - CHECK_THROWS(amici::IntegrationFailure, - runAmiciSimulation(*solver, nullptr, *model, true)); + ASSERT_THROW(runAmiciSimulation(*solver, nullptr, *model, true), + amici::IntegrationFailure); } -TEST(groupSteadystate, testMaxtime) +TEST(ExampleSteadystate, Maxtime) { auto model = amici::generic_model::getModel(); auto solver = model->getSolver(); @@ -125,100 +122,100 @@ TEST(groupSteadystate, testMaxtime) NEW_OPTION_FILE, *solver, "/model_steadystate/nosensi/options"); auto rdata = runAmiciSimulation(*solver, nullptr, *model); - CHECK_EQUAL(amici::AMICI_SUCCESS, rdata->status); + ASSERT_EQ(amici::AMICI_SUCCESS, rdata->status); solver->setMaxTime(0.000001); // must throw rdata = runAmiciSimulation(*solver, nullptr, *model); - CHECK_EQUAL(amici::AMICI_MAX_TIME_EXCEEDED, rdata->status); + ASSERT_EQ(amici::AMICI_MAX_TIME_EXCEEDED, rdata->status); } -TEST(groupSteadystate, testInitialStatesNonEmpty) +TEST(ExampleSteadystate, InitialStatesNonEmpty) { auto model = amici::generic_model::getModel(); - CHECK_FALSE(model->getInitialStates().empty()); + ASSERT_FALSE(model->getInitialStates().empty()); } -TEST(groupSteadystate, testInitialStateSensitivitiesNonEmpty) +TEST(ExampleSteadystate, InitialStateSensitivitiesNonEmpty) { auto model = amici::generic_model::getModel(); - CHECK_FALSE(model->getInitialStateSensitivities().empty()); + ASSERT_FALSE(model->getInitialStateSensitivities().empty()); } -TEST(groupSteadystate, testSimulation) +TEST(ExampleSteadystate, Simulation) { amici::simulateVerifyWrite( "/model_steadystate/nosensi/", 100 * TEST_ATOL, 100 * TEST_RTOL); } -TEST(groupSteadystate, testSensitivityForward) +TEST(ExampleSteadystate, SensitivityForward) { amici::simulateVerifyWrite("/model_steadystate/sensiforward/"); } -TEST(groupSteadystate, testSensitivityForwardPlist) +TEST(ExampleSteadystate, SensitivityForwardPlist) { amici::simulateVerifyWrite("/model_steadystate/sensiforwardplist/"); } -TEST(groupSteadystate, testSensitivityForwardErrorInt) +TEST(ExampleSteadystate, SensitivityForwardErrorInt) { amici::simulateVerifyWrite("/model_steadystate/sensiforwarderrorint/"); } -TEST(groupSteadystate, testSensitivityForwardErrorNewt) +TEST(ExampleSteadystate, SensitivityForwardErrorNewt) { amici::simulateVerifyWrite("/model_steadystate/sensiforwarderrornewt/"); } -TEST(groupSteadystate, testSensitivityForwardDense) +TEST(ExampleSteadystate, SensitivityForwardDense) { amici::simulateVerifyWrite("/model_steadystate/sensiforwarddense/"); } -TEST(groupSteadystate, testSensitivityForwardSPBCG) +TEST(ExampleSteadystate, SensitivityForwardSPBCG) { amici::simulateVerifyWrite( "/model_steadystate/nosensiSPBCG/", 10 * TEST_ATOL, 10 * TEST_RTOL); } -TEST(groupSteadystate, testSensiFwdNewtonPreeq) +TEST(ExampleSteadystate, SensiFwdNewtonPreeq) { amici::simulateVerifyWrite("/model_steadystate/sensifwdnewtonpreeq/"); } -TEST(groupSteadystate, testSensiAdjNewtonPreeq) +TEST(ExampleSteadystate, SensiAdjNewtonPreeq) { amici::simulateVerifyWrite("/model_steadystate/sensiadjnewtonpreeq/"); } -TEST(groupSteadystate, testSensiFwdSimPreeq) +TEST(ExampleSteadystate, SensiFwdSimPreeq) { amici::simulateVerifyWrite("/model_steadystate/sensifwdsimpreeq/"); } -TEST(groupSteadystate, 
testSensiFwdSimPreeqFSA) +TEST(ExampleSteadystate, SensiFwdSimPreeqFSA) { amici::simulateVerifyWrite("/model_steadystate/sensifwdsimpreeqFSA/"); } -TEST(groupSteadystate, testSensiAdjSimPreeq) +TEST(ExampleSteadystate, SensiAdjSimPreeq) { amici::simulateVerifyWrite("/model_steadystate/sensiadjsimpreeq/"); } -TEST(groupSteadystate, testSensiAdjSimPreeqFSA) +TEST(ExampleSteadystate, SensiAdjSimPreeqFSA) { amici::simulateVerifyWrite("/model_steadystate/sensiadjsimpreeqFSA/"); } -TEST(groupSteadystate, testSensiFwdByhandPreeq) +TEST(ExampleSteadystate, SensiFwdByhandPreeq) { amici::simulateVerifyWrite("/model_steadystate/sensifwdbyhandpreeq/"); } -TEST(groupSteadystate, testSensiAdjByhandPreeq) +TEST(ExampleSteadystate, SensiAdjByhandPreeq) { amici::simulateVerifyWrite("/model_steadystate/sensiadjbyhandpreeq/"); } diff --git a/deps/AMICI/tests/cpputest/testOptions.h5 b/deps/AMICI/tests/cpp/testOptions.h5 similarity index 100% rename from deps/AMICI/tests/cpputest/testOptions.h5 rename to deps/AMICI/tests/cpp/testOptions.h5 diff --git a/deps/AMICI/tests/cpputest/testfunctions.cpp b/deps/AMICI/tests/cpp/testfunctions.cpp similarity index 91% rename from deps/AMICI/tests/cpputest/testfunctions.cpp rename to deps/AMICI/tests/cpp/testfunctions.cpp index 382e7fa43..3e03eedfc 100644 --- a/deps/AMICI/tests/cpputest/testfunctions.cpp +++ b/deps/AMICI/tests/cpp/testfunctions.cpp @@ -9,8 +9,7 @@ #include #include -#include -#include +#include "gtest/gtest.h" namespace amici { @@ -120,12 +119,12 @@ bool withinTolerance(double expected, double actual, double atol, double rtol, i void checkEqualArray(std::vector const& expected, std::vector const& actual, double atol, double rtol, std::string const& name) { - CHECK_EQUAL(expected.size(), actual.size()); + ASSERT_EQ(expected.size(), actual.size()); for(int i = 0; (unsigned) i < expected.size(); ++i) { bool withinTol = withinTolerance(expected[i], actual[i], atol, rtol, i, name.c_str()); - CHECK_TRUE(withinTol); + ASSERT_TRUE(withinTol); } } @@ -135,12 +134,12 @@ void checkEqualArray(const double *expected, const double *actual, const int len if(!length) return; - CHECK_TRUE(expected && actual); + ASSERT_TRUE(expected && actual); for(int i = 0; i < length; ++i) { bool withinTol = withinTolerance(expected[i], actual[i], atol, rtol, i, name); - CHECK_TRUE(withinTol); + ASSERT_TRUE(withinTol); } } @@ -148,19 +147,19 @@ void checkEqualArrayStrided(const double *expected, const double *actual, int le if(!expected && !actual) return; - CHECK_TRUE(expected && actual); + ASSERT_TRUE(expected && actual); for(int i = 0; i < length; ++i) { bool withinTol = withinTolerance(expected[i * strideExpected], actual[i * strideActual], atol, rtol, i, name); - CHECK_TRUE(withinTol); + ASSERT_TRUE(withinTol); } } void verifyReturnData(std::string const& hdffile, std::string const& resultPath, const ReturnData *rdata, const Model *model, double atol, double rtol) { - CHECK_FALSE(rdata == nullptr); + ASSERT_FALSE(rdata == nullptr); if(!hdf5::locationExists(hdffile, resultPath)) { fprintf(stderr, "ERROR: No results available for %s!\n", @@ -176,19 +175,19 @@ void verifyReturnData(std::string const& hdffile, std::string const& resultPath, std::vector expected; auto statusExp = hdf5::getIntScalarAttribute(file, resultPath, "status"); - CHECK_EQUAL(statusExp, rdata->status); + ASSERT_EQ(statusExp, rdata->status); double llhExp = hdf5::getDoubleScalarAttribute(file, resultPath, "llh"); - CHECK_TRUE(withinTolerance(llhExp, rdata->llh, atol, rtol, 1, "llh")); + 
ASSERT_TRUE(withinTolerance(llhExp, rdata->llh, atol, rtol, 1, "llh")); double chi2Exp = hdf5::getDoubleScalarAttribute(file, resultPath, "chi2"); - CHECK_TRUE(withinTolerance(chi2Exp, rdata->chi2, atol, rtol, 1, "chi2")); + ASSERT_TRUE(withinTolerance(chi2Exp, rdata->chi2, atol, rtol, 1, "chi2")); if(hdf5::locationExists(file, resultPath + "/x")) { expected = hdf5::getDoubleDataset2D(file, resultPath + "/x", m, n); checkEqualArray(expected, rdata->x, atol, rtol, "x"); } else { - CHECK_TRUE(rdata->x.empty()); + ASSERT_TRUE(rdata->x.empty()); } // CHECK_EQUAL(AMICI_O2MODE_FULL, udata->o2mode); @@ -197,42 +196,42 @@ void verifyReturnData(std::string const& hdffile, std::string const& resultPath, expected = hdf5::getDoubleDataset2D(file, resultPath + "/diagnosis/J", m, n); checkEqualArray(expected, rdata->J, atol, rtol, "J"); } else { - CHECK_TRUE(rdata->J.empty()); + ASSERT_TRUE(rdata->J.empty()); } if(hdf5::locationExists(file, resultPath + "/y")) { expected = hdf5::getDoubleDataset2D(file, resultPath + "/y", m, n); checkEqualArray(expected, rdata->y, atol, rtol, "y"); } else { - CHECK_TRUE(rdata->y.empty()); + ASSERT_TRUE(rdata->y.empty()); } if(hdf5::locationExists(file, resultPath + "/res")) { expected = hdf5::getDoubleDataset1D(file, resultPath + "/res"); checkEqualArray(expected, rdata->res, atol, rtol, "res"); } else { - CHECK_TRUE(rdata->res.empty()); + ASSERT_TRUE(rdata->res.empty()); } if(hdf5::locationExists(file, resultPath + "/z")) { expected = hdf5::getDoubleDataset2D(file, resultPath + "/z", m, n); checkEqualArray(expected, rdata->z, atol, rtol, "z"); } else { - CHECK_TRUE(rdata->z.empty()); + ASSERT_TRUE(rdata->z.empty()); } if(hdf5::locationExists(file, resultPath + "/rz")) { expected = hdf5::getDoubleDataset2D(file, resultPath + "/rz", m, n); checkEqualArray(expected, rdata->rz, atol, rtol, "rz"); } else { - CHECK_TRUE(rdata->rz.empty()); + ASSERT_TRUE(rdata->rz.empty()); } if(hdf5::locationExists(file, resultPath + "/sigmaz")) { expected = hdf5::getDoubleDataset2D(file, resultPath + "/sigmaz", m, n); checkEqualArray(expected, rdata->sigmaz, atol, rtol, "sigmaz"); } else { - CHECK_TRUE(rdata->sigmaz.empty()); + ASSERT_TRUE(rdata->sigmaz.empty()); } expected = hdf5::getDoubleDataset1D(file, resultPath + "/diagnosis/xdot"); @@ -244,8 +243,8 @@ void verifyReturnData(std::string const& hdffile, std::string const& resultPath, if(rdata->sensi >= SensitivityOrder::first) { verifyReturnDataSensitivities(file, resultPath, rdata, model, atol, rtol); } else { - CHECK_EQUAL(0, rdata->sllh.size()); - CHECK_EQUAL(0, rdata->s2llh.size()); + ASSERT_EQ(0, rdata->sllh.size()); + ASSERT_EQ(0, rdata->s2llh.size()); } } @@ -257,7 +256,7 @@ void verifyReturnDataSensitivities(H5::H5File const& file, std::string const& re expected = hdf5::getDoubleDataset1D(file, resultPath + "/sllh"); checkEqualArray(expected, rdata->sllh, atol, rtol, "sllh"); } else { - CHECK_TRUE(rdata->sllh.empty()); + ASSERT_TRUE(rdata->sllh.empty()); } if(rdata->sensi_meth == SensitivityMethod::forward) { @@ -266,21 +265,21 @@ void verifyReturnDataSensitivities(H5::H5File const& file, std::string const& re expected = hdf5::getDoubleDataset2D(file, resultPath + "/sx0", m, n); checkEqualArray(expected, rdata->sx0, atol, rtol, "sx0"); } else { - CHECK_TRUE(rdata->sx0.empty()); + ASSERT_TRUE(rdata->sx0.empty()); } if(hdf5::locationExists(file, resultPath + "/sres")) { expected = hdf5::getDoubleDataset2D(file, resultPath + "/sres", m, n); checkEqualArray(expected, rdata->sres, atol, rtol, "sres"); } else { - 
CHECK_TRUE(rdata->sres.empty()); + ASSERT_TRUE(rdata->sres.empty()); } if(hdf5::locationExists(file, resultPath + "/FIM")) { expected = hdf5::getDoubleDataset2D(file, resultPath + "/FIM", m, n); checkEqualArray(expected, rdata->FIM, atol, rtol, "FIM"); } else { - CHECK_TRUE(rdata->FIM.empty()); + ASSERT_TRUE(rdata->FIM.empty()); } @@ -295,7 +294,7 @@ void verifyReturnDataSensitivities(H5::H5File const& file, std::string const& re &rdata->sx[ip * model->nt() * rdata->nx], model->nt() * model->nxtrue_rdata, atol, rtol, "sx"); } else { - CHECK_TRUE(rdata->sx.empty()); + ASSERT_TRUE(rdata->sx.empty()); } if(hdf5::locationExists(file, resultPath + "/sy")) { @@ -305,7 +304,7 @@ void verifyReturnDataSensitivities(H5::H5File const& file, std::string const& re &rdata->sy[ip * model->nt() * model->ny], model->nt() * model->nytrue, atol, rtol, "sy"); } else { - CHECK_TRUE(rdata->sy.empty()); + ASSERT_TRUE(rdata->sy.empty()); } @@ -349,8 +348,8 @@ void verifyReturnDataSensitivities(H5::H5File const& file, std::string const& re expected = hdf5::getDoubleDataset2D(file, resultPath + "/s2llh", m, n); checkEqualArray(expected, rdata->s2llh, atol, rtol, "s2llh"); } else { - CHECK_EQUAL(0, rdata->s2llh.size()); - CHECK_EQUAL(0, rdata->s2rz.size()); + ASSERT_EQ(0, rdata->s2llh.size()); + ASSERT_EQ(0, rdata->s2rz.size()); } } diff --git a/deps/AMICI/tests/cpputest/testfunctions.h b/deps/AMICI/tests/cpp/testfunctions.h similarity index 98% rename from deps/AMICI/tests/cpputest/testfunctions.h rename to deps/AMICI/tests/cpp/testfunctions.h index 2cb465822..2d1d0f732 100644 --- a/deps/AMICI/tests/cpputest/testfunctions.h +++ b/deps/AMICI/tests/cpp/testfunctions.h @@ -10,12 +10,9 @@ #include #endif -#include // make std::ostringstream available (needs to come before TestHarness.h) +#include #include -#include -#include - namespace amici { class ReturnData; diff --git a/deps/AMICI/tests/cpp/unittests/CMakeLists.txt b/deps/AMICI/tests/cpp/unittests/CMakeLists.txt new file mode 100644 index 000000000..5e1f7e68a --- /dev/null +++ b/deps/AMICI/tests/cpp/unittests/CMakeLists.txt @@ -0,0 +1,27 @@ +project(unittests) + +find_package(Boost COMPONENTS serialization) + +set(SRC_LIST + testMisc.cpp + testExpData.cpp +) + +add_executable(${PROJECT_NAME} ${SRC_LIST}) + +target_include_directories(${PROJECT_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) + +if(Boost_FOUND) + target_sources(${PROJECT_NAME} PRIVATE testSerialization.cpp) + target_include_directories(${PROJECT_NAME} PRIVATE "${Boost_INCLUDE_DIR}") +endif() +target_link_libraries(${PROJECT_NAME} + amici-testing + Upstream::amici + ${Boost_LIBRARIES} + gtest_main + ) + +include(GoogleTest) + +gtest_discover_tests(${PROJECT_NAME}) diff --git a/deps/AMICI/tests/cpp/unittests/testExpData.cpp b/deps/AMICI/tests/cpp/unittests/testExpData.cpp new file mode 100644 index 000000000..4cf947341 --- /dev/null +++ b/deps/AMICI/tests/cpp/unittests/testExpData.cpp @@ -0,0 +1,328 @@ +#include "testfunctions.h" + +#include +#include +#include + +#include +#include +#include +#include + +#include + +namespace amici { +namespace generic_model { +std::unique_ptr getModel(); +} // namespace generic_model +} // namespace amici + +using namespace amici; + +namespace { + +class ExpDataTest : public ::testing::Test { + protected: + void SetUp() override { + model->setTimepoints(timepoints); + model->setNMaxEvent(nmaxevent); + testModel.setTimepoints(timepoints); + testModel.setNMaxEvent(nmaxevent); + } + + int nx = 1, ny = 2, nz = 3, nmaxevent = 4; + std::vector timepoints = { 1, 2, 3, 4 }; + + 
std::unique_ptr model = generic_model::getModel(); + + Model_Test testModel = Model_Test( + ModelDimensions( + nx, // nx_rdata + nx, // nxtrue_rdata + nx, // nx_solver + nx, // nxtrue_solver + 0, // nx_solver_reinit + 1, // np + 3, // nk + ny, // ny + ny, // nytrue + nz, // nz + nz, // nztrue + nmaxevent, // ne + 0, // nJ + 0, // nw + 0, // ndwdx + 0, // ndwdp + 0, // dwdw + 0, // ndxdotdw + {}, // ndJydy + 0, // nnz + 0, // ubw + 0 // lbw + ), + SimulationParameters( + std::vector(3, 0.0), + std::vector(1, 0.0), + std::vector(2, 1) + ), + SecondOrderMode::none, + std::vector(), + std::vector()); +}; + +TEST_F(ExpDataTest, DefaultConstructable) +{ + ExpData edata{}; + ASSERT_EQ(edata.nytrue(), 0); + ASSERT_EQ(edata.nztrue(), 0); + ASSERT_EQ(edata.nmaxevent(), 0); +} +TEST_F(ExpDataTest, ModelCtor) +{ + ExpData edata(model->nytrue, model->nztrue, model->nMaxEvent()); + ASSERT_EQ(edata.nytrue(), model->nytrue); + ASSERT_EQ(edata.nztrue(), model->nztrue); + ASSERT_EQ(edata.nmaxevent(), model->nMaxEvent()); +} + +TEST_F(ExpDataTest, DimensionCtor) +{ + ExpData edata(model->nytrue, model->nztrue, model->nMaxEvent(), timepoints); + ASSERT_EQ(edata.nytrue(), model->nytrue); + ASSERT_EQ(edata.nztrue(), model->nztrue); + ASSERT_EQ(edata.nmaxevent(), model->nMaxEvent()); + ASSERT_EQ(edata.nt(), model->nt()); + checkEqualArray( + timepoints, edata.getTimepoints(), TEST_ATOL, TEST_RTOL, "ts"); +} + +TEST_F(ExpDataTest, MeasurementCtor) +{ + std::vector y(ny * timepoints.size(), 0.0); + std::vector y_std(ny * timepoints.size(), 0.1); + std::vector z(nz * nmaxevent, 0.0); + std::vector z_std(nz * nmaxevent, 0.1); + + ExpData edata(testModel.nytrue, + testModel.nztrue, + testModel.nMaxEvent(), + timepoints, + y, + y_std, + z, + z_std); + ASSERT_EQ(edata.nytrue(), testModel.nytrue); + ASSERT_EQ(edata.nztrue(), testModel.nztrue); + ASSERT_EQ(edata.nmaxevent(), testModel.nMaxEvent()); + ASSERT_EQ(edata.nt(), testModel.nt()); + checkEqualArray( + timepoints, edata.getTimepoints(), TEST_ATOL, TEST_RTOL, "ts"); + checkEqualArray( + y, edata.getObservedData(), TEST_ATOL, TEST_RTOL, "observedData"); + checkEqualArray(y_std, + edata.getObservedDataStdDev(), + TEST_ATOL, + TEST_RTOL, + "observedDataStdDev"); + checkEqualArray( + z, edata.getObservedEvents(), TEST_ATOL, TEST_RTOL, "observedEvents"); + checkEqualArray(z_std, + edata.getObservedEventsStdDev(), + TEST_ATOL, + TEST_RTOL, + "observedEventsStdDev"); + + ExpData edata_copy(edata); + ASSERT_EQ(edata.nytrue(), edata_copy.nytrue()); + ASSERT_EQ(edata.nztrue(), edata_copy.nztrue()); + ASSERT_EQ(edata.nmaxevent(), edata_copy.nmaxevent()); + ASSERT_EQ(edata.nt(), edata_copy.nt()); + checkEqualArray(edata_copy.getTimepoints(), + edata.getTimepoints(), + TEST_ATOL, + TEST_RTOL, + "ts"); + checkEqualArray(edata_copy.getObservedData(), + edata.getObservedData(), + TEST_ATOL, + TEST_RTOL, + "observedData"); + checkEqualArray(edata_copy.getObservedDataStdDev(), + edata.getObservedDataStdDev(), + TEST_ATOL, + TEST_RTOL, + "observedDataStdDev"); + checkEqualArray(edata_copy.getObservedEvents(), + edata.getObservedEvents(), + TEST_ATOL, + TEST_RTOL, + "observedEvents"); + checkEqualArray(edata_copy.getObservedEventsStdDev(), + edata.getObservedEventsStdDev(), + TEST_ATOL, + TEST_RTOL, + "observedEventsStdDev"); +} + +TEST_F(ExpDataTest, CopyConstructable) +{ + testModel.setTimepoints(timepoints); + auto edata = ExpData(testModel); + ASSERT_EQ(edata.nytrue(), testModel.nytrue); + ASSERT_EQ(edata.nztrue(), testModel.nztrue); + ASSERT_EQ(edata.nmaxevent(), 
testModel.nMaxEvent()); + ASSERT_EQ(edata.nt(), testModel.nt()); + checkEqualArray(testModel.getTimepoints(), + edata.getTimepoints(), + TEST_ATOL, + TEST_RTOL, + "ts"); +} + +TEST_F(ExpDataTest, DimensionChecks) +{ + std::vector bad_std(ny, -0.1); + std::vector y(ny * timepoints.size(), 0.0); + std::vector y_std(ny * timepoints.size(), 0.1); + std::vector z(nz * nmaxevent, 0.0); + std::vector z_std(nz * nmaxevent, 0.1); + + ASSERT_THROW(ExpData(testModel.nytrue, + testModel.nztrue, + testModel.nMaxEvent(), + timepoints, + z, + z_std, + z, + z_std), + AmiException); + + ASSERT_THROW(ExpData(testModel.nytrue, + testModel.nztrue, + testModel.nMaxEvent(), + timepoints, + z, + bad_std, + z, + z_std), + AmiException); + + ExpData edata(testModel); + + std::vector bad_y(ny * timepoints.size() + 1, 0.0); + std::vector bad_y_std(ny * timepoints.size() + 1, 0.1); + std::vector bad_z(nz * nmaxevent + 1, 0.0); + std::vector bad_z_std(nz * nmaxevent + 1, 0.1); + + ASSERT_THROW(edata.setObservedData(bad_y), AmiException); + ASSERT_THROW(edata.setObservedDataStdDev(bad_y_std), AmiException); + ASSERT_THROW(edata.setObservedEvents(bad_z), AmiException); + ASSERT_THROW(edata.setObservedEventsStdDev(bad_y_std), AmiException); + + std::vector bad_single_y(edata.nt() + 1, 0.0); + std::vector bad_single_y_std(edata.nt() + 1, 0.1); + std::vector bad_single_z(edata.nmaxevent() + 1, 0.0); + std::vector bad_single_z_std(edata.nmaxevent() + 1, 0.1); + + ASSERT_THROW(edata.setObservedData(bad_single_y, 0), + AmiException); + ASSERT_THROW(edata.setObservedDataStdDev(bad_single_y_std, 0), + AmiException); + ASSERT_THROW(edata.setObservedEvents(bad_single_z, 0), + AmiException); + ASSERT_THROW(edata.setObservedEventsStdDev(bad_single_y_std, 0), + AmiException); + + ASSERT_THROW(edata.setTimepoints(std::vector{ 0.0, 1.0, 0.5 }), + AmiException); +} + +TEST_F(ExpDataTest, SettersGetters) +{ + ExpData edata(testModel); + + std::vector y(ny * timepoints.size(), 0.0); + std::vector y_std(ny * timepoints.size(), 0.1); + std::vector z(nz * nmaxevent, 0.0); + std::vector z_std(nz * nmaxevent, 0.1); + + edata.setObservedData(y); + checkEqualArray( + edata.getObservedData(), y, TEST_ATOL, TEST_RTOL, "ObservedData"); + edata.setObservedDataStdDev(y_std); + checkEqualArray(edata.getObservedDataStdDev(), + y_std, + TEST_ATOL, + TEST_RTOL, + "ObservedDataStdDev"); + edata.setObservedEvents(z); + checkEqualArray( + edata.getObservedEvents(), z, TEST_ATOL, TEST_RTOL, "ObservedEvents"); + edata.setObservedEventsStdDev(z_std); + checkEqualArray(edata.getObservedEventsStdDev(), + z_std, + TEST_ATOL, + TEST_RTOL, + "ObservedEventsStdDev"); + + std::vector single_y(edata.nt(), 0.0); + std::vector single_y_std(edata.nt(), 0.1); + + for (int iy = 0; iy < ny; ++iy) { + edata.setObservedData(single_y, iy); + edata.setObservedDataStdDev(single_y_std, iy); + } + ASSERT_THROW(edata.setObservedData(single_y, ny), std::exception); + ASSERT_THROW(edata.setObservedData(single_y, -1), std::exception); + ASSERT_THROW(edata.setObservedDataStdDev(single_y_std, ny), std::exception); + ASSERT_THROW(edata.setObservedDataStdDev(single_y_std, -1), std::exception); + + std::vector single_z(edata.nmaxevent(), 0.0); + std::vector single_z_std(edata.nmaxevent(), 0.1); + + for (int iz = 0; iz < nz; ++iz) { + edata.setObservedEvents(single_z, iz); + edata.setObservedEventsStdDev(single_z_std, iz); + } + + ASSERT_THROW(edata.setObservedEvents(single_z, nz), std::exception); + ASSERT_THROW(edata.setObservedEvents(single_z, -1), std::exception); + 
ASSERT_THROW(edata.setObservedEventsStdDev(single_z_std, nz), + std::exception); + ASSERT_THROW(edata.setObservedEventsStdDev(single_z_std, -1), + std::exception); + + ASSERT_TRUE(edata.getObservedDataPtr(0)); + ASSERT_TRUE(edata.getObservedDataStdDevPtr(0)); + ASSERT_TRUE(edata.getObservedEventsPtr(0)); + ASSERT_TRUE(edata.getObservedEventsStdDevPtr(0)); + + std::vector empty(0, 0.0); + + edata.setObservedData(empty); + edata.setObservedDataStdDev(empty); + edata.setObservedEvents(empty); + edata.setObservedEventsStdDev(empty); + + ASSERT_TRUE(!edata.getObservedDataPtr(0)); + ASSERT_TRUE(!edata.getObservedDataStdDevPtr(0)); + ASSERT_TRUE(!edata.getObservedEventsPtr(0)); + ASSERT_TRUE(!edata.getObservedEventsStdDevPtr(0)); + + checkEqualArray( + edata.getObservedData(), empty, TEST_ATOL, TEST_RTOL, "ObservedData"); + checkEqualArray(edata.getObservedDataStdDev(), + empty, + TEST_ATOL, + TEST_RTOL, + "ObservedDataStdDev"); + checkEqualArray( + edata.getObservedEvents(), empty, TEST_ATOL, TEST_RTOL, "ObservedEvents"); + checkEqualArray(edata.getObservedEventsStdDev(), + empty, + TEST_ATOL, + TEST_RTOL, + "ObservedEventsStdDev"); +} + +} // namespace diff --git a/deps/AMICI/tests/cpp/unittests/testMisc.cpp b/deps/AMICI/tests/cpp/unittests/testMisc.cpp new file mode 100644 index 000000000..4106742db --- /dev/null +++ b/deps/AMICI/tests/cpp/unittests/testMisc.cpp @@ -0,0 +1,597 @@ +#include "testfunctions.h" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +namespace amici { +namespace generic_model { + +std::unique_ptr getModel() +{ + return std::make_unique(); +} + +} // namespace generic_model +} // namespace amici + +using namespace amici; + +namespace { + +void +testSolverGetterSetters(CVodeSolver solver, + SensitivityMethod sensi_meth, + SensitivityOrder sensi, + InternalSensitivityMethod ism, + InterpolationType interp, + NonlinearSolverIteration iter, + LinearMultistepMethod lmm, + int steps, + int badsteps, + double tol, + double badtol); + +class ModelTest : public ::testing::Test { + protected: + int nx = 1, ny = 2, nz = 3, nmaxevent = 4; + std::vector p{ 1.0 }; + std::vector k{ 0.5, 0.4, 0.7 }; + std::vector plist{ 1 }; + std::vector idlist{ 0 }; + std::vector z2event{ 0, 0, 0 }; + Model_Test model = Model_Test( + ModelDimensions( + nx, // nx_rdata + nx, // nxtrue_rdata + nx, // nx_solver + nx, // nxtrue_solver + 0, // nx_solver_reinit + static_cast(p.size()), // np + static_cast(k.size()), // nk + ny, // ny + ny, // nytrue + nz, // nz + nz, // nztrue + nmaxevent, // ne + 0, // nJ + 0, // nw + 0, // ndwdx + 0, // ndwdp + 0, // dwdw + 0, // ndxdotdw + {}, // ndJydy + 0, // nnz + 0, // ubw + 0 // lbw + ), + SimulationParameters(k, p, plist), + SecondOrderMode::none, + idlist, + z2event); + std::vector unscaled{ NAN }; +}; + + +TEST_F(ModelTest, LinScaledParameterIsNotTransformed) +{ + model.setParameterScale(ParameterScaling::none); + + ASSERT_EQ(p[0], model.getParameters()[0]); +} + +TEST_F(ModelTest, LogScaledParameterIsTransformed) +{ + model.setParameterScale(ParameterScaling::ln); + + ASSERT_NEAR(std::log(p[0]), model.getParameters()[0], 1e-16); +} + +TEST_F(ModelTest, Log10ScaledParameterIsTransformed) +{ + model.setParameterScale(ParameterScaling::log10); + + ASSERT_NEAR(std::log10(p[0]), model.getParameters()[0], 1e-16); +} + +TEST_F(ModelTest, ParameterScaleTooShort) +{ + std::vector pscale(p.size() - 1, + ParameterScaling::log10); + ASSERT_THROW(model.setParameterScale(pscale), AmiException); + +} + 
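[Editorial aside - illustration only, not part of the patch.] The fixtures above show the general CppUTest-to-GoogleTest migration pattern used throughout this change: TEST_GROUP blocks with setup() become classes deriving from ::testing::Test with a SetUp() override, and CHECK_EQUAL / DOUBLES_EQUAL / CHECK_TRUE / CHECK_THROWS map to ASSERT_EQ / ASSERT_NEAR / ASSERT_TRUE / ASSERT_THROW, with the statement and exception arguments swapped for throws. A minimal self-contained sketch of that pattern, using a hypothetical MigrationDemo fixture unrelated to AMICI:

// Illustration only (not part of the patch); MigrationDemo is a hypothetical fixture.
#include <gtest/gtest.h>

#include <stdexcept>
#include <vector>

class MigrationDemo : public ::testing::Test {
  protected:
    void SetUp() override {            // replaces CppUTest's setup()
        values = { 1.0, 2.0, 3.0 };
    }
    std::vector<double> values;
};

TEST_F(MigrationDemo, AssertionMapping) {
    ASSERT_EQ(3u, values.size());          // was CHECK_EQUAL
    ASSERT_NEAR(1.0, values[0], 1e-16);    // was DOUBLES_EQUAL
    ASSERT_TRUE(!values.empty());          // was CHECK_TRUE
    // CHECK_THROWS(exception, statement) becomes
    // ASSERT_THROW(statement, exception): note the swapped argument order.
    ASSERT_THROW(values.at(10), std::out_of_range);
}

Such a test file no longer needs the old hand-written CppUTest main(): linking gtest_main provides the entry point, and the gtest_discover_tests() call in the accompanying CMakeLists registers each TEST/TEST_F case with CTest, as seen in the new CMake files in this patch.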
+TEST_F(ModelTest, ParameterScaleTooLong) +{ + std::vector pscale (p.size() + 1, + ParameterScaling::log10); + ASSERT_THROW(model.setParameterScale(pscale), AmiException); +} + +TEST_F(ModelTest, UnsortedTimepointsThrow){ + ASSERT_THROW(model.setTimepoints(std::vector{ 0.0, 1.0, 0.5 }), + AmiException); +} + +TEST_F(ModelTest, ParameterNameIdGetterSetter) +{ + model.setParameterById("p0", 3.0); + ASSERT_NEAR(model.getParameterById("p0"), 3.0, 1e-16); + ASSERT_THROW(model.getParameterById("p1"), AmiException); + ASSERT_NEAR( + model.setParametersByIdRegex("p[\\d]+", 5.0), p.size(), 1e-16); + for (const auto& ip : model.getParameters()) + ASSERT_NEAR(ip, 5.0, 1e-16); + ASSERT_THROW(model.setParametersByIdRegex("k[\\d]+", 5.0), AmiException); + + model.setParameterByName("p0", 3.0); + ASSERT_NEAR(model.getParameterByName("p0"), 3.0, 1e-16); + ASSERT_THROW(model.getParameterByName("p1"), AmiException); + ASSERT_NEAR( + model.setParametersByNameRegex("p[\\d]+", 5.0), p.size(), 1e-16); + for (const auto& ip : model.getParameters()) + ASSERT_NEAR(ip, 5.0, 1e-16); + ASSERT_THROW(model.setParametersByNameRegex("k[\\d]+", 5.0), AmiException); + + model.setFixedParameterById("k0", 3.0); + ASSERT_NEAR(model.getFixedParameterById("k0"), 3.0, 1e-16); + ASSERT_THROW(model.getFixedParameterById("k4"), AmiException); + ASSERT_NEAR( + model.setFixedParametersByIdRegex("k[\\d]+", 5.0), k.size(), 1e-16); + for (const auto& ik : model.getFixedParameters()) + ASSERT_NEAR(ik, 5.0, 1e-16); + ASSERT_THROW(model.setFixedParametersByIdRegex("p[\\d]+", 5.0), AmiException); + + model.setFixedParameterByName("k0", 3.0); + ASSERT_NEAR(model.getFixedParameterByName("k0"), 3.0, 1e-16); + ASSERT_THROW(model.getFixedParameterByName("k4"), AmiException); + ASSERT_NEAR( + model.setFixedParametersByNameRegex("k[\\d]+", 5.0), k.size(), 1e-16); + for (const auto& ik : model.getFixedParameters()) + ASSERT_NEAR(ik, 5.0, 1e-16); + ASSERT_THROW(model.setFixedParametersByNameRegex("p[\\d]+", 5.0), + AmiException); +} + +TEST_F(ModelTest, ReinitializeFixedParameterInitialStates) +{ + ASSERT_THROW(model.setReinitializeFixedParameterInitialStates(true), + AmiException); + model.setReinitializeFixedParameterInitialStates(false); + ASSERT_TRUE(!model.getReinitializeFixedParameterInitialStates()); + AmiVector x(nx); + AmiVectorArray sx(model.np(), nx); +} + +TEST(SymbolicFunctionsTest, Sign) +{ + ASSERT_EQ(-1, sign(-2)); + ASSERT_EQ(0, sign(0)); + ASSERT_EQ(1, sign(2)); +} + +TEST(SymbolicFunctionsTest, Heaviside) +{ + ASSERT_EQ(0, heaviside(-1)); + ASSERT_EQ(1, heaviside(0)); + ASSERT_EQ(1, heaviside(1)); +} + +TEST(SymbolicFunctionsTest, Min) +{ + ASSERT_EQ(-1, min(-1, 2, 0)); + ASSERT_EQ(-2, min(1, -2, 0)); + ASSERT_TRUE(isNaN(min(getNaN(), getNaN(), 0))); + ASSERT_EQ(-1, min(-1, getNaN(), 0)); + ASSERT_EQ(-1, min(getNaN(), -1, 0)); +} + +TEST(SymbolicFunctionsTest, Max) +{ + ASSERT_EQ(2, max(-1, 2, 0)); + ASSERT_EQ(1, max(1, -2, 0)); + ASSERT_TRUE(isNaN(max(getNaN(), getNaN(), 0))); + ASSERT_EQ(-1, max(-1, getNaN(), 0)); + ASSERT_EQ(-1, max(getNaN(), -1, 0)); +} + +TEST(SymbolicFunctionsTest, DMin) +{ + ASSERT_EQ(0, Dmin(1, -1, -2, 0)); + ASSERT_EQ(1, Dmin(1, -1, 2, 0)); + ASSERT_EQ(1, Dmin(2, -1, -2, 0)); + ASSERT_EQ(0, Dmin(2, -1, 2, 0)); +} + +TEST(SymbolicFunctionsTest, DMax) +{ + ASSERT_EQ(1, Dmax(1, -1, -2, 0)); + ASSERT_EQ(0, Dmax(1, -1, 2, 0)); + ASSERT_EQ(0, Dmax(2, -1, -2, 0)); + ASSERT_EQ(1, Dmax(2, -1, 2, 0)); +} + +TEST(SymbolicFunctionsTest, pos_pow) +{ + ASSERT_EQ(0, pos_pow(-0.1, 3)); + ASSERT_EQ(pow(0.1, 3), 
pos_pow(0.1, 3)); +} + +TEST(SolverTestBasic, Equality) +{ + IDASolver i1, i2; + CVodeSolver c1, c2; + + ASSERT_EQ(i1, i2); + ASSERT_EQ(c1, c2); + ASSERT_FALSE(i1 == c1); +} + +TEST(SolverTestBasic, Clone) +{ + IDASolver i1; + std::unique_ptr i2(i1.clone()); + ASSERT_EQ(i1, *i2); + + CVodeSolver c1; + std::unique_ptr c2(c1.clone()); + ASSERT_TRUE(c1 == *c2); + ASSERT_FALSE(*i2 == *c2); +} + +TEST(SolverIdasTest, DefaultConstructableAndNotLeaky) +{ + IDASolver solver; +} + + +class SolverTest : public ::testing::Test { + protected: + void SetUp() override { + tol = 0.01; + badtol = -0.01; + sensi_meth = SensitivityMethod::adjoint; + sensi = SensitivityOrder::first; + steps = 1000; + badsteps = -1; + lmm = LinearMultistepMethod::adams; + iter = NonlinearSolverIteration::fixedpoint; + ism = InternalSensitivityMethod::staggered1; + interp = InterpolationType::polynomial; + } + + int nx = 1, ny = 2, nz = 3, ne = 0; + double tol, badtol; + std::vector timepoints = { 1, 2, 3, 4 }; + + std::unique_ptr model = generic_model::getModel(); + SensitivityMethod sensi_meth; + SensitivityOrder sensi; + int steps, badsteps; + LinearMultistepMethod lmm; + NonlinearSolverIteration iter; + InternalSensitivityMethod ism; + InterpolationType interp; + + Model_Test testModel = Model_Test( + ModelDimensions( + nx, // nx_rdata + nx, // nxtrue_rdata + nx, // nx_solver + nx, // nxtrue_solver + 0, // nx_solver_reinit + 1, // np + 3, // nk + ny, // ny + ny, // nytrue + nz, // nz + nz, // nztrue + ne, // ne + 0, // nJ + 0, // nw + 0, // ndwdx + 0, // ndwdp + 0, // dwdw + 0, // ndxdotdw + {}, // ndJydy + 1, // nnz + 0, // ubw + 0 // lbw + ), + SimulationParameters( + std::vector(3, 0.0), + std::vector(1, 0.0), + std::vector(2, 1) + ), + SecondOrderMode::none, + std::vector(0, 0.0), + std::vector()); + + CVodeSolver solver = CVodeSolver(); +}; + +TEST_F(SolverTest, SettersGettersNoSetup) +{ + testSolverGetterSetters(solver, + sensi_meth, + sensi, + ism, + interp, + iter, + lmm, + steps, + badsteps, + tol, + badtol); +} + +TEST_F(SolverTest, SettersGettersWithSetup) +{ + + solver.setSensitivityMethod(sensi_meth); + ASSERT_EQ(static_cast(solver.getSensitivityMethod()), + static_cast(sensi_meth)); + + auto rdata = std::make_unique(solver, testModel); + AmiVector x(nx), dx(nx); + AmiVectorArray sx(nx, 1), sdx(nx, 1); + + testModel.setInitialStates(std::vector{ 0 }); + + solver.setup(0, &testModel, x, dx, sx, sdx); + + testSolverGetterSetters(solver, + sensi_meth, + sensi, + ism, + interp, + iter, + lmm, + steps, + badsteps, + tol, + badtol); +} + +void +testSolverGetterSetters(CVodeSolver solver, + SensitivityMethod sensi_meth, + SensitivityOrder sensi, + InternalSensitivityMethod ism, + InterpolationType interp, + NonlinearSolverIteration iter, + LinearMultistepMethod lmm, + int steps, + int badsteps, + double tol, + double badtol) +{ + + solver.setSensitivityMethod(sensi_meth); + ASSERT_EQ(static_cast(solver.getSensitivityMethod()), + static_cast(sensi_meth)); + + solver.setSensitivityOrder(sensi); + ASSERT_EQ(static_cast(solver.getSensitivityOrder()), + static_cast(sensi)); + + solver.setInternalSensitivityMethod(ism); + ASSERT_EQ(static_cast(solver.getInternalSensitivityMethod()), + static_cast(ism)); + + solver.setInterpolationType(interp); + ASSERT_EQ(static_cast(solver.getInterpolationType()), + static_cast(interp)); + + solver.setNonlinearSolverIteration(iter); + ASSERT_EQ(static_cast(solver.getNonlinearSolverIteration()), + static_cast(iter)); + + solver.setLinearMultistepMethod(lmm); + 
ASSERT_EQ(static_cast(solver.getLinearMultistepMethod()), + static_cast(lmm)); + + solver.setPreequilibration(true); + ASSERT_EQ(solver.getPreequilibration(), true); + + solver.setStabilityLimitFlag(true); + ASSERT_EQ(solver.getStabilityLimitFlag(), true); + + ASSERT_THROW(solver.setNewtonMaxSteps(badsteps), AmiException); + solver.setNewtonMaxSteps(steps); + ASSERT_EQ(solver.getNewtonMaxSteps(), steps); + + ASSERT_THROW(solver.setNewtonMaxLinearSteps(badsteps), AmiException); + solver.setNewtonMaxLinearSteps(steps); + ASSERT_EQ(solver.getNewtonMaxLinearSteps(), steps); + + ASSERT_THROW(solver.setMaxSteps(badsteps), AmiException); + solver.setMaxSteps(steps); + ASSERT_EQ(solver.getMaxSteps(), steps); + + ASSERT_THROW(solver.setMaxStepsBackwardProblem(badsteps), AmiException); + solver.setMaxStepsBackwardProblem(steps); + ASSERT_EQ(solver.getMaxStepsBackwardProblem(), steps); + + ASSERT_THROW(solver.setRelativeTolerance(badtol), AmiException); + solver.setRelativeTolerance(tol); + ASSERT_EQ(solver.getRelativeTolerance(), tol); + + ASSERT_THROW(solver.setAbsoluteTolerance(badtol), AmiException); + solver.setAbsoluteTolerance(tol); + ASSERT_EQ(solver.getAbsoluteTolerance(), tol); + + ASSERT_THROW(solver.setRelativeToleranceQuadratures(badtol), AmiException); + solver.setRelativeToleranceQuadratures(tol); + ASSERT_EQ(solver.getRelativeToleranceQuadratures(), tol); + + ASSERT_THROW(solver.setAbsoluteToleranceQuadratures(badtol), AmiException); + solver.setAbsoluteToleranceQuadratures(tol); + ASSERT_EQ(solver.getAbsoluteToleranceQuadratures(), tol); + + ASSERT_THROW(solver.setRelativeToleranceSteadyState(badtol), AmiException); + solver.setRelativeToleranceSteadyState(tol); + ASSERT_EQ(solver.getRelativeToleranceSteadyState(), tol); + + ASSERT_THROW(solver.setAbsoluteToleranceSteadyState(badtol), AmiException); + solver.setAbsoluteToleranceSteadyState(tol); + ASSERT_EQ(solver.getAbsoluteToleranceSteadyState(), tol); +} + +class AmiVectorTest : public ::testing::Test { + protected: + std::vector vec1{ 1, 2, 4, 3 }; + std::vector vec2{ 4, 1, 2, 3 }; + std::vector vec3{ 4, 4, 2, 1 }; +}; + +TEST_F(AmiVectorTest, Vector) +{ + AmiVector av(vec1); + N_Vector nvec = av.getNVector(); + for (int i = 0; i < av.getLength(); ++i) + ASSERT_EQ(av.at(i), NV_Ith_S(nvec, i)); +} + +TEST_F(AmiVectorTest, VectorArray) +{ + AmiVectorArray ava(4, 3); + AmiVector av1(vec1), av2(vec2), av3(vec3); + std::vector avs{ av1, av2, av3 }; + for (int i = 0; i < ava.getLength(); ++i) + ava[i] = avs.at(i); + + std::vector badLengthVector(13, 0.0); + std::vector flattened(12, 0.0); + + ASSERT_THROW(ava.flatten_to_vector(badLengthVector), AmiException); + ava.flatten_to_vector(flattened); + for (int i = 0; i < ava.getLength(); ++i) { + const AmiVector av = ava[i]; + for (int j = 0; j < av.getLength(); ++j) + ASSERT_EQ(flattened.at(i * av.getLength() + j), av.at(j)); + } +} + +class SunMatrixWrapperTest : public ::testing::Test { + protected: + void SetUp() override { + A.set_data(0, 0, 0.69); + A.set_data(1, 0, 0.32); + A.set_data(2, 0, 0.95); + A.set_data(0, 1, 0.03); + A.set_data(1, 1, 0.44); + A.set_data(2, 1, 0.38); + + B.set_indexptr(0, 0); + B.set_indexptr(1, 2); + B.set_indexptr(2, 4); + B.set_indexptr(3, 5); + B.set_indexptr(4, 7); + B.set_data(0, 3); + B.set_data(1, 1); + B.set_data(2, 3); + B.set_data(3, 7); + B.set_data(4, 1); + B.set_data(5, 2); + B.set_data(6, 9); + B.set_indexval(0, 1); + B.set_indexval(1, 3); + B.set_indexval(2, 0); + B.set_indexval(3, 2); + B.set_indexval(4, 0); + B.set_indexval(5, 1); + 
B.set_indexval(6, 3); + } + + //inputs + std::vector a{0.82, 0.91, 0.13}; + std::vector b{0.77, 0.80}; + SUNMatrixWrapper A = SUNMatrixWrapper(3, 2); + SUNMatrixWrapper B = SUNMatrixWrapper(4, 4, 7, CSC_MAT); + // result + std::vector d{1.3753, 1.5084, 1.1655}; +}; + +TEST_F(SunMatrixWrapperTest, SparseMultiply) +{ + + auto A_sparse = SUNMatrixWrapper(A, 0.0, CSC_MAT); + auto c(a); //copy c + A_sparse.multiply(c, b); + checkEqualArray(d, c, TEST_ATOL, TEST_RTOL, "multiply"); +} + +TEST_F(SunMatrixWrapperTest, SparseMultiplyEmpty) +{ + // Ensure empty Matrix vector multiplication succeeds + auto A_sparse = SUNMatrixWrapper(1, 1, 0, CSR_MAT); + std::vector b {0.1}; + std::vector c {0.1}; + A_sparse.multiply(c, b); + ASSERT_TRUE(c[0] == 0.1); + + A_sparse = SUNMatrixWrapper(1, 1, 0, CSC_MAT); + A_sparse.multiply(c, b); + ASSERT_TRUE(c[0] == 0.1); +} + +TEST_F(SunMatrixWrapperTest, DenseMultiply) +{ + auto c(a); //copy c + A.multiply(c, b); + checkEqualArray(d, c, TEST_ATOL, TEST_RTOL, "multiply"); +} + +TEST_F(SunMatrixWrapperTest, StdVectorCtor) +{ + auto b_amivector = AmiVector(b); + auto a_amivector = AmiVector(a); +} + +TEST_F(SunMatrixWrapperTest, TransformThrows) +{ + ASSERT_THROW(SUNMatrixWrapper(A, 0.0, 13), std::invalid_argument); + auto A_sparse = SUNMatrixWrapper(A, 0.0, CSR_MAT); + ASSERT_THROW(SUNMatrixWrapper(A_sparse, 0.0, CSR_MAT), + std::invalid_argument); +} + +TEST_F(SunMatrixWrapperTest, BlockTranspose) +{ + SUNMatrixWrapper B_sparse(4, 4, 7, CSR_MAT); + ASSERT_THROW(B.transpose(B_sparse, 1.0, 4), std::domain_error); + + B_sparse = SUNMatrixWrapper(4, 4, 7, CSC_MAT); + B.transpose(B_sparse, -1.0, 2); + for (int idx = 0; idx < 7; idx++) { + ASSERT_EQ(SM_INDEXVALS_S(B.get())[idx], + SM_INDEXVALS_S(B_sparse.get())[idx]); + if (idx == 1) { + ASSERT_EQ(SM_DATA_S(B.get())[idx], + -SM_DATA_S(B_sparse.get())[3]); + } else if (idx == 3) { + ASSERT_EQ(SM_DATA_S(B.get())[idx], + -SM_DATA_S(B_sparse.get())[1]); + } else { + ASSERT_EQ(SM_DATA_S(B.get())[idx], + -SM_DATA_S(B_sparse.get())[idx]); + } + } + for (int icol = 0; icol <= 4; icol++) + ASSERT_EQ(SM_INDEXPTRS_S(B.get())[icol], + SM_INDEXPTRS_S(B_sparse.get())[icol]); +} + +} // namespace diff --git a/deps/AMICI/tests/cpp/unittests/testSerialization.cpp b/deps/AMICI/tests/cpp/unittests/testSerialization.cpp new file mode 100644 index 000000000..15c8df9d8 --- /dev/null +++ b/deps/AMICI/tests/cpp/unittests/testSerialization.cpp @@ -0,0 +1,257 @@ +#include +#include +#include + +#include "testfunctions.h" + +#include + +#include + +void +checkReturnDataEqual(amici::ReturnData const& r, amici::ReturnData const& s) +{ + ASSERT_EQ(r.np, s.np); + ASSERT_EQ(r.nk, s.nk); + ASSERT_EQ(r.nx, s.nx); + ASSERT_EQ(r.nxtrue, s.nxtrue); + ASSERT_EQ(r.nx_solver, s.nx_solver); + ASSERT_EQ(r.nx_solver_reinit, s.nx_solver_reinit); + ASSERT_EQ(r.ny, s.ny); + ASSERT_EQ(r.nytrue, s.nytrue); + ASSERT_EQ(r.nz, s.nz); + ASSERT_EQ(r.nztrue, s.nztrue); + ASSERT_EQ(r.ne, s.ne); + ASSERT_EQ(r.nJ, s.nJ); + ASSERT_EQ(r.nplist, s.nplist); + ASSERT_EQ(r.nmaxevent, s.nmaxevent); + ASSERT_EQ(r.nt, s.nt); + ASSERT_EQ(r.newton_maxsteps, s.newton_maxsteps); + ASSERT_EQ(r.pscale, s.pscale); + ASSERT_EQ(static_cast(r.o2mode), static_cast(s.o2mode)); + ASSERT_EQ(static_cast(r.sensi), static_cast(s.sensi)); + ASSERT_EQ(static_cast(r.sensi_meth), static_cast(s.sensi_meth)); + + using amici::checkEqualArray; + checkEqualArray(r.ts, s.ts, 1e-16, 1e-16, "ts"); + checkEqualArray(r.xdot, s.xdot, 1e-16, 1e-16, "xdot"); + checkEqualArray(r.J, s.J, 1e-16, 1e-16, "J"); + 
checkEqualArray(r.z, s.z, 1e-16, 1e-16, "z"); + checkEqualArray(r.sigmaz, s.sigmaz, 1e-16, 1e-16, "sigmaz"); + checkEqualArray(r.sz, s.sz, 1e-16, 1e-16, "sz"); + checkEqualArray(r.ssigmaz, s.ssigmaz, 1e-16, 1e-16, "ssigmaz"); + checkEqualArray(r.rz, s.rz, 1e-16, 1e-16, "rz"); + checkEqualArray(r.srz, s.srz, 1e-16, 1e-16, "srz"); + checkEqualArray(r.s2rz, s.s2rz, 1e-16, 1e-16, "s2rz"); + checkEqualArray(r.x, s.x, 1e-16, 1e-16, "x"); + checkEqualArray(r.sx, s.sx, 1e-16, 1e-16, "sx"); + + checkEqualArray(r.y, s.y, 1e-16, 1e-16, "y"); + checkEqualArray(r.sigmay, s.sigmay, 1e-16, 1e-16, "sigmay"); + checkEqualArray(r.sy, s.sy, 1e-16, 1e-16, "sy"); + checkEqualArray(r.ssigmay, s.ssigmay, 1e-16, 1e-16, "ssigmay"); + + ASSERT_EQ(r.numsteps, s.numsteps); + ASSERT_EQ(r.numstepsB, s.numstepsB); + ASSERT_EQ(r.numrhsevals, s.numrhsevals); + ASSERT_EQ(r.numrhsevalsB, s.numrhsevalsB); + ASSERT_EQ(r.numerrtestfails, s.numerrtestfails); + ASSERT_EQ(r.numerrtestfailsB, s.numerrtestfailsB); + ASSERT_EQ(r.numnonlinsolvconvfails, s.numnonlinsolvconvfails); + ASSERT_EQ(r.numnonlinsolvconvfailsB, s.numnonlinsolvconvfailsB); + ASSERT_EQ(r.order, s.order); + ASSERT_EQ(r.cpu_time, s.cpu_time); + ASSERT_EQ(r.cpu_timeB, s.cpu_timeB); + + ASSERT_EQ(r.preeq_status, s.preeq_status); + ASSERT_TRUE(r.preeq_t == s.preeq_t || + (std::isnan(r.preeq_t) && std::isnan(s.preeq_t))); + ASSERT_TRUE(r.preeq_wrms == s.preeq_wrms || + (std::isnan(r.preeq_wrms) && std::isnan(s.preeq_wrms))); + ASSERT_EQ(r.preeq_numsteps, s.preeq_numsteps); + ASSERT_EQ(r.preeq_numlinsteps, s.preeq_numlinsteps); + EXPECT_NEAR(r.preeq_cpu_time, s.preeq_cpu_time, 1e-16); + + ASSERT_EQ(r.posteq_status, s.posteq_status); + ASSERT_TRUE(r.posteq_t == s.posteq_t || + (std::isnan(r.posteq_t) && std::isnan(s.posteq_t))); + ASSERT_TRUE(r.posteq_wrms == s.posteq_wrms || + (std::isnan(r.posteq_wrms) && std::isnan(s.posteq_wrms))); + ASSERT_EQ(r.posteq_numsteps, s.posteq_numsteps); + ASSERT_EQ(r.posteq_numlinsteps, s.posteq_numlinsteps); + EXPECT_NEAR(r.posteq_cpu_time, s.posteq_cpu_time, 1e-16); + + checkEqualArray(r.x0, s.x0, 1e-16, 1e-16, "x0"); + checkEqualArray(r.sx0, s.sx0, 1e-16, 1e-16, "sx0"); + + ASSERT_TRUE(r.llh == s.llh || (std::isnan(r.llh) && std::isnan(s.llh))); + ASSERT_TRUE(r.chi2 == s.chi2 || (std::isnan(r.llh) && std::isnan(s.llh))); + ASSERT_EQ(r.status, s.status); + + checkEqualArray(r.sllh, s.sllh, 1e-5, 1e-5, "sllh"); + checkEqualArray(r.s2llh, s.s2llh, 1e-5, 1e-5, "s2llh"); +} + +class SolverSerializationTest : public ::testing::Test { + protected: + void SetUp() override { + // set non-default values for all members + solver.setAbsoluteTolerance(1e-4); + solver.setRelativeTolerance(1e-5); + solver.setAbsoluteToleranceQuadratures(1e-6); + solver.setRelativeToleranceQuadratures(1e-7); + solver.setAbsoluteToleranceSteadyState(1e-8); + solver.setRelativeToleranceSteadyState(1e-9); + solver.setSensitivityMethod(amici::SensitivityMethod::adjoint); + solver.setSensitivityOrder(amici::SensitivityOrder::second); + solver.setMaxSteps(1e1); + solver.setMaxStepsBackwardProblem(1e2); + solver.setNewtonMaxSteps(1e3); + solver.setNewtonMaxLinearSteps(1e4); + solver.setPreequilibration(true); + solver.setStateOrdering(static_cast(amici::SUNLinSolKLU::StateOrdering::COLAMD)); + solver.setInterpolationType(amici::InterpolationType::polynomial); + solver.setStabilityLimitFlag(false); + solver.setLinearSolver(amici::LinearSolver::dense); + solver.setLinearMultistepMethod(amici::LinearMultistepMethod::adams); + 
solver.setNonlinearSolverIteration(amici::NonlinearSolverIteration::newton); + solver.setInternalSensitivityMethod(amici::InternalSensitivityMethod::staggered); + solver.setReturnDataReportingMode(amici::RDataReporting::likelihood); + } + + amici::CVodeSolver solver; +}; + +TEST(ModelSerializationTest, ToFile) +{ + int np = 1; + int nk = 2; + int nx = 3; + int ny = 4; + int nz = 5; + int ne = 6; + amici::CVodeSolver solver; + amici::Model_Test m = amici::Model_Test( + amici::ModelDimensions( + nx, // nx_rdata + nx, // nxtrue_rdata + nx, // nx_solver + nx, // nxtrue_solver + 0, // nx_solver_reinit + np, // np + nk, // nk + ny, // ny + ny, // nytrue + nz, // nz + nz, // nztrue + ne, // ne + 0, // nJ + 9, // nw + 2, // ndwdx + 2, // ndwdp + 2, // dwdw + 13, // ndxdotdw + {}, // ndJydy + 15, // nnz + 16, // ubw + 17 // lbw + ), + amici::SimulationParameters( + std::vector(nk, 0.0), + std::vector(np, 0.0), + std::vector(np, 0) + ), + amici::SecondOrderMode::none, + std::vector(nx, 0.0), + std::vector(nz, 0)); + + { + std::ofstream ofs("sstore.dat"); + boost::archive::text_oarchive oar(ofs); + // oar & static_cast(solver); + oar& static_cast(m); + } + { + std::ifstream ifs("sstore.dat"); + boost::archive::text_iarchive iar(ifs); + amici::CVodeSolver v; + amici::Model_Test n; + // iar &static_cast(v); + iar& static_cast(n); + // CHECK_TRUE(solver == v); + ASSERT_EQ(m, n); + } +} + +TEST(ReturnDataSerializationTest, ToString) +{ + int np = 1; + int nk = 2; + int nx = 3; + int ny = 4; + int nz = 5; + int ne = 6; + amici::CVodeSolver solver; + amici::Model_Test m = amici::Model_Test( + amici::ModelDimensions( + nx, // nx_rdata + nx, // nxtrue_rdata + nx, // nx_solver + nx, // nxtrue_solver + 0, // nx_solver_reinit + np, // np + nk, // nk + ny, // ny + ny, // nytrue + nz, // nz + nz, // nztrue + ne, // ne + 0, // nJ + 9, // nw + 10, // ndwdx + 2, // ndwdp + 12, // dwdw + 13, // ndxdotdw + {}, // ndJydy + 15, // nnz + 16, // ubw + 17 // lbw + ), + amici::SimulationParameters( + std::vector(nk, 0.0), + std::vector(np, 0.0), + std::vector(np, 0) + ), + amici::SecondOrderMode::none, + std::vector(nx, 0.0), + std::vector(nz, 0)); + + amici::ReturnData r(solver, m); + + std::string serialized = amici::serializeToString(r); + + checkReturnDataEqual( + r, amici::deserializeFromString(serialized)); +} + +TEST_F(SolverSerializationTest, ToChar) +{ + int length; + char* buf = amici::serializeToChar(solver, &length); + + amici::CVodeSolver v = + amici::deserializeFromChar(buf, length); + + delete[] buf; + ASSERT_EQ(solver, v); +} + +TEST_F(SolverSerializationTest, ToStdVec) +{ + + auto buf = amici::serializeToStdVec(solver); + amici::CVodeSolver v = + amici::deserializeFromChar(buf.data(), buf.size()); + + ASSERT_EQ(solver, v); +} diff --git a/deps/AMICI/tests/cpputest/wrapTestModels.m b/deps/AMICI/tests/cpp/wrapTestModels.m similarity index 100% rename from deps/AMICI/tests/cpputest/wrapTestModels.m rename to deps/AMICI/tests/cpp/wrapTestModels.m diff --git a/deps/AMICI/tests/cpputest/events/CMakeLists.txt b/deps/AMICI/tests/cpputest/events/CMakeLists.txt deleted file mode 100644 index 3197ed7a7..000000000 --- a/deps/AMICI/tests/cpputest/events/CMakeLists.txt +++ /dev/null @@ -1,18 +0,0 @@ -get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) -project(model_${MODEL_NAME}_test) - -set(SRC_LIST - ../main.cpp - tests1.cpp -) - -include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CppUTest_INCLUDE_DIRS}) - -add_executable(${PROJECT_NAME} ${SRC_LIST}) - -target_link_libraries(${PROJECT_NAME} - 
amici-testing - model_${MODEL_NAME} -) - -add_test(NAME ${PROJECT_NAME} COMMAND ./${PROJECT_NAME} -c) diff --git a/deps/AMICI/tests/cpputest/jakstat_adjoint_o2/CMakeLists.txt b/deps/AMICI/tests/cpputest/jakstat_adjoint_o2/CMakeLists.txt deleted file mode 100644 index 3197ed7a7..000000000 --- a/deps/AMICI/tests/cpputest/jakstat_adjoint_o2/CMakeLists.txt +++ /dev/null @@ -1,18 +0,0 @@ -get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) -project(model_${MODEL_NAME}_test) - -set(SRC_LIST - ../main.cpp - tests1.cpp -) - -include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CppUTest_INCLUDE_DIRS}) - -add_executable(${PROJECT_NAME} ${SRC_LIST}) - -target_link_libraries(${PROJECT_NAME} - amici-testing - model_${MODEL_NAME} -) - -add_test(NAME ${PROJECT_NAME} COMMAND ./${PROJECT_NAME} -c) diff --git a/deps/AMICI/tests/cpputest/main.cpp b/deps/AMICI/tests/cpputest/main.cpp deleted file mode 100644 index e588e8ed5..000000000 --- a/deps/AMICI/tests/cpputest/main.cpp +++ /dev/null @@ -1,10 +0,0 @@ -#include -#include - -#include "CppUTest/CommandLineTestRunner.h" -#include "CppUTest/TestHarness.h" - -int main(int argc, char** argv) -{ - return CommandLineTestRunner::RunAllTests(argc, argv); -} diff --git a/deps/AMICI/tests/cpputest/nested_events/CMakeLists.txt b/deps/AMICI/tests/cpputest/nested_events/CMakeLists.txt deleted file mode 100644 index 3197ed7a7..000000000 --- a/deps/AMICI/tests/cpputest/nested_events/CMakeLists.txt +++ /dev/null @@ -1,18 +0,0 @@ -get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) -project(model_${MODEL_NAME}_test) - -set(SRC_LIST - ../main.cpp - tests1.cpp -) - -include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CppUTest_INCLUDE_DIRS}) - -add_executable(${PROJECT_NAME} ${SRC_LIST}) - -target_link_libraries(${PROJECT_NAME} - amici-testing - model_${MODEL_NAME} -) - -add_test(NAME ${PROJECT_NAME} COMMAND ./${PROJECT_NAME} -c) diff --git a/deps/AMICI/tests/cpputest/neuron/CMakeLists.txt b/deps/AMICI/tests/cpputest/neuron/CMakeLists.txt deleted file mode 100644 index 3197ed7a7..000000000 --- a/deps/AMICI/tests/cpputest/neuron/CMakeLists.txt +++ /dev/null @@ -1,18 +0,0 @@ -get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) -project(model_${MODEL_NAME}_test) - -set(SRC_LIST - ../main.cpp - tests1.cpp -) - -include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CppUTest_INCLUDE_DIRS}) - -add_executable(${PROJECT_NAME} ${SRC_LIST}) - -target_link_libraries(${PROJECT_NAME} - amici-testing - model_${MODEL_NAME} -) - -add_test(NAME ${PROJECT_NAME} COMMAND ./${PROJECT_NAME} -c) diff --git a/deps/AMICI/tests/cpputest/neuron_o2/CMakeLists.txt b/deps/AMICI/tests/cpputest/neuron_o2/CMakeLists.txt deleted file mode 100644 index 3197ed7a7..000000000 --- a/deps/AMICI/tests/cpputest/neuron_o2/CMakeLists.txt +++ /dev/null @@ -1,18 +0,0 @@ -get_filename_component(MODEL_NAME ${CMAKE_CURRENT_LIST_DIR} NAME) -project(model_${MODEL_NAME}_test) - -set(SRC_LIST - ../main.cpp - tests1.cpp -) - -include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CppUTest_INCLUDE_DIRS}) - -add_executable(${PROJECT_NAME} ${SRC_LIST}) - -target_link_libraries(${PROJECT_NAME} - amici-testing - model_${MODEL_NAME} -) - -add_test(NAME ${PROJECT_NAME} COMMAND ./${PROJECT_NAME} -c) diff --git a/deps/AMICI/tests/cpputest/steadystate/CMakeLists.txt b/deps/AMICI/tests/cpputest/steadystate/CMakeLists.txt deleted file mode 100644 index 3197ed7a7..000000000 --- a/deps/AMICI/tests/cpputest/steadystate/CMakeLists.txt +++ /dev/null @@ -1,18 +0,0 @@ -get_filename_component(MODEL_NAME 
${CMAKE_CURRENT_LIST_DIR} NAME) -project(model_${MODEL_NAME}_test) - -set(SRC_LIST - ../main.cpp - tests1.cpp -) - -include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CppUTest_INCLUDE_DIRS}) - -add_executable(${PROJECT_NAME} ${SRC_LIST}) - -target_link_libraries(${PROJECT_NAME} - amici-testing - model_${MODEL_NAME} -) - -add_test(NAME ${PROJECT_NAME} COMMAND ./${PROJECT_NAME} -c) diff --git a/deps/AMICI/tests/cpputest/unittests/CMakeLists.txt b/deps/AMICI/tests/cpputest/unittests/CMakeLists.txt deleted file mode 100644 index 376a2dbe9..000000000 --- a/deps/AMICI/tests/cpputest/unittests/CMakeLists.txt +++ /dev/null @@ -1,27 +0,0 @@ -project(unittests) - -# cannot mix CppuTest new override togeter with Boost -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_OLD}") -find_package(Boost COMPONENTS serialization) - -set(SRC_LIST - ../main.cpp - tests1.cpp -) - -if(Boost_FOUND) - set(SRC_LIST ${SRC_LIST} testsSerialization.cpp) - include_directories("${Boost_INCLUDE_DIR}") -endif() - -include_directories(${CMAKE_CURRENT_SOURCE_DIR}) - -add_executable(${PROJECT_NAME} ${SRC_LIST}) - -target_link_libraries(${PROJECT_NAME} - amici-testing - Upstream::amici - ${Boost_LIBRARIES} - ) - -add_test(NAME unittests COMMAND ./unittests -c) diff --git a/deps/AMICI/tests/cpputest/unittests/tests1.cpp b/deps/AMICI/tests/cpputest/unittests/tests1.cpp deleted file mode 100644 index 869c6dc9f..000000000 --- a/deps/AMICI/tests/cpputest/unittests/tests1.cpp +++ /dev/null @@ -1,911 +0,0 @@ -#include "testfunctions.h" - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "CppUTest/TestHarness.h" -#include "CppUTestExt/MockSupport.h" - -namespace amici { -namespace generic_model { - -std::unique_ptr getModel() -{ - return std::make_unique(); -} - -} // namespace generic_model -} // namespace amici - -using namespace amici; - -void -testSolverGetterSetters(CVodeSolver solver, - SensitivityMethod sensi_meth, - SensitivityOrder sensi, - InternalSensitivityMethod ism, - InterpolationType interp, - NonlinearSolverIteration iter, - LinearMultistepMethod lmm, - int steps, - int badsteps, - double tol, - double badtol); - -TEST_GROUP(amici){}; - -TEST_GROUP(model) -{ - int nx = 1, ny = 2, nz = 3, nmaxevent = 4; - std::vector p{ 1.0 }; - std::vector k{ 0.5, 0.4, 0.7 }; - std::vector plist{ 1 }; - std::vector idlist{ 0 }; - std::vector z2event{ 0, 0, 0 }; - Model_Test model = Model_Test( - ModelDimensions( - nx, // nx_rdata - nx, // nxtrue_rdata - nx, // nx_solver - nx, // nxtrue_solver - 0, // nx_solver_reinit - static_cast(p.size()), // np - static_cast(k.size()), // nk - ny, // ny - ny, // nytrue - nz, // nz - nz, // nztrue - nmaxevent, // ne - 0, // nJ - 0, // nw - 0, // ndwdx - 0, // ndwdp - 0, // dwdw - 0, // ndxdotdw - {}, // ndJydy - 0, // nnz - 0, // ubw - 0 // lbw - ), - SimulationParameters(k, p, plist), - SecondOrderMode::none, - idlist, - z2event); - - std::vector unscaled{ NAN }; -}; - -TEST(model, testScalingLin) -{ - model.setParameterScale(ParameterScaling::none); - - CHECK_EQUAL(p[0], model.getParameters()[0]); -} - -TEST(model, testScalingLog) -{ - model.setParameterScale(ParameterScaling::ln); - - DOUBLES_EQUAL(std::log(p[0]), model.getParameters()[0], 1e-16); -} - -TEST(model, testScalingLog10) -{ - model.setParameterScale(ParameterScaling::log10); - - DOUBLES_EQUAL(std::log10(p[0]), model.getParameters()[0], 1e-16); -} - -TEST(model, testParameterScalingLengthMismatch) -{ - // too short - auto pscale = - std::vector(p.size() - 1, ParameterScaling::log10); - 
CHECK_THROWS(AmiException, model.setParameterScale(pscale)); - - // too long - pscale = - std::vector(p.size() + 1, ParameterScaling::log10); - CHECK_THROWS(AmiException, model.setParameterScale(pscale)); -} - -TEST(model, testSetTimepoints){ - CHECK_THROWS(AmiException, - model.setTimepoints(std::vector{ 0.0, 1.0, 0.5 })); -} - -TEST(model, testNameIdGetterSetter) -{ - model.setParameterById("p0", 3.0); - DOUBLES_EQUAL(model.getParameterById("p0"), 3.0, 1e-16); - CHECK_THROWS(AmiException, model.getParameterById("p1")); - DOUBLES_EQUAL( - model.setParametersByIdRegex("p[\\d]+", 5.0), p.size(), 1e-16); - for (const auto& ip : model.getParameters()) - DOUBLES_EQUAL(ip, 5.0, 1e-16); - CHECK_THROWS(AmiException, model.setParametersByIdRegex("k[\\d]+", 5.0)); - - model.setParameterByName("p0", 3.0); - DOUBLES_EQUAL(model.getParameterByName("p0"), 3.0, 1e-16); - CHECK_THROWS(AmiException, model.getParameterByName("p1")); - DOUBLES_EQUAL( - model.setParametersByNameRegex("p[\\d]+", 5.0), p.size(), 1e-16); - for (const auto& ip : model.getParameters()) - DOUBLES_EQUAL(ip, 5.0, 1e-16); - CHECK_THROWS(AmiException, model.setParametersByNameRegex("k[\\d]+", 5.0)); - - model.setFixedParameterById("k0", 3.0); - DOUBLES_EQUAL(model.getFixedParameterById("k0"), 3.0, 1e-16); - CHECK_THROWS(AmiException, model.getFixedParameterById("k4")); - DOUBLES_EQUAL( - model.setFixedParametersByIdRegex("k[\\d]+", 5.0), k.size(), 1e-16); - for (const auto& ik : model.getFixedParameters()) - DOUBLES_EQUAL(ik, 5.0, 1e-16); - CHECK_THROWS(AmiException, - model.setFixedParametersByIdRegex("p[\\d]+", 5.0)); - - model.setFixedParameterByName("k0", 3.0); - DOUBLES_EQUAL(model.getFixedParameterByName("k0"), 3.0, 1e-16); - CHECK_THROWS(AmiException, model.getFixedParameterByName("k4")); - DOUBLES_EQUAL( - model.setFixedParametersByNameRegex("k[\\d]+", 5.0), k.size(), 1e-16); - for (const auto& ik : model.getFixedParameters()) - DOUBLES_EQUAL(ik, 5.0, 1e-16); - CHECK_THROWS(AmiException, - model.setFixedParametersByNameRegex("p[\\d]+", 5.0)); -} - -TEST(model, reinitializeFixedParameterInitialStates) -{ - CHECK_THROWS(AmiException, - model.setReinitializeFixedParameterInitialStates(true)); - model.setReinitializeFixedParameterInitialStates(false); - CHECK_TRUE(!model.getReinitializeFixedParameterInitialStates()); - AmiVector x(nx); - AmiVectorArray sx(model.np(), nx); -} - -TEST_GROUP(symbolicFunctions){}; - -TEST(symbolicFunctions, testSign) -{ - CHECK_EQUAL(-1, sign(-2)); - CHECK_EQUAL(0, sign(0)); - CHECK_EQUAL(1, sign(2)); -} - -TEST(symbolicFunctions, testHeaviside) -{ - CHECK_EQUAL(0, heaviside(-1)); - CHECK_EQUAL(1, heaviside(0)); - CHECK_EQUAL(1, heaviside(1)); -} - -TEST(symbolicFunctions, testMin) -{ - CHECK_EQUAL(-1, amici::min(-1, 2, 0)); - CHECK_EQUAL(-2, amici::min(1, -2, 0)); - CHECK_TRUE(amici::isNaN(amici::min(amici::getNaN(), amici::getNaN(), 0))); - CHECK_EQUAL(-1, amici::min(-1, amici::getNaN(), 0)); - CHECK_EQUAL(-1, amici::min(amici::getNaN(), -1, 0)); -} - -TEST(symbolicFunctions, testMax) -{ - CHECK_EQUAL(2, amici::max(-1, 2, 0)); - CHECK_EQUAL(1, amici::max(1, -2, 0)); - CHECK_TRUE(amici::isNaN(amici::max(amici::getNaN(), amici::getNaN(), 0))); - CHECK_EQUAL(-1, amici::max(-1, amici::getNaN(), 0)); - CHECK_EQUAL(-1, amici::max(amici::getNaN(), -1, 0)); -} - -TEST(symbolicFunctions, testDMin) -{ - CHECK_EQUAL(0, amici::Dmin(1, -1, -2, 0)); - CHECK_EQUAL(1, amici::Dmin(1, -1, 2, 0)); - CHECK_EQUAL(1, amici::Dmin(2, -1, -2, 0)); - CHECK_EQUAL(0, amici::Dmin(2, -1, 2, 0)); -} - -TEST(symbolicFunctions, 
testDMax) -{ - CHECK_EQUAL(1, amici::Dmax(1, -1, -2, 0)); - CHECK_EQUAL(0, amici::Dmax(1, -1, 2, 0)); - CHECK_EQUAL(0, amici::Dmax(2, -1, -2, 0)); - CHECK_EQUAL(1, amici::Dmax(2, -1, 2, 0)); -} - -TEST(symbolicFunctions, testpos_pow) -{ - CHECK_EQUAL(0, amici::pos_pow(-0.1, 3)); - CHECK_EQUAL(pow(0.1, 3), amici::pos_pow(0.1, 3)); -} - -TEST_GROUP(amiciSolver){ }; - -TEST(amiciSolver, testEquality) -{ - IDASolver i1, i2; - CVodeSolver c1, c2; - - CHECK_TRUE(i1 == i2); - CHECK_TRUE(c1 == c2); - CHECK_FALSE(i1 == c1); -} - -TEST(amiciSolver, testClone) -{ - IDASolver i1; - auto i2 = std::unique_ptr(i1.clone()); - CHECK_TRUE(i1 == *i2); - - CVodeSolver c1; - auto c2 = std::unique_ptr(c1.clone()); - CHECK_TRUE(c1 == *c2); - CHECK_FALSE(*i2 == *c2); -} - -TEST_GROUP(amiciSolverIdas){}; - -TEST(amiciSolverIdas, testConstructionDestruction) -{ - IDASolver solver; -} - -TEST_GROUP(edata) -{ - int nx = 1, ny = 2, nz = 3, nmaxevent = 4; - std::vector timepoints = { 1, 2, 3, 4 }; - - std::unique_ptr model = amici::generic_model::getModel(); - - Model_Test testModel = Model_Test( - ModelDimensions( - nx, // nx_rdata - nx, // nxtrue_rdata - nx, // nx_solver - nx, // nxtrue_solver - 0, // nx_solver_reinit - 1, // np - 3, // nk - ny, // ny - ny, // nytrue - nz, // nz - nz, // nztrue - nmaxevent, // ne - 0, // nJ - 0, // nw - 0, // ndwdx - 0, // ndwdp - 0, // dwdw - 0, // ndxdotdw - {}, // ndJydy - 0, // nnz - 0, // ubw - 0 // lbw - ), - SimulationParameters( - std::vector(3, 0.0), - std::vector(1, 0.0), - std::vector(2, 1) - ), - SecondOrderMode::none, - std::vector(), - std::vector()); - void setup() override - { - model->setTimepoints(timepoints); - model->setNMaxEvent(nmaxevent); - testModel.setTimepoints(timepoints); - testModel.setNMaxEvent(nmaxevent); - } - - void teardown() override {} -}; - -TEST(edata, testConstructors1) -{ - auto edata = ExpData(); - CHECK_TRUE(edata.nytrue() == 0); - CHECK_TRUE(edata.nztrue() == 0); - CHECK_TRUE(edata.nmaxevent() == 0); -} -TEST(edata, testConstructors2) -{ - auto edata = ExpData(model->nytrue, model->nztrue, model->nMaxEvent()); - CHECK_TRUE(edata.nytrue() == model->nytrue); - CHECK_TRUE(edata.nztrue() == model->nztrue); - CHECK_TRUE(edata.nmaxevent() == model->nMaxEvent()); -} - -TEST(edata, testConstructors3) -{ - auto edata = - ExpData(model->nytrue, model->nztrue, model->nMaxEvent(), timepoints); - CHECK_TRUE(edata.nytrue() == model->nytrue); - CHECK_TRUE(edata.nztrue() == model->nztrue); - CHECK_TRUE(edata.nmaxevent() == model->nMaxEvent()); - CHECK_TRUE(edata.nt() == model->nt()); - checkEqualArray( - timepoints, edata.getTimepoints(), TEST_ATOL, TEST_RTOL, "ts"); -} - -TEST(edata, testConstructors4) -{ - std::vector y(ny * timepoints.size(), 0.0); - std::vector y_std(ny * timepoints.size(), 0.1); - std::vector z(nz * nmaxevent, 0.0); - std::vector z_std(nz * nmaxevent, 0.1); - - auto edata = ExpData(testModel.nytrue, - testModel.nztrue, - testModel.nMaxEvent(), - timepoints, - y, - y_std, - z, - z_std); - CHECK_TRUE(edata.nytrue() == testModel.nytrue); - CHECK_TRUE(edata.nztrue() == testModel.nztrue); - CHECK_TRUE(edata.nmaxevent() == testModel.nMaxEvent()); - CHECK_TRUE(edata.nt() == testModel.nt()); - checkEqualArray( - timepoints, edata.getTimepoints(), TEST_ATOL, TEST_RTOL, "ts"); - checkEqualArray( - y, edata.getObservedData(), TEST_ATOL, TEST_RTOL, "observedData"); - checkEqualArray(y_std, - edata.getObservedDataStdDev(), - TEST_ATOL, - TEST_RTOL, - "observedDataStdDev"); - checkEqualArray( - z, edata.getObservedEvents(), TEST_ATOL, TEST_RTOL, 
"observedEvents"); - checkEqualArray(z_std, - edata.getObservedEventsStdDev(), - TEST_ATOL, - TEST_RTOL, - "observedEventsStdDev"); - - auto edata_copy = ExpData(edata); - CHECK_TRUE(edata.nytrue() == edata_copy.nytrue()); - CHECK_TRUE(edata.nztrue() == edata_copy.nztrue()); - CHECK_TRUE(edata.nmaxevent() == edata_copy.nmaxevent()); - CHECK_TRUE(edata.nt() == edata_copy.nt()); - checkEqualArray(edata_copy.getTimepoints(), - edata.getTimepoints(), - TEST_ATOL, - TEST_RTOL, - "ts"); - checkEqualArray(edata_copy.getObservedData(), - edata.getObservedData(), - TEST_ATOL, - TEST_RTOL, - "observedData"); - checkEqualArray(edata_copy.getObservedDataStdDev(), - edata.getObservedDataStdDev(), - TEST_ATOL, - TEST_RTOL, - "observedDataStdDev"); - checkEqualArray(edata_copy.getObservedEvents(), - edata.getObservedEvents(), - TEST_ATOL, - TEST_RTOL, - "observedEvents"); - checkEqualArray(edata_copy.getObservedEventsStdDev(), - edata.getObservedEventsStdDev(), - TEST_ATOL, - TEST_RTOL, - "observedEventsStdDev"); -} - -TEST(edata, testConstructors5) -{ - testModel.setTimepoints(timepoints); - auto edata = ExpData(testModel); - CHECK_TRUE(edata.nytrue() == testModel.nytrue); - CHECK_TRUE(edata.nztrue() == testModel.nztrue); - CHECK_TRUE(edata.nmaxevent() == testModel.nMaxEvent()); - CHECK_TRUE(edata.nt() == testModel.nt()); - checkEqualArray(testModel.getTimepoints(), - edata.getTimepoints(), - TEST_ATOL, - TEST_RTOL, - "ts"); -} - -TEST(edata, testDimensionChecks) -{ - - std::vector bad_std(ny, -0.1); - - std::vector y(ny * timepoints.size(), 0.0); - std::vector y_std(ny * timepoints.size(), 0.1); - std::vector z(nz * nmaxevent, 0.0); - std::vector z_std(nz * nmaxevent, 0.1); - - CHECK_THROWS(AmiException, - ExpData(testModel.nytrue, - testModel.nztrue, - testModel.nMaxEvent(), - timepoints, - z, - z_std, - z, - z_std)); - - CHECK_THROWS(AmiException, - ExpData(testModel.nytrue, - testModel.nztrue, - testModel.nMaxEvent(), - timepoints, - z, - bad_std, - z, - z_std)); - - auto edata = ExpData(testModel); - - std::vector bad_y(ny * timepoints.size() + 1, 0.0); - std::vector bad_y_std(ny * timepoints.size() + 1, 0.1); - std::vector bad_z(nz * nmaxevent + 1, 0.0); - std::vector bad_z_std(nz * nmaxevent + 1, 0.1); - - CHECK_THROWS(AmiException, edata.setObservedData(bad_y)); - CHECK_THROWS(AmiException, edata.setObservedDataStdDev(bad_y_std)); - CHECK_THROWS(AmiException, edata.setObservedEvents(bad_z)); - CHECK_THROWS(AmiException, edata.setObservedEventsStdDev(bad_y_std)); - - std::vector bad_single_y(edata.nt() + 1, 0.0); - std::vector bad_single_y_std(edata.nt() + 1, 0.1); - std::vector bad_single_z(edata.nmaxevent() + 1, 0.0); - std::vector bad_single_z_std(edata.nmaxevent() + 1, 0.1); - - CHECK_THROWS(AmiException, edata.setObservedData(bad_single_y, 0)); - CHECK_THROWS(AmiException, - edata.setObservedDataStdDev(bad_single_y_std, 0)); - CHECK_THROWS(AmiException, edata.setObservedEvents(bad_single_z, 0)); - CHECK_THROWS(AmiException, - edata.setObservedEventsStdDev(bad_single_y_std, 0)); - - CHECK_THROWS(AmiException, - edata.setTimepoints(std::vector{ 0.0, 1.0, 0.5 })); -} - -TEST(edata, testSettersGetters) -{ - auto edata = ExpData(testModel); - - std::vector y(ny * timepoints.size(), 0.0); - std::vector y_std(ny * timepoints.size(), 0.1); - std::vector z(nz * nmaxevent, 0.0); - std::vector z_std(nz * nmaxevent, 0.1); - - edata.setObservedData(y); - checkEqualArray( - edata.getObservedData(), y, TEST_ATOL, TEST_RTOL, "ObservedData"); - edata.setObservedDataStdDev(y_std); - 
checkEqualArray(edata.getObservedDataStdDev(), - y_std, - TEST_ATOL, - TEST_RTOL, - "ObservedDataStdDev"); - edata.setObservedEvents(z); - checkEqualArray( - edata.getObservedEvents(), z, TEST_ATOL, TEST_RTOL, "ObservedEvents"); - edata.setObservedEventsStdDev(z_std); - checkEqualArray(edata.getObservedEventsStdDev(), - z_std, - TEST_ATOL, - TEST_RTOL, - "ObservedEventsStdDev"); - - std::vector single_y(edata.nt(), 0.0); - std::vector single_y_std(edata.nt(), 0.1); - - for (int iy = 0; iy < ny; ++iy) { - edata.setObservedData(single_y, iy); - edata.setObservedDataStdDev(single_y_std, iy); - } - CHECK_THROWS(std::exception, edata.setObservedData(single_y, ny)); - CHECK_THROWS(std::exception, edata.setObservedData(single_y, -1)); - CHECK_THROWS(std::exception, edata.setObservedDataStdDev(single_y_std, ny)); - CHECK_THROWS(std::exception, edata.setObservedDataStdDev(single_y_std, -1)); - - std::vector single_z(edata.nmaxevent(), 0.0); - std::vector single_z_std(edata.nmaxevent(), 0.1); - - for (int iz = 0; iz < nz; ++iz) { - edata.setObservedEvents(single_z, iz); - edata.setObservedEventsStdDev(single_z_std, iz); - } - - CHECK_THROWS(std::exception, edata.setObservedEvents(single_z, nz)); - CHECK_THROWS(std::exception, edata.setObservedEvents(single_z, -1)); - CHECK_THROWS(std::exception, - edata.setObservedEventsStdDev(single_z_std, nz)); - CHECK_THROWS(std::exception, - edata.setObservedEventsStdDev(single_z_std, -1)); - - CHECK_TRUE(edata.getObservedDataPtr(0)); - CHECK_TRUE(edata.getObservedDataStdDevPtr(0)); - CHECK_TRUE(edata.getObservedEventsPtr(0)); - CHECK_TRUE(edata.getObservedEventsStdDevPtr(0)); - - std::vector empty(0, 0.0); - - edata.setObservedData(empty); - edata.setObservedDataStdDev(empty); - edata.setObservedEvents(empty); - edata.setObservedEventsStdDev(empty); - - CHECK_TRUE(!edata.getObservedDataPtr(0)); - CHECK_TRUE(!edata.getObservedDataStdDevPtr(0)); - CHECK_TRUE(!edata.getObservedEventsPtr(0)); - CHECK_TRUE(!edata.getObservedEventsStdDevPtr(0)); - - checkEqualArray( - edata.getObservedData(), empty, TEST_ATOL, TEST_RTOL, "ObservedData"); - checkEqualArray(edata.getObservedDataStdDev(), - empty, - TEST_ATOL, - TEST_RTOL, - "ObservedDataStdDev"); - checkEqualArray( - edata.getObservedEvents(), empty, TEST_ATOL, TEST_RTOL, "ObservedEvents"); - checkEqualArray(edata.getObservedEventsStdDev(), - empty, - TEST_ATOL, - TEST_RTOL, - "ObservedEventsStdDev"); -} - -TEST_GROUP(solver) -{ - int nx = 1, ny = 2, nz = 3, ne = 0; - double tol, badtol; - std::vector timepoints = { 1, 2, 3, 4 }; - - std::unique_ptr model = amici::generic_model::getModel(); - SensitivityMethod sensi_meth; - SensitivityOrder sensi; - int steps, badsteps; - LinearMultistepMethod lmm; - NonlinearSolverIteration iter; - InternalSensitivityMethod ism; - InterpolationType interp; - - Model_Test testModel = Model_Test( - ModelDimensions( - nx, // nx_rdata - nx, // nxtrue_rdata - nx, // nx_solver - nx, // nxtrue_solver - 0, // nx_solver_reinit - 1, // np - 3, // nk - ny, // ny - ny, // nytrue - nz, // nz - nz, // nztrue - ne, // ne - 0, // nJ - 0, // nw - 0, // ndwdx - 0, // ndwdp - 0, // dwdw - 0, // ndxdotdw - {}, // ndJydy - 1, // nnz - 0, // ubw - 0 // lbw - ), - SimulationParameters( - std::vector(3, 0.0), - std::vector(1, 0.0), - std::vector(2, 1) - ), - SecondOrderMode::none, - std::vector(0, 0.0), - std::vector()); - - CVodeSolver solver = CVodeSolver(); - - void setup() - { - tol = 0.01; - badtol = -0.01; - sensi_meth = SensitivityMethod::adjoint; - sensi = SensitivityOrder::first; - steps = 1000; - 
badsteps = -1; - lmm = LinearMultistepMethod::adams; - iter = NonlinearSolverIteration::fixedpoint; - ism = InternalSensitivityMethod::staggered1; - interp = InterpolationType::polynomial; - } - - void teardown() {} -}; - -TEST(solver, testSettersGettersNoSetup) -{ - testSolverGetterSetters(solver, - sensi_meth, - sensi, - ism, - interp, - iter, - lmm, - steps, - badsteps, - tol, - badtol); -} - -TEST(solver, testSettersGettersWithSetup) -{ - - solver.setSensitivityMethod(sensi_meth); - CHECK_EQUAL(static_cast(solver.getSensitivityMethod()), - static_cast(sensi_meth)); - - auto rdata = - std::unique_ptr(new ReturnData(solver, testModel)); - AmiVector x(nx), dx(nx); - AmiVectorArray sx(nx, 1), sdx(nx, 1); - - testModel.setInitialStates(std::vector{ 0 }); - - solver.setup(0, &testModel, x, dx, sx, sdx); - - testSolverGetterSetters(solver, - sensi_meth, - sensi, - ism, - interp, - iter, - lmm, - steps, - badsteps, - tol, - badtol); -} - -void -testSolverGetterSetters(CVodeSolver solver, - SensitivityMethod sensi_meth, - SensitivityOrder sensi, - InternalSensitivityMethod ism, - InterpolationType interp, - NonlinearSolverIteration iter, - LinearMultistepMethod lmm, - int steps, - int badsteps, - double tol, - double badtol) -{ - - solver.setSensitivityMethod(sensi_meth); - CHECK_EQUAL(static_cast(solver.getSensitivityMethod()), - static_cast(sensi_meth)); - - solver.setSensitivityOrder(sensi); - CHECK_EQUAL(static_cast(solver.getSensitivityOrder()), - static_cast(sensi)); - - solver.setInternalSensitivityMethod(ism); - CHECK_EQUAL(static_cast(solver.getInternalSensitivityMethod()), - static_cast(ism)); - - solver.setInterpolationType(interp); - CHECK_EQUAL(static_cast(solver.getInterpolationType()), - static_cast(interp)); - - solver.setNonlinearSolverIteration(iter); - CHECK_EQUAL(static_cast(solver.getNonlinearSolverIteration()), - static_cast(iter)); - - solver.setLinearMultistepMethod(lmm); - CHECK_EQUAL(static_cast(solver.getLinearMultistepMethod()), - static_cast(lmm)); - - solver.setPreequilibration(true); - CHECK_EQUAL(solver.getPreequilibration(), true); - - solver.setStabilityLimitFlag(true); - CHECK_EQUAL(solver.getStabilityLimitFlag(), true); - - CHECK_THROWS(AmiException, solver.setNewtonMaxSteps(badsteps)); - solver.setNewtonMaxSteps(steps); - CHECK_EQUAL(solver.getNewtonMaxSteps(), steps); - - CHECK_THROWS(AmiException, solver.setNewtonMaxLinearSteps(badsteps)); - solver.setNewtonMaxLinearSteps(steps); - CHECK_EQUAL(solver.getNewtonMaxLinearSteps(), steps); - - CHECK_THROWS(AmiException, solver.setMaxSteps(badsteps)); - solver.setMaxSteps(steps); - CHECK_EQUAL(solver.getMaxSteps(), steps); - - CHECK_THROWS(AmiException, solver.setMaxStepsBackwardProblem(badsteps)); - solver.setMaxStepsBackwardProblem(steps); - CHECK_EQUAL(solver.getMaxStepsBackwardProblem(), steps); - - CHECK_THROWS(AmiException, solver.setRelativeTolerance(badtol)); - solver.setRelativeTolerance(tol); - CHECK_EQUAL(solver.getRelativeTolerance(), tol); - - CHECK_THROWS(AmiException, solver.setAbsoluteTolerance(badtol)); - solver.setAbsoluteTolerance(tol); - CHECK_EQUAL(solver.getAbsoluteTolerance(), tol); - - CHECK_THROWS(AmiException, solver.setRelativeToleranceQuadratures(badtol)); - solver.setRelativeToleranceQuadratures(tol); - CHECK_EQUAL(solver.getRelativeToleranceQuadratures(), tol); - - CHECK_THROWS(AmiException, solver.setAbsoluteToleranceQuadratures(badtol)); - solver.setAbsoluteToleranceQuadratures(tol); - CHECK_EQUAL(solver.getAbsoluteToleranceQuadratures(), tol); - - CHECK_THROWS(AmiException, 
solver.setRelativeToleranceSteadyState(badtol)); - solver.setRelativeToleranceSteadyState(tol); - CHECK_EQUAL(solver.getRelativeToleranceSteadyState(), tol); - - CHECK_THROWS(AmiException, solver.setAbsoluteToleranceSteadyState(badtol)); - solver.setAbsoluteToleranceSteadyState(tol); - CHECK_EQUAL(solver.getAbsoluteToleranceSteadyState(), tol); -} - -TEST_GROUP(amivector) -{ - std::vector vec1{ 1, 2, 4, 3 }; - std::vector vec2{ 4, 1, 2, 3 }; - std::vector vec3{ 4, 4, 2, 1 }; -}; - -TEST(amivector, vector) -{ - AmiVector av(vec1); - N_Vector nvec = av.getNVector(); - for (int i = 0; i < av.getLength(); ++i) - CHECK_EQUAL(av.at(i), NV_Ith_S(nvec, i)); -} - -TEST(amivector, vectorArray) -{ - AmiVectorArray ava(4, 3); - AmiVector av1(vec1), av2(vec2), av3(vec3); - std::vector avs{ av1, av2, av3 }; - for (int i = 0; i < ava.getLength(); ++i) - ava[i] = avs.at(i); - - std::vector badLengthVector(13, 0.0); - std::vector flattened(12, 0.0); - - CHECK_THROWS(AmiException, ava.flatten_to_vector(badLengthVector)); - ava.flatten_to_vector(flattened); - for (int i = 0; i < ava.getLength(); ++i) { - const AmiVector av = ava[i]; - for (int j = 0; j < av.getLength(); ++j) - CHECK_EQUAL(flattened.at(i * av.getLength() + j), av.at(j)); - } -} - -TEST_GROUP(sunmatrixwrapper) -{ - //inputs - std::vector a{0.82, 0.91, 0.13}; - std::vector b{0.77, 0.80}; - SUNMatrixWrapper A = SUNMatrixWrapper(3, 2); - SUNMatrixWrapper B = SUNMatrixWrapper(4, 4, 7, CSC_MAT); - // result - std::vector d{1.3753, 1.5084, 1.1655}; - - void setup() override { - A.set_data(0, 0, 0.69); - A.set_data(1, 0, 0.32); - A.set_data(2, 0, 0.95); - A.set_data(0, 1, 0.03); - A.set_data(1, 1, 0.44); - A.set_data(2, 1, 0.38); - - B.set_indexptr(0, 0); - B.set_indexptr(1, 2); - B.set_indexptr(2, 4); - B.set_indexptr(3, 5); - B.set_indexptr(4, 7); - B.set_data(0, 3); - B.set_data(1, 1); - B.set_data(2, 3); - B.set_data(3, 7); - B.set_data(4, 1); - B.set_data(5, 2); - B.set_data(6, 9); - B.set_indexval(0, 1); - B.set_indexval(1, 3); - B.set_indexval(2, 0); - B.set_indexval(3, 2); - B.set_indexval(4, 0); - B.set_indexval(5, 1); - B.set_indexval(6, 3); - - } -}; - -TEST(sunmatrixwrapper, sparse_multiply) -{ - - auto A_sparse = SUNMatrixWrapper(A, 0.0, CSC_MAT); - auto c(a); //copy c - A_sparse.multiply(c, b); - checkEqualArray(d, c, TEST_ATOL, TEST_RTOL, "multiply"); -} - -TEST(sunmatrixwrapper, sparse_multiply_empty) -{ - // Ensure empty Matrix vector multiplication succeeds - auto A_sparse = SUNMatrixWrapper(1, 1, 0, CSR_MAT); - std::vector b {0.1}; - std::vector c {0.1}; - A_sparse.multiply(c, b); - CHECK_TRUE(c[0] == 0.1); - - A_sparse = SUNMatrixWrapper(1, 1, 0, CSC_MAT); - A_sparse.multiply(c, b); - CHECK_TRUE(c[0] == 0.1); -} - -TEST(sunmatrixwrapper, dense_multiply) -{ - auto c(a); //copy c - A.multiply(c, b); - checkEqualArray(d, c, TEST_ATOL, TEST_RTOL, "multiply"); -} - -TEST(sunmatrixwrapper, multiply_throws) -{ - auto b_amivector = AmiVector(b); - auto a_amivector = AmiVector(a); -} - -TEST(sunmatrixwrapper, transform_throws) -{ - CHECK_THROWS(std::invalid_argument, SUNMatrixWrapper(A, 0.0, 13)); - auto A_sparse = SUNMatrixWrapper(A, 0.0, CSR_MAT); - CHECK_THROWS(std::invalid_argument, SUNMatrixWrapper(A_sparse, 0.0, CSR_MAT)); -} - -TEST(sunmatrixwrapper, block_transpose) -{ - auto B_sparse = SUNMatrixWrapper(4, 4, 7, CSR_MAT); - CHECK_THROWS(std::domain_error, B.transpose(B_sparse, 1.0, 4)); - - B_sparse = SUNMatrixWrapper(4, 4, 7, CSC_MAT); - B.transpose(B_sparse, -1.0, 2); - for (int idx = 0; idx < 7; idx++) { - 
CHECK_TRUE(SM_INDEXVALS_S(B.get())[idx] - == SM_INDEXVALS_S(B_sparse.get())[idx]); - if (idx == 1) { - CHECK_TRUE(SM_DATA_S(B.get())[idx] - == -SM_DATA_S(B_sparse.get())[3]); - } else if (idx == 3) { - CHECK_TRUE(SM_DATA_S(B.get())[idx] - == -SM_DATA_S(B_sparse.get())[1]); - } else { - CHECK_TRUE(SM_DATA_S(B.get())[idx] - == -SM_DATA_S(B_sparse.get())[idx]); - } - } - for (int icol = 0; icol <= 4; icol++) - CHECK_TRUE(SM_INDEXPTRS_S(B.get())[icol] - == SM_INDEXPTRS_S(B_sparse.get())[icol]); -} diff --git a/deps/AMICI/tests/cpputest/unittests/testsSerialization.cpp b/deps/AMICI/tests/cpputest/unittests/testsSerialization.cpp deleted file mode 100644 index 520f3b34e..000000000 --- a/deps/AMICI/tests/cpputest/unittests/testsSerialization.cpp +++ /dev/null @@ -1,258 +0,0 @@ -#include -#include // needs to be included before cpputest -#include - -#include "testfunctions.h" - -#include - -#include "CppUTest/TestHarness.h" -#include "CppUTestExt/MockSupport.h" - -void -checkReturnDataEqual(amici::ReturnData const& r, amici::ReturnData const& s) -{ - CHECK_EQUAL(r.np, s.np); - CHECK_EQUAL(r.nk, s.nk); - CHECK_EQUAL(r.nx, s.nx); - CHECK_EQUAL(r.nxtrue, s.nxtrue); - CHECK_EQUAL(r.nx_solver, s.nx_solver); - CHECK_EQUAL(r.nx_solver_reinit, s.nx_solver_reinit); - CHECK_EQUAL(r.ny, s.ny); - CHECK_EQUAL(r.nytrue, s.nytrue); - CHECK_EQUAL(r.nz, s.nz); - CHECK_EQUAL(r.nztrue, s.nztrue); - CHECK_EQUAL(r.ne, s.ne); - CHECK_EQUAL(r.nJ, s.nJ); - CHECK_EQUAL(r.nplist, s.nplist); - CHECK_EQUAL(r.nmaxevent, s.nmaxevent); - CHECK_EQUAL(r.nt, s.nt); - CHECK_EQUAL(r.newton_maxsteps, s.newton_maxsteps); - CHECK_TRUE(r.pscale == s.pscale); - CHECK_EQUAL(static_cast(r.o2mode), static_cast(s.o2mode)); - CHECK_EQUAL(static_cast(r.sensi), static_cast(s.sensi)); - CHECK_EQUAL(static_cast(r.sensi_meth), static_cast(s.sensi_meth)); - - using amici::checkEqualArray; - checkEqualArray(r.ts, s.ts, 1e-16, 1e-16, "ts"); - checkEqualArray(r.xdot, s.xdot, 1e-16, 1e-16, "xdot"); - checkEqualArray(r.J, s.J, 1e-16, 1e-16, "J"); - checkEqualArray(r.z, s.z, 1e-16, 1e-16, "z"); - checkEqualArray(r.sigmaz, s.sigmaz, 1e-16, 1e-16, "sigmaz"); - checkEqualArray(r.sz, s.sz, 1e-16, 1e-16, "sz"); - checkEqualArray(r.ssigmaz, s.ssigmaz, 1e-16, 1e-16, "ssigmaz"); - checkEqualArray(r.rz, s.rz, 1e-16, 1e-16, "rz"); - checkEqualArray(r.srz, s.srz, 1e-16, 1e-16, "srz"); - checkEqualArray(r.s2rz, s.s2rz, 1e-16, 1e-16, "s2rz"); - checkEqualArray(r.x, s.x, 1e-16, 1e-16, "x"); - checkEqualArray(r.sx, s.sx, 1e-16, 1e-16, "sx"); - - checkEqualArray(r.y, s.y, 1e-16, 1e-16, "y"); - checkEqualArray(r.sigmay, s.sigmay, 1e-16, 1e-16, "sigmay"); - checkEqualArray(r.sy, s.sy, 1e-16, 1e-16, "sy"); - checkEqualArray(r.ssigmay, s.ssigmay, 1e-16, 1e-16, "ssigmay"); - - CHECK_TRUE(r.numsteps == s.numsteps); - CHECK_TRUE(r.numstepsB == s.numstepsB); - CHECK_TRUE(r.numrhsevals == s.numrhsevals); - CHECK_TRUE(r.numrhsevalsB == s.numrhsevalsB); - CHECK_TRUE(r.numerrtestfails == s.numerrtestfails); - CHECK_TRUE(r.numerrtestfailsB == s.numerrtestfailsB); - CHECK_TRUE(r.numnonlinsolvconvfails == s.numnonlinsolvconvfails); - CHECK_TRUE(r.numnonlinsolvconvfailsB == s.numnonlinsolvconvfailsB); - CHECK_TRUE(r.order == s.order); - CHECK_TRUE(r.cpu_time == s.cpu_time); - CHECK_TRUE(r.cpu_timeB == s.cpu_timeB); - - CHECK_TRUE(r.preeq_status == s.preeq_status); - CHECK_TRUE(r.preeq_t == s.preeq_t || - (std::isnan(r.preeq_t) && std::isnan(s.preeq_t))); - CHECK_TRUE(r.preeq_wrms == s.preeq_wrms || - (std::isnan(r.preeq_wrms) && std::isnan(s.preeq_wrms))); - CHECK_TRUE(r.preeq_numsteps 
== s.preeq_numsteps); - CHECK_TRUE(r.preeq_numlinsteps == s.preeq_numlinsteps); - DOUBLES_EQUAL(r.preeq_cpu_time, s.preeq_cpu_time, 1e-16); - - CHECK_TRUE(r.posteq_status == s.posteq_status); - CHECK_TRUE(r.posteq_t == s.posteq_t || - (std::isnan(r.posteq_t) && std::isnan(s.posteq_t))); - CHECK_TRUE(r.posteq_wrms == s.posteq_wrms || - (std::isnan(r.posteq_wrms) && std::isnan(s.posteq_wrms))); - CHECK_TRUE(r.posteq_numsteps == s.posteq_numsteps); - CHECK_TRUE(r.posteq_numlinsteps == s.posteq_numlinsteps); - DOUBLES_EQUAL(r.posteq_cpu_time, s.posteq_cpu_time, 1e-16); - - checkEqualArray(r.x0, s.x0, 1e-16, 1e-16, "x0"); - checkEqualArray(r.sx0, s.sx0, 1e-16, 1e-16, "sx0"); - - CHECK_TRUE(r.llh == s.llh || (std::isnan(r.llh) && std::isnan(s.llh))); - CHECK_TRUE(r.chi2 == s.chi2 || (std::isnan(r.llh) && std::isnan(s.llh))); - CHECK_EQUAL(r.status, s.status); - - checkEqualArray(r.sllh, s.sllh, 1e-5, 1e-5, "sllh"); - checkEqualArray(r.s2llh, s.s2llh, 1e-5, 1e-5, "s2llh"); -} - -// clang-format off -TEST_GROUP(dataSerialization){ - amici::CVodeSolver solver; - void setup() override { - // set non-default values for all members - solver.setAbsoluteTolerance(1e-4); - solver.setRelativeTolerance(1e-5); - solver.setAbsoluteToleranceQuadratures(1e-6); - solver.setRelativeToleranceQuadratures(1e-7); - solver.setAbsoluteToleranceSteadyState(1e-8); - solver.setRelativeToleranceSteadyState(1e-9); - solver.setSensitivityMethod(amici::SensitivityMethod::adjoint); - solver.setSensitivityOrder(amici::SensitivityOrder::second); - solver.setMaxSteps(1e1); - solver.setMaxStepsBackwardProblem(1e2); - solver.setNewtonMaxSteps(1e3); - solver.setNewtonMaxLinearSteps(1e4); - solver.setPreequilibration(true); - solver.setStateOrdering(static_cast(amici::SUNLinSolKLU::StateOrdering::COLAMD)); - solver.setInterpolationType(amici::InterpolationType::polynomial); - solver.setStabilityLimitFlag(false); - solver.setLinearSolver(amici::LinearSolver::dense); - solver.setLinearMultistepMethod(amici::LinearMultistepMethod::adams); - solver.setNonlinearSolverIteration(amici::NonlinearSolverIteration::newton); - solver.setInternalSensitivityMethod(amici::InternalSensitivityMethod::staggered); - solver.setReturnDataReportingMode(amici::RDataReporting::likelihood); - } -}; -// clang-format on - -TEST(dataSerialization, testFile) -{ - int np = 1; - int nk = 2; - int nx = 3; - int ny = 4; - int nz = 5; - int ne = 6; - amici::CVodeSolver solver; - amici::Model_Test m = amici::Model_Test( - amici::ModelDimensions( - nx, // nx_rdata - nx, // nxtrue_rdata - nx, // nx_solver - nx, // nxtrue_solver - 0, // nx_solver_reinit - np, // np - nk, // nk - ny, // ny - ny, // nytrue - nz, // nz - nz, // nztrue - ne, // ne - 0, // nJ - 9, // nw - 2, // ndwdx - 2, // ndwdp - 2, // dwdw - 13, // ndxdotdw - {}, // ndJydy - 15, // nnz - 16, // ubw - 17 // lbw - ), - amici::SimulationParameters( - std::vector(nk, 0.0), - std::vector(np, 0.0), - std::vector(np, 0) - ), - amici::SecondOrderMode::none, - std::vector(nx, 0.0), - std::vector(nz, 0)); - - { - std::ofstream ofs("sstore.dat"); - boost::archive::text_oarchive oar(ofs); - // oar & static_cast(solver); - oar& static_cast(m); - } - { - std::ifstream ifs("sstore.dat"); - boost::archive::text_iarchive iar(ifs); - amici::CVodeSolver v; - amici::Model_Test n; - // iar &static_cast(v); - iar& static_cast(n); - // CHECK_TRUE(solver == v); - CHECK_TRUE(m == n); - } -} - -TEST(dataSerialization, testString) -{ - int np = 1; - int nk = 2; - int nx = 3; - int ny = 4; - int nz = 5; - int ne = 6; - 
amici::CVodeSolver solver; - amici::Model_Test m = amici::Model_Test( - amici::ModelDimensions( - nx, // nx_rdata - nx, // nxtrue_rdata - nx, // nx_solver - nx, // nxtrue_solver - 0, // nx_solver_reinit - np, // np - nk, // nk - ny, // ny - ny, // nytrue - nz, // nz - nz, // nztrue - ne, // ne - 0, // nJ - 9, // nw - 10, // ndwdx - 2, // ndwdp - 12, // dwdw - 13, // ndxdotdw - {}, // ndJydy - 15, // nnz - 16, // ubw - 17 // lbw - ), - amici::SimulationParameters( - std::vector(nk, 0.0), - std::vector(np, 0.0), - std::vector(np, 0) - ), - amici::SecondOrderMode::none, - std::vector(nx, 0.0), - std::vector(nz, 0)); - - amici::ReturnData r(solver, m); - - std::string serialized = amici::serializeToString(r); - - checkReturnDataEqual( - r, amici::deserializeFromString(serialized)); -} - -TEST(dataSerialization, testChar) -{ - int length; - char* buf = amici::serializeToChar(solver, &length); - - amici::CVodeSolver v = - amici::deserializeFromChar(buf, length); - - delete[] buf; - CHECK_TRUE(solver == v); -} - -TEST(dataSerialization, testStdVec) -{ - - auto buf = amici::serializeToStdVec(solver); - amici::CVodeSolver v = - amici::deserializeFromChar(buf.data(), buf.size()); - - CHECK_TRUE(solver == v); -} diff --git a/deps/AMICI/tests/generateTestConfigurationForExamples.sh b/deps/AMICI/tests/generateTestConfigurationForExamples.sh index 578228ee3..e54f6f5ec 100755 --- a/deps/AMICI/tests/generateTestConfigurationForExamples.sh +++ b/deps/AMICI/tests/generateTestConfigurationForExamples.sh @@ -1,22 +1,24 @@ #!/bin/bash # Generate AMICI configuration for test models +set -eou pipefail + # AMICI root directory -AMICI_PATH="`dirname \"$BASH_SOURCE\"`" -AMICI_PATH="`( cd \"$AMICI_PATH/..\" && pwd )`" +AMICI_PATH=$(dirname "$BASH_SOURCE") +AMICI_PATH=$( cd "$AMICI_PATH/.." 
&& pwd ) # File with test configuration -TEST_FILE="${AMICI_PATH}/tests/cpputest/testOptions.h5" +TEST_FILE="${AMICI_PATH}/tests/cpp/testOptions.h5" # Delete old config -rm ${TEST_FILE} +rm "${TEST_FILE}" -cd ${AMICI_PATH}/tests/generateTestConfig -./example_dirac.py ${TEST_FILE} -./example_events.py ${TEST_FILE} -./example_jakstat.py ${TEST_FILE} -./example_nested_events.py ${TEST_FILE} -./example_neuron.py ${TEST_FILE} -./example_robertson.py ${TEST_FILE} -./example_steadystate.py ${TEST_FILE} -./example_calvetti.py ${TEST_FILE} +cd "${AMICI_PATH}/tests/generateTestConfig" +./example_dirac.py "${TEST_FILE}" +./example_events.py "${TEST_FILE}" +./example_jakstat.py "${TEST_FILE}" +./example_nested_events.py "${TEST_FILE}" +./example_neuron.py "${TEST_FILE}" +./example_robertson.py "${TEST_FILE}" +./example_steadystate.py "${TEST_FILE}" +./example_calvetti.py "${TEST_FILE}" diff --git a/templates/CMakeLists.template.txt b/templates/CMakeLists.template.txt index 59add1e36..677ae3119 100644 --- a/templates/CMakeLists.template.txt +++ b/templates/CMakeLists.template.txt @@ -24,8 +24,6 @@ set(CMAKE_CXX_STANDARD_REQUIRED True) # call project to select language and required by some modules project(${MODEL_NAME}) # for IDE -set(BUILD_TESTS FALSE CACHE BOOL "Build tests?") - find_package(ParPE REQUIRED) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wno-unused-function -fopenmp -D_GNU_SOURCE") # -D_GNU_SOURCE for pthread recursive mutex issues @@ -37,29 +35,12 @@ add_definitions(-DGIT_VERSION="${GIT_VERSION}") include_directories(${ParPE_INCLUDE_DIRS}) -if(${BUILD_TESTS}) - include (CTest) - set(AMICI_DIR "${CMAKE_CURRENT_LIST_DIR}/../parPE/deps/AMICI/") - set(CPPUTEST_DIR "${AMICI_DIR}/ThirdParty/cpputest-master/" CACHE PATH "") - - set(CPPUTEST_INCLUDE_DIR "${CPPUTEST_DIR}/include") - set(CPPUTEST_LIBRARY "${CPPUTEST_DIR}/lib/libCppUTest.a" - "${CPPUTEST_DIR}/lib/libCppUTestExt.a") - include_directories(${CPPUTEST_INCLUDE_DIR}) -endif(${BUILD_TESTS}) - ################################################ # AMICI model ################################################ add_subdirectory(model) -# unit tests -if(${BUILD_TESTS}) -# add_subdirectory(tests) -endif(${BUILD_TESTS}) - - ################################# # parameter estimation executable ################################# @@ -74,15 +55,6 @@ target_link_libraries(${PROJECT_NAME} Upstream::parpe ) -if(${BUILD_TESTS}) - # Run program with sample data as test - set(TESTS_MPIEXEC_COMMAND mpiexec -n 4) - - #add_test (NAME ${PROJECT_NAME} - # COMMAND ${TESTS_MPIEXEC_COMMAND} ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME} -o deleteme ${CMAKE_CURRENT_LIST_DIR}/../data/data4c.h5 - # ) -endif(${BUILD_TESTS}) - ################################# # simulation executable ################################# From b84b1b2ded09f82d3b7595c2184920b5578683cd Mon Sep 17 00:00:00 2001 From: Paul Stapor Date: Wed, 14 Jul 2021 15:15:08 +0200 Subject: [PATCH 02/11] Added a draft for minibatch optimization documentation (#355) * added a draft for minibatch optimiization documentation * fix typos --- doc/minibatch_optimization.md | 145 ++++++++++++++++++++++++++++++++++ 1 file changed, 145 insertions(+) create mode 100644 doc/minibatch_optimization.md diff --git a/doc/minibatch_optimization.md b/doc/minibatch_optimization.md new file mode 100644 index 000000000..e4fb0cc49 --- /dev/null +++ b/doc/minibatch_optimization.md @@ -0,0 +1,145 @@ +# Mini-batch optimization with parPE + +Mini-batch optimization usually refers to gradient-based local optimization +strategies such as 
stochastic gradient descent (SGD) with batch sizes between
+1 (sometimes referred to as online learning) and the whole data set (full batch
+optimization). In the context of parameter estimation for ODE models, such as
+done in parPE, this means that in each step of optimization only a subset of
+all experimental conditions is simulated. From those simulations, an estimate
+of the objective function and its gradient is computed and used for
+local optimization. ParPE has its own mini-batch optimization solver
+implemented, which includes a series of different mini-batch optimization
+algorithms, tuning possibilities, and improvements tailored to parameter
+estimation of ODE models. More background on the method can be found in the
+[following preprint](https://www.biorxiv.org/content/10.1101/859884v1).
+
+
+## Using the mini-batch optimization solver of parPE
+
+For the user of parPE, mini-batch optimization works similarly to full-batch
+optimization with Ipopt or Fides; only a group of optimizer settings needs to
+be changed. For this purpose, we assume that the folder `parPE/misc` (which
+includes the file `optimizationOptions.py`) is in the Python path.
+1. The optimizer itself has to be changed to the mini-batch solver via \
+`optimizationOptions.py -S optimizer 10`
+2. At the moment, hierarchical optimization has to be disabled, as it is
+incompatible with the way mini-batches are currently chosen: \
+`optimizationOptions.py -S hierarchicalOptimization 0`
+3. As the computation time (but not necessarily the wall time) is typically
+reduced when using mini-batch optimization, more local optimizations can be run
+for most problems. The number of starts can be set via
+`optimizationOptions.py -S numStarts <number of starts>`
+
+
+## Setting mini-batch optimization specific hyperparameters
+
+Beyond the settings mentioned above, mini-batch optimizers have a couple of
+specific hyperparameters which need to be set for the mini-batch optimizer
+itself. The most intuitive ones are the number of epochs, i.e., the number of
+passes through the whole data set, which roughly corresponds to the maximum
+number of iterations in full-batch optimization, and the mini-batch size.
+Those can be set via \
+`optimizationOptions.py -S minibatch/batchSize <batch size>` \
+or \
+`optimizationOptions.py -S minibatch/maxEpochs <maximum number of epochs>`, \
+respectively.
+
+### Optimization algorithms
+The optimization algorithm can be set via
+`optimizationOptions.py -S minibatch/parameterUpdater <algorithm>`, where
+`<algorithm>` can be chosen among the following ones:
+ * "Adam": Adapted version of the Adam algorithm by Kingma et al.
+ * "AdamClassic": Original version of the Adam algorithm by Kingma et al.
+ * "RmsProp": RMSProp algorithm as proposed by Hinton
+ * "Vanilla": Simple ("Vanilla") stochastic gradient descent
+ * "Momentum": Stochastic gradient descent with (vanilla) momentum
+
+### Learning rate scheduling
+The next important hyperparameter is the learning rate, which influences the
+step size of the optimizer. ParPE allows choosing an initial learning rate,
+a final learning rate (for the last epoch), and a mode of interpolation
+between those two. These can be set via:
+ * `optimizationOptions.py -S minibatch/startLearningRate <initial learning rate>`
+ * `optimizationOptions.py -S minibatch/endLearningRate <final learning rate>`
+ * `optimizationOptions.py -S minibatch/learningRateInterpMode <mode>`, where
+the interpolation mode can be one of `linear`, `inverseLinear`, and
+`logarithmic`; logarithmic interpolation is the default. The sketch below
+illustrates how these three interpolation modes behave.
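+
+The following Python snippet is only an illustrative sketch of what such
+learning-rate schedules typically look like (interpolating between the start
+and end learning rate either directly, in the inverse, or in log space); the
+exact formulas used by parPE may differ, so treat it purely as an intuition
+aid rather than a reference implementation:
+
+```python
+import math
+
+
+def learning_rate(epoch, max_epochs, start=0.1, end=0.001, mode="logarithmic"):
+    """Illustrative learning-rate schedule (assumed formulas, not parPE code)."""
+    if max_epochs <= 1:
+        return start
+    t = epoch / (max_epochs - 1)  # fraction of the optimization completed, in [0, 1]
+    if mode == "linear":
+        return start + t * (end - start)
+    if mode == "inverseLinear":
+        # interpolate linearly in 1 / learning rate
+        return 1.0 / (1.0 / start + t * (1.0 / end - 1.0 / start))
+    if mode == "logarithmic":
+        # interpolate linearly in log(learning rate)
+        return math.exp(math.log(start) + t * (math.log(end) - math.log(start)))
+    raise ValueError(f"unknown interpolation mode: {mode}")
+
+
+for mode in ("linear", "inverseLinear", "logarithmic"):
+    rates = [round(learning_rate(e, 10, mode=mode), 4) for e in range(10)]
+    print(mode, rates)
+```
+
+All three schedules decrease the learning rate from the start to the end
+value, but logarithmic (and inverse-linear) interpolation reaches small
+learning rates much earlier than linear interpolation does.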
+
+### ODE-model specific implementations: Rescue interceptor and line-search
+In addition to those settings, parPE implements some specific improvements for
+mini-batch optimization of ODE models: First, it has a "rescueInterceptor",
+which tries to recover local optimization runs by shrinking the step size if
+the objective function cannot be evaluated. It can be activated via
+`optimizationOptions.py -S minibatch/rescueInterceptor <integer flag>`, where
+`<integer flag>` can be one of
+ * 0 (interceptor switched off)
+ * 1 (interceptor switched on)
+ * 2 (interceptor switched on *and* optimization tries a cold restart if
+   the run cannot be recovered despite reaching the maximum number of
+   step-size shrinkage steps)
+
+Second, parPE allows performing mini-batch optimization using line-search.
+Line-search can be enabled by setting the option
+`optimizationOptions.py -S minibatch/lineSearchSteps <maximum number of steps>`,
+where the maximum number of line-search steps needs to be specified. A maximum
+of 3 to 5 steps has been shown to work best. However, enabling line-search will
+result in slightly higher computation times, so it is a priori not clear
+whether line-search will be beneficial for a particular application or not.
+
+## Choosing hyperparameters
+
+### The learning rate
+The learning rate is a crucial but complicated hyperparameter:
+Keep in mind that a good learning rate depends on the optimization algorithm:
+For vanilla SGD, the learning rate is equal to the step size in parPE, whereas
+for Adam and RmsProp, the learning rate is multiplied by a more sophisticated
+parameter update, which in general has a step length larger than 1. Hence,
+a good learning rate for vanilla SGD will have larger values than for RmsProp
+or Adam (at least for the implementation in parPE).
+For many applications, typical initial optimizer step sizes are often in the
+range of `[0.1, 10]`. For high-dimensional problems, step sizes are typically
+larger than for low-dimensional ones.
+
+For Adam and RmsProp, the initial parameter update, which gets multiplied by
+the learning rate, has roughly the size of the square root of the problem
+dimension. Hence, if the problem has 10,000 free parameters, the initial step
+size is 100-fold larger than the learning rate. For RmsProp, the step size
+behaves similarly to that of Adam, except that the step size is (initially)
+biased towards 0 and hence smaller (by up to one order of magnitude).
+For SGD with momentum, step sizes should behave similarly to those of vanilla
+SGD. These things should be considered when choosing a learning rate schedule.
+
+### The mini-batch size
+In initial tests, small mini-batch sizes worked clearly better than large ones.
+At the moment, there is no way to determine the best mini-batch size a priori,
+so this hyperparameter must be chosen in a trial-and-error manner.
+For this purpose, it seems reasonable to start with very small mini-batch
+sizes and then gradually increase them and check how this affects the
+optimization results.
+
+## Reporting of objective function values and gradients: Important differences
+As already mentioned, parPE only evaluates the objective function and the
+gradient on a subset of the data set in each optimization step. Hence, the
+reported values in the optimization history are only estimates based on the
+current optimization step and are typically highly stochastic.
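+
+To make this concrete, here is a small, self-contained toy example; it does
+not use parPE and does not reflect its actual objective implementation, but it
+shows why a rescaled mini-batch sum fluctuates around the full-data value:
+
+```python
+# Toy illustration (independent of parPE internals): the objective computed on
+# a random mini-batch is only a noisy estimate of the full-data objective.
+import random
+
+random.seed(1)
+
+# hypothetical per-condition objective contributions
+n_conditions = 1000
+contributions = [random.uniform(0.5, 1.5) for _ in range(n_conditions)]
+full_objective = sum(contributions)
+
+batch_size = 10
+for step in range(5):
+    batch = random.sample(range(n_conditions), batch_size)
+    # rescale the mini-batch sum to the size of the full data set
+    estimate = sum(contributions[i] for i in batch) * n_conditions / batch_size
+    print(f"step {step}: mini-batch estimate {estimate:7.1f} vs full {full_objective:7.1f}")
+```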
+
+If optimization finishes successfully, the objective function is recomputed
+once on the whole data set (the values are stored in the last optimization
+step) in order to obtain a more reliable final result.
+
+However, it can make sense to post-process the optimization runs when
+analyzing the optimization result: If a run has finished, the values reported
+as "finalParameter" should be used. Otherwise, it makes sense to simply
+extract the last reported parameter vector of the optimization history for a
+specific local optimization run.
+These extracted final parameter vectors can then be used to compute the
+actual objective function value on the whole data set, with hierarchically
+optimized nuisance parameters where applicable. Such a post-processing and
+post-optimization evaluation step currently provides the best possible
+insight into the optimization result.
From 4b44c7e1f0122d21ad72e73d11237dc00460061e Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Sun, 3 Oct 2021 16:11:05 +0200
Subject: [PATCH 03/11] Docker image with fides optimizer (#356)
---
 CMakeLists.txt | 2 +-
 container/charliecloud/parpe_base/install.sh | 1 +
 container/charliecloud/parpe_base/install_parpe.sh | 4 +++-
 3 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index aea9f82ee..8677fce28 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -78,7 +78,7 @@ pkg_search_module(IPOPT REQUIRED IMPORTED_TARGET GLOBAL ipopt>=3.11.0 ) if(${PARPE_ENABLE_DLIB}) set(DLIB_DIR "${CMAKE_CURRENT_LIST_DIR}/ThirdParty/dlib-19.7" - CACHE FILEPATH "DLIB base directory") + CACHE PATH "DLIB base directory") endif(${PARPE_ENABLE_DLIB}) # PThreads
diff --git a/container/charliecloud/parpe_base/install.sh b/container/charliecloud/parpe_base/install.sh
index dcae9bb02..48428d1fe 100755
--- a/container/charliecloud/parpe_base/install.sh
+++ b/container/charliecloud/parpe_base/install.sh
@@ -31,6 +31,7 @@ apt-get install -q -y \ python3-dev \ python3-pip \ python3-venv \ + libspdlog-dev \ swig3.0 \ unzip \ wget
diff --git a/container/charliecloud/parpe_base/install_parpe.sh b/container/charliecloud/parpe_base/install_parpe.sh
index 470334030..bbc67ac52 100755
--- a/container/charliecloud/parpe_base/install_parpe.sh
+++ b/container/charliecloud/parpe_base/install_parpe.sh
@@ -25,7 +25,8 @@ cmake \ -DCMAKE_BUILD_TYPE=Debug \ -DENABLE_PYTHON=ON \ -DBUILD_TESTS=OFF \ ..
&& make -j12 -#- cd $PARPE_BASE/ThirdParty && ./installCeres.sh +# install fides optimizer +cd $PARPE_BASE/ThirdParty && ./installFides.sh # install parPE python requirements pip install -r "${PARPE_BASE}"/python/requirements.txt @@ -47,6 +48,7 @@ mpi_cmd="$mpi_cmd;--mca;btl_tcp_if_include;lo;" mpi_cmd="$mpi_cmd;--mca;orte_base_help_aggregate;0" CC=mpicc CXX=mpiCC cmake \ + -DPARPE_ENABLE_FIDES=ON \ -DIPOPT_INCLUDE_DIRS=/usr/include/coin/ \ -DIPOPT_LIBRARIES=/usr/lib/libipopt.so \ -DMPI_INCLUDE_DIRS=/usr/include/openmpi-x86_64/ \ From 45e2e41b09b125fae392a790d759e944a6f48e25 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Mon, 4 Oct 2021 13:56:00 +0200 Subject: [PATCH 04/11] Make image work with singularity (#289) - Change installation dir - Fix permissions - Fix CMake package config - Update fides (fixed CMake package config) - Make march=native optional and disable for container image --- CMakeLists.txt | 7 ++- CMakeModules/ParPEConfig.cmake.in | 4 +- ThirdParty/installFides.sh | 2 +- container/charliecloud/parpe_base/Dockerfile | 4 ++ .../charliecloud/parpe_base/install_parpe.sh | 14 +++--- container/singularity/README.md | 44 +++++++++++++++++++ examples/parpeamici/CMakeLists.txt | 2 +- src/parpeamici/optimizationApplication.cpp | 7 ++- 8 files changed, 71 insertions(+), 13 deletions(-) create mode 100644 container/singularity/README.md diff --git a/CMakeLists.txt b/CMakeLists.txt index 8677fce28..229f6e32e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -14,7 +14,6 @@ set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED True) include(BuildType) # Ensure CMAKE_BUILD_TYPE is always set -include(BuildOptimized) include(CTest) set(CMAKE_DEBUG_POSTFIX "-dbg") @@ -59,6 +58,12 @@ set(PARPE_ENABLE_TOMS611 FALSE CACHE BOOL "Enable toms611 optimizer?") set(PARPE_ENABLE_FSQP FALSE CACHE BOOL "Enable FSQP optimizer?") set(PARPE_EXPORT_PACKAGE FALSE CACHE BOOL "Export this build to CMake registry?") +set(PARPE_BUILD_OPTIMIZED TRUE CACHE BOOL "Optimize for current CPU?") + +if(${PARPE_BUILD_OPTIMIZED}) + include(BuildOptimized) +endif(${PARPE_BUILD_OPTIMIZED}) + # OpenMP? 
find_package(OpenMP) diff --git a/CMakeModules/ParPEConfig.cmake.in b/CMakeModules/ParPEConfig.cmake.in index 015538f64..c122eefba 100644 --- a/CMakeModules/ParPEConfig.cmake.in +++ b/CMakeModules/ParPEConfig.cmake.in @@ -19,13 +19,13 @@ endif() if(${PARPE_ENABLE_CERES}) find_dependency(Ceres COMPONENTS - HINTS "${CMAKE_SOURCE_DIR}/ThirdParty/ceres-solver-2.0.0/build/install") + HINTS "@Ceres_DIR@") find_dependency(Eigen3 REQUIRED) endif() if(${PARPE_ENABLE_FIDES}) find_dependency(Fides - HINTS "${CMAKE_SOURCE_DIR}/ThirdParty/fides-cpp/build/") + HINTS "@Fides_DIR@") endif() find_dependency(Amici REQUIRED) diff --git a/ThirdParty/installFides.sh b/ThirdParty/installFides.sh index 39bb9e362..5f4dc6dd6 100755 --- a/ThirdParty/installFides.sh +++ b/ThirdParty/installFides.sh @@ -21,7 +21,7 @@ build_fides() { cd "${script_dir}" git clone https://github.com/dweindl/fides-cpp.git cd "${fides_dir}" - git checkout 9906bdac6a1966ddd4b37b96f98ad8f89770c128 + git checkout 76e1ca57674a20a2821a6ae4987b85035bbad016 fi cd "${fides_dir}" diff --git a/container/charliecloud/parpe_base/Dockerfile b/container/charliecloud/parpe_base/Dockerfile index a94791c47..8465ca3d4 100644 --- a/container/charliecloud/parpe_base/Dockerfile +++ b/container/charliecloud/parpe_base/Dockerfile @@ -7,3 +7,7 @@ RUN /u18/install.sh && rm -rf /tmp && mkdir /tmp ENV BASH_ENV "/etc/drydock/.env" RUN /u18/install_parpe.sh + +ENV PARPE_DIR "/parPE" + +RUN chmod -R ugo+rwX $PARPE_DIR diff --git a/container/charliecloud/parpe_base/install_parpe.sh b/container/charliecloud/parpe_base/install_parpe.sh index bbc67ac52..0ce5b4037 100755 --- a/container/charliecloud/parpe_base/install_parpe.sh +++ b/container/charliecloud/parpe_base/install_parpe.sh @@ -3,14 +3,12 @@ set -euo pipefail set -x -cd +export PARPE_BASE="${PARPE_DIR:-/parPE}" # unpack git archive -mkdir parPE && cd parPE +mkdir "$PARPE_BASE" && cd "$PARPE_BASE" tar -xzf /u18/parpe.tar.gz -export PARPE_BASE=$(pwd) - # Build dependencies # Install AMICI @@ -18,7 +16,7 @@ export AMICI_PATH=${PARPE_BASE}/deps/AMICI/ cd "${AMICI_PATH}" \ && scripts/buildSuiteSparse.sh \ && scripts/buildSundials.sh -mkdir -p "${AMICI_PATH}"/build && cd "${AMICI_PATH}"/build +mkdir -p "${AMICI_PATH}/build" && cd "${AMICI_PATH}/build" cmake \ -DCMAKE_BUILD_TYPE=Debug \ -DENABLE_PYTHON=ON \ @@ -26,10 +24,10 @@ cmake \ .. && make -j12 # install fides optimizer -cd $PARPE_BASE/ThirdParty && ./installFides.sh +cd "$PARPE_BASE/ThirdParty" && ./installFides.sh # install parPE python requirements -pip install -r "${PARPE_BASE}"/python/requirements.txt +pip3 install -r "${PARPE_BASE}"/python/requirements.txt # build parPE cd "${PARPE_BASE}" @@ -48,6 +46,7 @@ mpi_cmd="$mpi_cmd;--mca;btl_tcp_if_include;lo;" mpi_cmd="$mpi_cmd;--mca;orte_base_help_aggregate;0" CC=mpicc CXX=mpiCC cmake \ + -DPARPE_BUILD_OPTIMIZED=OFF \ -DPARPE_ENABLE_FIDES=ON \ -DIPOPT_INCLUDE_DIRS=/usr/include/coin/ \ -DIPOPT_LIBRARIES=/usr/lib/libipopt.so \ @@ -55,6 +54,7 @@ CC=mpicc CXX=mpiCC cmake \ -DBUILD_TESTING=ON \ -DTESTS_MPIEXEC_COMMAND="$mpi_cmd" \ .. + make -j12 VERBOSE=1 # MPI settings for python tests diff --git a/container/singularity/README.md b/container/singularity/README.md new file mode 100644 index 000000000..febee4f27 --- /dev/null +++ b/container/singularity/README.md @@ -0,0 +1,44 @@ +# Using parPE with singularity + +## Singularity + +> Singularity is an open source container platform designed to be simple, fast, +> and secure. 
Singularity is optimized for compute focused enterprise and HPC +>workloads, allowing untrusted users to run untrusted containers in a trusted +>way. + +--- [https://github.com/hpcng/singularity](https://github.com/hpcng/singularity) + +Documentation: [https://sylabs.io/guides/3.0/user-guide/index.html](https://sylabs.io/guides/3.0/user-guide/index.html) + + +## Using parPE with singularity + +Singularity images can be created from available docker containers using: + +``` +singularity pull docker://dweindl/parpe:develop +``` + +To create a custom docker containers, see +https://parpe.readthedocs.io/en/latest/parpe_with_charliecloud.html#generating-parpe-base-docker-image + + +An example for parameter estimation for the model at https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab/tree/master/Benchmark-Models/Zheng_PNAS2012 : + +```shell +AMICI_MODEL_DIR=Zheng_PNAS2012 +PARPE_MODEL_DIR=Zheng_PNAS2012_parpe +PETAB_YAML_FILE=Benchmark-Models-PEtab/Benchmark-Models/Zheng_PNAS2012/Zheng_PNAS2012.yaml +MODEL_NAME=Zheng_PNAS2012 +H5_PE_INPUT=$PARPE_MODEL_DIR/data.h5 + +singularity exec parpe_develop.sif /parPE/misc/run_in_venv.sh /parPE/build/venv amici_import_petab -v -y $PETAB_YAML_FILE & +singularity exec parpe_develop.sif /parPE/misc/setup_amici_model.sh ${AMICI_MODEL_DIR} ${PARPE_MODEL_DIR} +singularity exec parpe_develop.sif /parPE/misc/run_in_venv.sh /parPE/build/venv parpe_petab_to_hdf5 \ + -n ${MODEL_NAME} \ + -y ${PETAB_YAML_FILE} \ + -d ${AMICI_MODEL_DIR} \ + -o ${H5_PE_INPUT} + singularity exec parpe_develop.sif ${PARPE_MODEL_DIR}/build/estimate_$MODEL_NAME -o ${MODEL_NAME}_results/ $H5_PE_INPUT +``` diff --git a/examples/parpeamici/CMakeLists.txt b/examples/parpeamici/CMakeLists.txt index 84d124b19..7cab47e96 100644 --- a/examples/parpeamici/CMakeLists.txt +++ b/examples/parpeamici/CMakeLists.txt @@ -1,6 +1,6 @@ find_package(Amici HINTS ${CMAKE_SOURCE_DIR}/deps/AMICI/build) -# create python virtual environment +# rebuild amici add_custom_target(rebuild_amici) add_custom_command( TARGET rebuild_amici PRE_BUILD diff --git a/src/parpeamici/optimizationApplication.cpp b/src/parpeamici/optimizationApplication.cpp index 29ed2aa0c..49003866a 100644 --- a/src/parpeamici/optimizationApplication.cpp +++ b/src/parpeamici/optimizationApplication.cpp @@ -164,8 +164,13 @@ void OptimizationApplication::logParPEVersion(H5::H5File const& file) const void OptimizationApplication::initMPI(int *argc, char ***argv) { #ifdef PARPE_ENABLE_MPI + int thread_support_provided = 0; + int mpiErr = MPI_Init_thread(argc, argv, MPI_THREAD_MULTIPLE, + &thread_support_provided); + + if (thread_support_provided != MPI_THREAD_MULTIPLE) + throw std::runtime_error("MPI_THREAD_MULTIPLE not supported?"); - int mpiErr = MPI_Init(argc, argv); if (mpiErr != MPI_SUCCESS) { logmessage(LOGLVL_CRITICAL, "Problem initializing MPI. 
Exiting."); exit(1); From 1cb2fa98ed488f08d081f3ccc6e6a49dd0d6ff19 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Mon, 25 Oct 2021 17:21:08 +0200 Subject: [PATCH 05/11] Update AMICI to v0.11.19 (#357) git subrepo clone (merge) --branch=v0.11.19 --force git@github.com:AMICI-dev/AMICI.git deps/AMICI subrepo: subdir: "deps/AMICI" merged: "4610a608" upstream: origin: "git@github.com:AMICI-dev/AMICI.git" branch: "v0.11.19" commit: "4610a608" git-subrepo: version: "0.4.3" origin: "https://github.com/ingydotnet/git-subrepo" commit: "be9f02a" --- .../workflows/test_petab_test_suite.yml | 2 +- .../workflows/test_python_cplusplus.yml | 4 +- .../workflows/test_python_ver_matrix.yml | 2 +- deps/AMICI/.gitrepo | 6 +- deps/AMICI/CHANGELOG.md | 20 + deps/AMICI/CITATION.cff | 79 +++ deps/AMICI/INSTALL.md | 515 ------------------ deps/AMICI/README.md | 2 - deps/AMICI/documentation/amici_refs.bib | 61 +++ deps/AMICI/documentation/cpp_installation.rst | 2 +- deps/AMICI/documentation/index.rst | 3 - .../documentation/python_installation.rst | 10 +- deps/AMICI/documentation/references.md | 19 +- deps/AMICI/include/amici/defines.h | 7 + deps/AMICI/include/amici/model.h | 7 + deps/AMICI/include/amici/steadystateproblem.h | 4 +- .../include/amici/sundials_matrix_wrapper.h | 102 +++- .../matlab/mtoc/config/Doxyfile.template | 1 - deps/AMICI/python/amici/custom_commands.py | 25 +- deps/AMICI/python/amici/import_utils.py | 33 +- deps/AMICI/python/amici/ode_export.py | 94 +++- deps/AMICI/python/amici/pysb_import.py | 17 +- deps/AMICI/python/amici/sbml_import.py | 57 +- deps/AMICI/python/amici/setup.template.py | 6 +- deps/AMICI/python/amici/setuptools.py | 27 +- deps/AMICI/python/sdist/LICENSE.md | 1 + deps/AMICI/python/sdist/MANIFEST.in | 1 + deps/AMICI/python/sdist/setup.cfg | 3 +- .../python/tests/test_preequilibration.py | 14 +- deps/AMICI/scripts/README.md | 6 +- deps/AMICI/scripts/buildBNGL.sh | 6 +- deps/AMICI/scripts/run-codecov.sh | 2 +- deps/AMICI/scripts/run-python-tests.sh | 2 +- deps/AMICI/scripts/run-valgrind-py.sh | 2 +- deps/AMICI/scripts/travis_wrap.sh | 15 - deps/AMICI/src/model.ODE_template.cpp | 4 + deps/AMICI/src/model.cpp | 4 + deps/AMICI/src/model_header.ODE_template.h | 5 + deps/AMICI/src/rdata.cpp | 45 +- deps/AMICI/src/steadystateproblem.cpp | 16 +- deps/AMICI/src/sundials_matrix_wrapper.cpp | 229 +++----- deps/AMICI/swig/amici.i | 1 + .../petab_test_suite/test_petab_suite.py | 11 +- deps/AMICI/version.txt | 2 +- 44 files changed, 651 insertions(+), 823 deletions(-) create mode 100644 deps/AMICI/CITATION.cff delete mode 100755 deps/AMICI/INSTALL.md create mode 120000 deps/AMICI/python/sdist/LICENSE.md delete mode 100755 deps/AMICI/scripts/travis_wrap.sh diff --git a/deps/AMICI/.github/workflows/test_petab_test_suite.yml b/deps/AMICI/.github/workflows/test_petab_test_suite.yml index e39e80016..26148154a 100644 --- a/deps/AMICI/.github/workflows/test_petab_test_suite.yml +++ b/deps/AMICI/.github/workflows/test_petab_test_suite.yml @@ -44,7 +44,7 @@ jobs: - run: | echo "${HOME}/.local/bin/" >> $GITHUB_PATH echo "${GITHUB_WORKSPACE}/tests/performance/" >> $GITHUB_PATH - echo "BNGPATH=${GITHUB_WORKSPACE}/ThirdParty/BioNetGen-2.5.2" >> $GITHUB_ENV + echo "BNGPATH=${GITHUB_WORKSPACE}/ThirdParty/BioNetGen-2.7.0" >> $GITHUB_ENV # install AMICI - name: Install python package diff --git a/deps/AMICI/.github/workflows/test_python_cplusplus.yml b/deps/AMICI/.github/workflows/test_python_cplusplus.yml index 81d32223c..0451ab88f 100644 --- a/deps/AMICI/.github/workflows/test_python_cplusplus.yml +++ 
b/deps/AMICI/.github/workflows/test_python_cplusplus.yml @@ -17,7 +17,7 @@ jobs: - run: git fetch --prune --unshallow - run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV - - run: echo "BNGPATH=${GITHUB_WORKSPACE}/ThirdParty/BioNetGen-2.5.2" >> $GITHUB_ENV + - run: echo "BNGPATH=${GITHUB_WORKSPACE}/ThirdParty/BioNetGen-2.7.0" >> $GITHUB_ENV # sonar cloud - run: echo "SONAR_SCANNER_VERSION=4.5.0.2216" >> $GITHUB_ENV @@ -138,7 +138,7 @@ jobs: - run: git fetch --prune --unshallow - run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV - - run: echo "BNGPATH=${AMICI_DIR}/ThirdParty/BioNetGen-2.5.2" >> $GITHUB_ENV + - run: echo "BNGPATH=${AMICI_DIR}/ThirdParty/BioNetGen-2.7.0" >> $GITHUB_ENV # install amici dependencies - name: homebrew diff --git a/deps/AMICI/.github/workflows/test_python_ver_matrix.yml b/deps/AMICI/.github/workflows/test_python_ver_matrix.yml index 17771c2ae..4e730126d 100644 --- a/deps/AMICI/.github/workflows/test_python_ver_matrix.yml +++ b/deps/AMICI/.github/workflows/test_python_ver_matrix.yml @@ -31,7 +31,7 @@ jobs: steps: - run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV - - run: echo "BNGPATH=${AMICI_DIR}/ThirdParty/BioNetGen-2.5.2" >> $GITHUB_ENV + - run: echo "BNGPATH=${AMICI_DIR}/ThirdParty/BioNetGen-2.7.0" >> $GITHUB_ENV - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 diff --git a/deps/AMICI/.gitrepo b/deps/AMICI/.gitrepo index 6ba515c24..7ec5bc267 100644 --- a/deps/AMICI/.gitrepo +++ b/deps/AMICI/.gitrepo @@ -5,8 +5,8 @@ ; [subrepo] remote = git@github.com:ICB-DCM/AMICI.git - branch = d6a076839dbeb55951c80d6f0d5ca7ade220ea22 - commit = d6a076839dbeb55951c80d6f0d5ca7ade220ea22 - parent = ea1b80659eaf6a8f734df09c7af64dd55c659d3f + branch = v0.11.19 + commit = 4610a608d05731d245218c88015d8017f81087b5 + parent = 45e2e41b09b125fae392a790d759e944a6f48e25 cmdver = 0.4.3 method = merge diff --git a/deps/AMICI/CHANGELOG.md b/deps/AMICI/CHANGELOG.md index 90649a6fc..f2aeeb35b 100644 --- a/deps/AMICI/CHANGELOG.md +++ b/deps/AMICI/CHANGELOG.md @@ -2,6 +2,26 @@ ## v0.X Series +### v0.11.19 (2021-10-13) + +New: +* Added support for observable transformations (lin/log/log10) (#1567). Thereby supporting additional noise distributions in combination with least squares solvers. + +Fixes: +* Fixed a bug when Newton sensitivity computation was activated despite specifying newton_steps == 0. The error occurs when simulation converges to a steadystate but simulation sensitivities are not converged according to convergence criteria. In that case simulation returned failure, but the newton rootfinding "finds" a steadystate even before the iteration check, leading to the erroneous computation of sensitivities via Newton/IFT. For singular jacobians this means the overall simulation still fails, but a different, more informative error message is displayed. 
(#1541) +* Fixed a bug where argument "outdir" in ODEExporter.__init__ would not be used (#1543) + +Other: +* Improve checking support for SBML extensions (#1546) +* SBML import: Use more descriptive IDs for flux expressions (#1551) +* Optimized SUNMatrixWrapper functions (#1538) +* C++: Changed test suite from CppUTest to gtest (#1532) +* Add CITATION.cff (#1559) +* Updated documentation (#1563, #1554, #1536) +* Removed distutils dependency (#1557) +* Require sympy<1.9 + + ### v0.11.18 (2021-07-12) New: diff --git a/deps/AMICI/CITATION.cff b/deps/AMICI/CITATION.cff new file mode 100644 index 000000000..c5e6d8d6f --- /dev/null +++ b/deps/AMICI/CITATION.cff @@ -0,0 +1,79 @@ +authors: + - + family-names: "Fröhlich" + given-names: "Fabian" + orcid: "https://orcid.org/0000-0002-5360-4292" + - + family-names: "Weindl" + given-names: "Daniel" + orcid: "https://orcid.org/0000-0001-9963-6057" + - + family-names: "Schälte" + given-names: "Yannik" + orcid: "https://orcid.org/0000-0003-1293-820X" + - + family-names: "Pathirana" + given-names: "Dilan" + orcid: "https://orcid.org/0000-0001-7000-2659" + - + family-names: "Paszkowski" + given-names: "Lukasz" + - + family-names: "Lines" + given-names: "Glenn Terje" + orcid: "https://orcid.org/0000-0002-6294-1805" + - + family-names: "Stapor" + given-names: "Paul" + orcid: "https://orcid.org/0000-0002-7567-3985" + - + family-names: "Hasenauer" + given-names: "Jan" + orcid: "https://orcid.org/0000-0002-4935-3312" +title: "AMICI: High-Performance Sensitivity Analysis for Large Ordinary Differential Equation Models" + +preferred-citation: + type: article + title: "AMICI: High-Performance Sensitivity Analysis for Large Ordinary Differential Equation Models" + doi: 10.1093/bioinformatics/btab227 + journal: "Bioinformatics" + year: 2021 + month: 4 + start: 1 + end: 1 + authors: + - + family-names: "Fröhlich" + given-names: "Fabian" + orcid: "https://orcid.org/0000-0002-5360-4292" + - + family-names: "Weindl" + given-names: "Daniel" + orcid: "https://orcid.org/0000-0001-9963-6057" + - + family-names: "Schälte" + given-names: "Yannik" + orcid: "https://orcid.org/0000-0003-1293-820X" + - + family-names: "Pathirana" + given-names: "Dilan" + orcid: "https://orcid.org/0000-0001-7000-2659" + - + family-names: "Paszkowski" + given-names: "Lukasz" + - + family-names: "Lines" + given-names: "Glenn Terje" + orcid: "https://orcid.org/0000-0002-6294-1805" + - + family-names: "Stapor" + given-names: "Paul" + orcid: "https://orcid.org/0000-0002-7567-3985" + - + family-names: "Hasenauer" + given-names: "Jan" + orcid: "https://orcid.org/0000-0002-4935-3312" +cff-version: 1.2.0 +message: "If you use this software, please cite both the article from preferred-citation and the software itself." +url: "https://github.com/AMICI-dev/AMICI" +doi: 10.5281/zenodo.597928 diff --git a/deps/AMICI/INSTALL.md b/deps/AMICI/INSTALL.md deleted file mode 100755 index 61140d2e2..000000000 --- a/deps/AMICI/INSTALL.md +++ /dev/null @@ -1,515 +0,0 @@ -# Installation - -## Table of Contents -1. [Availability](#availability) -2. [Python](#python) -3. [MATLAB](#matlab) -4. [C++ only](#cpp) -5. [Dependencies](#dependencies) - - -## Availability - -The sources for AMICI are available as -- Source [tarball](https://github.com/AMICI-dev/AMICI/tarball/master) -- Source [zip](https://github.com/AMICI-dev/AMICI/zipball/master) -- GIT repository on [github](https://github.com/AMICI-dev/AMICI) - -A Python package is available on pypi, see below. 
- -If AMICI was downloaded as a zip, it needs to be unpacked in a -convenient directory. If AMICI was obtained via cloning of the git -repository, no further unpacking is necessary. - -### Obtaining AMICI via the GIT version control system -In order to always stay up-to-date with the latest AMICI versions, -simply pull it from our GIT repository and recompile it when a new -release is available. For more information about GIT checkout their -[website](http://git-scm.com/) - -The GIT repository can currently be found at -[https://github.com/AMICI-dev/AMICI](https://github.com/AMICI-dev/AMICI) -and a direct clone is possible via - - git clone https://github.com/AMICI-dev/AMICI.git AMICI - - -## Python - -To use AMICI from python, install the module and all other requirements -using pip: - - pip3 install amici - -You can now import it as python module: - - import amici - -For cases where this installation fails, check below for special setups -and custom installations. -For Python-AMICI usage see -[https://github.com/AMICI-dev/AMICI/blob/master/documentation/PYTHON.md](https://github.com/AMICI-dev/AMICI/blob/master/documentation/PYTHON.md). - -### Installation of development versions - -To install development versions which have not been released to pypi yet, -you can install AMICI with pip directly from GitHub using: - - pip3 install -e git+https://github.com/AMICI-dev/amici.git@develop#egg=amici\&subdirectory=python/sdist - -Replace `develop` by the branch or commit you want to install. - -Note that this will probably not work on Windows which does not support -symlinks by default -(https://stackoverflow.com/questions/5917249/git-symlinks-in-windows/49913019#49913019). - -### Light installation - -In case you only want to use the AMICI Python package for generating model code -for use with Matlab or C++ and don't want to bothered with any unnecessary -dependencies, you can run - - pip3 install --install-option --no-clibs amici - -Note, however, that you will not be able to compile any model into a -Python extension with this installation. - -NOTE: If you run into an error with above installation command, install -all AMICI dependencies listed in -[`setup.py`](https://github.com/AMICI-dev/AMICI/blob/master/python/sdist/setup.py) -manually, and try again. (This is because `pip` `--install-option`s are -applied to *all* installed packages, including dependencies.) - -### Anaconda - -To use an Anaconda installation of python -([https://www.anaconda.com/distribution/](https://www.anaconda.com/distribution/), -Python>=3.6), proceed as follows: - -Since Anaconda provides own versions of some packages which might not -work with amici (in particular the gcc compiler), create a minimal -virtual environment via: - - conda create --name ENV_NAME pip python - -Here, replace ENV_NAME by some name for the environment. To activate the -environment, do: - - source activate ENV_NAME - -(and `conda deactivate` later to deactivate it again). - -SWIG must be installed and available in your `PATH`, and a -CBLAS-compatible BLAS must be available. You can also use conda to -install the latter locally, using: - - conda install -c conda-forge openblas - -To install AMICI, now do: - - pip install amici - -The option `--no-cache` may be helpful here to make sure the -installation is done completely anew. - -Now, you are ready to use AMICI in the virtual environment. 
- -#### Anaconda on Mac - -If the above installation does not work for you, try installing AMICI -via: - - CFLAGS="-stdlib=libc++" CC=clang CXX=clang pip3 install --verbose amici - -This will use the `clang` compiler. - -You will have to pass the same options when compiling any model later -on. This can be done by inserting the following code before calling -`sbml2amici`: - - import os - os.environ['CC'] = 'clang' - os.environ['CXX'] = 'clang' - os.environ['CFLAGS'] = '-stdlib=libc++' - -(For further discussion see https://github.com/AMICI-dev/AMICI/issues/357) - -### Windows using GCC (mingw) - -To install AMICI on Windows using python, you can proceed as follows: - -Some general remarks: - -* Install all libraries in a path not containing white spaces, - e.g. directly under C:. -* Replace the following paths according to your installation. -* Slashes can be preferable to backslashes for some environment - variables. -* See also [#425](https://github.com/AMICI-dev/amici/issues/425) for - further discussion. - -Then, follow these steps: - -* A python environment for Windows is required. We recommend - [Anaconda](https://www.anaconda.com/distribution/) with python >=3.7. -* Install [MinGW-W64](https://sourceforge.net/projects/mingw-w64/files/) - (32bit will succeed to compile, but fail during linking). - MinGW-W64 GCC-8.1.0 for `x86_64-posix-sjlj` - ([direct link](https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Personal%20Builds/mingw-builds/8.1.0/threads-posix/sjlj/x86_64-8.1.0-release-posix-sjlj-rt_v6-rev0.7z/download) has been shown to work on Windows 7 and 10 test systems. -* Add the following directory to `PATH`: - + `C:\mingw-w64\x86_64-8.1.0-posix-sjlj-rt_v6-rev0\mingw64\bin` -* Make sure that this is the compiler that is found by the system - (e.g. `where gcc` in a `cmd` should point to this installation). -* Download CBLAS headers and libraries, e.g. - [OpenBLAS](https://sourceforge.net/projects/openblas/files/v0.2.19/), - binary distribution 0.2.19. Set the following environment variables: - + `BLAS_CFLAGS=-IC:/OpenBLAS-v0.2.19-Win64-int32/include` - + `BLAS_LIBS=-Wl,-Bstatic -LC:/OpenBLAS-v0.2.19-Win64-int32/lib -lopenblas -Wl,-Bdynamic` -* Install [SWIG](http://www.swig.org/download.html) - (version swigwin-3.0.12 worked) and add the following directory to - `PATH`: - + `C:\swigwin-3.0.12` -* Install AMICI using: - - `pip install --global-option="build_clib" --global-option="--compiler=mingw32" - --global-option="build_ext" --global-option="--compiler=mingw32" - amici --no-cache-dir --verbose` - -Possible sources of errors: - -* On recent Windows versions, - `anaconda3\Lib\distutils\cygwinccompiler.py` fails linking - `msvcr140.dll` with - `[...] x86_64-w64-mingw32/bin/ld.exe: cannot find -lmsvcr140`. - This is not required for amici, so in `cygwinccompiler.py` - `return ['msvcr140']` can be changed to `return []`. -* If you use a python version where - [python/cpython#880](https://github.com/python/cpython/pull/880) - has not been fixed yet, you need to disable - `define hypot _hypot in anaconda3\include/pyconfig.h` yourself. -* `import amici` in python resulting in the very informative - - > ImportError: DLL load failed: The specified module could not be found. - - means that some amici module dependencies were not found (not the - AMICI module itself). - [DependencyWalker](http://www.dependencywalker.com/) will show you - which ones. - - Support for msvc is experimental. 
-[installOpenBLAS.ps1](https://github.com/AMICI-dev/AMICI/blob/master/scripts/installOpenBLAS.ps1)
-and [compileBLAS.cmd](https://github.com/AMICI-dev/AMICI/blob/master/scripts/compileBLAS.cmd)
-may serve as guidance on how to install OpenBLAS using MSVC.
-
-### Windows using MSVC (Visual Studio)
-
-#### Visual Studio
-
-We assume that Visual Studio (not to be confused with Visual Studio Code) is already installed. Using Visual Studio Installer, the following components need to be included:
-
-* Microsoft Visual C++ (MSVC). This is part of multiple packages, including Desktop Development with C++.
-* Windows Universal C Runtime. This is an individual component and installs some DLLs that we need.
-
-#### openBLAS
-
-Installation of AMICI using MSVC also requires a Basic Linear Algebra Subprograms (BLAS) library; OpenBLAS version 0.3.12 is known to work. To install OpenBLAS, download the following scripts from AMICI:
-
-https://github.com/AMICI-dev/AMICI/blob/master/scripts/installOpenBLAS.ps1
-https://github.com/AMICI-dev/AMICI/blob/master/scripts/compileBLAS.cmd
-
-The first script needs to be called in PowerShell, and it needs to call `compileBLAS.cmd`, so you will need to modify line 11:
-
-    C:\Users\travis\build\AMICI\scripts\compileBLAS.cmd
-
-so that it matches your directory structure. It may also be necessary to modify line 3 of the second script (the call to `vcvars64.bat`) in order to match your installation of MSVC.
-
-This will download openBLAS and compile it, creating
-
-    C:\BLAS\lib\openblas.lib
-    C:\BLAS\bin\openblas.dll
-
-You will also need to define two environment variables:
-
-    BLAS_LIBS="/LIBPATH:C:/BLAS/lib openblas.lib"
-    BLAS_CFLAGS="/IC:/BLAS/OpenBLAS-0.3.12/OpenBLAS-0.3.12"
-
-One way to do that is to run a PowerShell script with the following commands:
-
-    [System.Environment]::SetEnvironmentVariable("BLAS_LIBS", "/LIBPATH:C:/BLAS/lib openblas.lib", [System.EnvironmentVariableTarget]::User)
-    [System.Environment]::SetEnvironmentVariable("BLAS_LIBS", "/LIBPATH:C:/BLAS/lib openblas.lib", [System.EnvironmentVariableTarget]::Process)
-    [System.Environment]::SetEnvironmentVariable("BLAS_CFLAGS", "-IC:/BLAS/OpenBLAS-0.3.12/OpenBLAS-0.3.12", [System.EnvironmentVariableTarget]::User)
-    [System.Environment]::SetEnvironmentVariable("BLAS_CFLAGS", "-IC:/BLAS/OpenBLAS-0.3.12/OpenBLAS-0.3.12", [System.EnvironmentVariableTarget]::Process)
-
-The call ending in `Process` sets the environment variable in the current process, and it is no longer in effect in the next process. The call ending in `User` is permanent, and takes effect the next time the user logs on.
-
-#### PATH
-
-Now we need to make sure that all required DLLs are within the scope of the `PATH` variable. In particular, the following directories need to be included in `PATH`:
-
-    C:\BLAS\bin
-    C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\x64
-
-The first one is needed for `openblas.dll` and the second is needed for the Windows Universal C Runtime.
-If any DLLs are missing from the `PATH` variable, Python will return the following error:
-
-    ImportError: DLL load failed: The specified module could not be found.
-
-This can be tested using the `where` command. For example,
-
-    where openblas.dll
-
-should return
-
-    C:\BLAS\bin\openblas.dll
-
-Almost all of the DLLs are standard Windows DLLs and should be included in either Windows or Visual Studio.
-But, in case it is necessary to test this, here is a list of some DLLs required by AMICI (when compiled with MSVC):
-
-* `openblas.dll`
-* `python37.dll`
-* `MSVCP140.dll`
-* `KERNEL32.dll`
-* `VCRUNTIME140_1.dll`
-* `VCRUNTIME140.dll`
-* `api-ms-win-crt-convert-l1-1-0.dll`
-* `api-ms-win-crt-heap-l1-1-0.dll`
-* `api-ms-win-crt-stdio-l1-1-0.dll`
-* `api-ms-win-crt-string-l1-1-0.dll`
-* `api-ms-win-crt-runtime-l1-1-0.dll`
-* `api-ms-win-crt-time-l1-1-0.dll`
-* `api-ms-win-crt-math-l1-1-0.dll`
-
-`MSVCP140.dll`, `VCRUNTIME140.dll`, and `VCRUNTIME140_1.dll` are needed by MSVC (see Visual Studio above). `KERNEL32.dll` is part of Windows and located in `C:\Windows\System32`. The `api-ms-win-crt-XXX-l1-1-0.dll` files are needed by `openblas.dll` and are part of the Windows Universal C Runtime (see Visual Studio above).
-
-### Custom installation
-
-AMICI Python package installation can be customized using a number of
-environment variables:
-
-|Variable | Purpose | Example |
-|---|---|---|
-|`CC`| Setting the C(++) compiler | `CC=/usr/bin/g++`|
-|`CFLAGS`| Extra compiler flags used in every compiler call | |
-|`BLAS_CFLAGS`| Compiler flags for, e.g., BLAS include directories | |
-|`BLAS_LIBS`| Flags for linking BLAS | |
-|`ENABLE_GCOV_COVERAGE`| Set to build AMICI to provide code coverage information | `ENABLE_GCOV_COVERAGE=TRUE`|
-|`ENABLE_AMICI_DEBUGGING`| Set to build AMICI with debugging symbols | `ENABLE_AMICI_DEBUGGING=TRUE`|
-|`AMICI_PARALLEL_COMPILE`| Set to the number of parallel processes to be used for C(++) file compilation (defaults to 1)| `AMICI_PARALLEL_COMPILE=4`|
-
-
-## MATLAB
-
-To use AMICI from MATLAB, start MATLAB and add the `AMICI/matlab`
-directory to the MATLAB path. To add all toolbox directories to the
-MATLAB path, execute the MATLAB script
-
-    installAMICI.m
-
-To keep the installation for future MATLAB sessions, the path can be
-saved via
-
-    savepath
-
-For the compilation of .mex files, MATLAB needs to be configured with a
-working C++ compiler. The C++ compiler needs to be installed and
-configured via:
-
-    mex -setup c++
-
-For a list of supported compilers we refer to the MathWorks
-documentation:
-[mathworks.com](http://mathworks.com/support/compilers/R2018b/index.html).
-Note that Microsoft Visual Studio compilers are currently not supported.
-
-
-## C++ only
-
-To use AMICI from C++, run the
-
-    ./scripts/buildSundials.sh
-    ./scripts/buildSuiteSparse.sh
-    ./scripts/buildAmici.sh
-
-scripts to build the AMICI library.
-
-**NOTE**: On some systems, the CMake executable may be named something
-other than `cmake`. In this case, set the `CMAKE` environment variable
-to the correct name (e.g. `export CMAKE=cmake3`, in case you have CMake
-available as `cmake3`).
-
-The static library file can then be linked from
-
-    ./build/libamici.a
-
-In CMake-based packages, AMICI can be linked via
-
-    find_package(Amici)
-
-### Optional SuperLU_MT support
-
-To build AMICI with SuperLU_MT support, run
-
-    ./scripts/buildSuperLUMT.sh
-    ./scripts/buildSundials.sh
-    cd build/
-    cmake -DSUNDIALS_SUPERLUMT_ENABLE=ON ..
-    make
-
-
-## Dependencies
-
-### General
-
-The tools SUNDIALS and SuiteSparse shipped with AMICI do __not__ require
-explicit installation.
-
-AMICI uses the following packages from SUNDIALS:
-
-__CVODES__: the sensitivity-enabled ODE solver in SUNDIALS. Radu Serban
-and Alan C. Hindmarsh. _ASME 2005 International Design Engineering
-Technical Conferences and Computers and Information in Engineering
-Conference._ American Society of Mechanical Engineers, 2005.
-[PDF](http://proceedings.asmedigitalcollection.asme.org/proceeding.aspx?articleid=1588657)
-
-__IDAS__
-
-AMICI uses the following packages from SuiteSparse:
-
-__Algorithm 907: KLU__, A Direct Sparse Solver for Circuit Simulation
-Problems. Timothy A. Davis, Ekanathan Palamadai Natarajan,
-_ACM Transactions on Mathematical Software_, Vol 37, Issue 6, 2010,
-pp 36:1 - 36:17. [PDF](http://dl.acm.org/authorize?305534)
-
-__Algorithm 837: AMD__, an approximate minimum degree ordering
-algorithm, Patrick R. Amestoy, Timothy A. Davis, Iain S. Duff,
-_ACM Transactions on Mathematical Software_, Vol 30, Issue 3, 2004,
-pp 381 - 388. [PDF](http://dl.acm.org/authorize?733169)
-
-__Algorithm 836: COLAMD__, a column approximate minimum degree ordering
-algorithm, Timothy A. Davis, John R. Gilbert, Stefan I. Larimore,
-Esmond G. Ng, _ACM Transactions on Mathematical Software_, Vol 30,
-Issue 3, 2004, pp 377 - 380. [PDF](http://dl.acm.org/authorize?734450)
-
-#### libsbml
-
-To import Systems Biology Markup Language ([SBML](http://sbml.org/))
-models, AMICI relies on the Python or MATLAB SBML library.
-
-#### Math Kernel Library (MKL)
-
-The Python and C++ interfaces require a system installation of a BLAS
-library. AMICI has been tested with various native and general-purpose
-BLAS implementations such as Accelerate, Intel MKL, cblas, OpenBLAS,
-and ATLAS. The MATLAB interface uses MATLAB's MKL, which requires no
-separate installation.
-
-On Ubuntu, this requirement can be satisfied with
-
-    apt install libatlas-base-dev
-
-On Fedora (32):
-
-    sudo dnf install blas-devel
-
-#### C++ compiler
-
-All AMICI installations require a C++11-compatible C++ compiler.
-AMICI has been tested with g++, mingw, clang and the Intel compiler.
-Visual C++ is not officially supported, but may work.
-
-#### HDF5
-
-The Python and C++ interfaces provide routines to read and write options
-and results in [HDF5](https://support.hdfgroup.org/HDF5/) format.
-For the Python interface, the installation of HDF5 is optional, but for
-the C++ interface it is currently required.
-
-HDF5 can be installed using package managers such as
-[brew](https://brew.sh) or [apt](https://wiki.debian.org/Apt):
-
-    brew install hdf5
-
-or
-
-    apt-get install libhdf5-serial-dev
-
-#### SWIG
-
-The Python interface requires [SWIG](http://www.swig.org), which has to
-be installed by the user. As a root user, SWIG can be installed using
-package managers such as [brew](https://brew.sh) or
-[apt](https://wiki.debian.org/Apt):
-
-    brew install swig
-
-or
-
-    apt-get install swig3.0
-
-Non-root users can build it using `scripts/downloadAndBuildSwig.sh` from
-the AMICI repository (not included in the PyPI package). The binary
-directory then has to be added to the `PATH` environment variable, or
-`SWIG` has to be set as described in the following section.
-
-##### Using a non-default SWIG executable
-
-Note that some Linux package managers may provide SWIG executables as
-`swig3.0`, but an executable named `swig` is required. As a root user,
-this can be fixed using, e.g., a symbolic link:
-
-    mkdir -p ~/bin/ && ln -s $(which swig3.0) ~/bin/swig && export PATH=~/bin/:$PATH
-
-Non-root users can set the `SWIG` environment variable to the full
-path of the desired SWIG executable. This variable has to be set during
-AMICI package installation as well as during model compilation.
-
-### Matlab
-
-The MATLAB interface requires the MathWorks Symbolic Toolbox for model
-generation via `amiwrap(...)`, but not for execution of precompiled
-models.
Currently MATLAB R2018a or newer is not supported (see -[https://github.com/AMICI-dev/AMICI/issues/307](https://github.com/AMICI-dev/AMICI/issues/307)). - -The Symbolic Toolbox requirement can be circumvented by performing model -import using the Python interface. The result code can then be used from -Matlab. - -### Python - -The python interface requires python 3.6 or newer and a cblas-compatible -BLAS library to be installed. Windows installations via pip are -currently not supported, but users may try to install amici using the -build scripts provided for the C++ interface (these will by default -automatically install the python module). - -The python interface depends on some additional packages, e.g. `numpy`. -They are automatically installed when installing the python package. - -### C++ - -The C++ interface requires `cmake` and a cblas-compatible BLAS to be -installed. - -### Optional - -#### SuperLU_MT - -"A general purpose library for the direct solution of large, -sparse, nonsymmetric systems of linear equations" -(https://crd-legacy.lbl.gov/~xiaoye/SuperLU/#superlu_mt). -SuperLU_MT is optional and is so far only available from the C++ interface. - - -#### Boost - -[Boost](https://www.boost.org/) is an optional C++ dependency only required for -special functions (including e.g. gamma derivatives) in the python interface. -It can be installed via package managers via - - apt-get install libboost-math-dev - -or - - brew install boost - -As only headers are required, also a -[source code](https://www.boost.org/doc/libs/1_66_0/more/getting_started/unix-variants.html) -download suffices. The compiler must be able to find the module in the search path. diff --git a/deps/AMICI/README.md b/deps/AMICI/README.md index 1b05c7050..52856ae21 100644 --- a/deps/AMICI/README.md +++ b/deps/AMICI/README.md @@ -35,8 +35,6 @@ constrained optimization problems. PyPI version PyPI installation - - Build Status Code coverage diff --git a/deps/AMICI/documentation/amici_refs.bib b/deps/AMICI/documentation/amici_refs.bib index 6803ee7e6..bf72b85df 100644 --- a/deps/AMICI/documentation/amici_refs.bib +++ b/deps/AMICI/documentation/amici_refs.bib @@ -885,6 +885,67 @@ @article {Froehlich2021.05.20.445065 journal = {bioRxiv} } +@Article{ErdemMut2021, + author = {Erdem, Cemal and Mutsuddy, Arnab and Bensman, Ethan M. and Dodd, William B. and Saint-Antoine, Michael M. and Bouhaddou, Mehdi and Blake, Robert C. and Gross, Sean M. and Heiser, Laura M. and Alex Feltus, F. and Birtwistle, Marc R.}, + journal = {bioRxiv}, + title = {A Scalable, Open-Source Implementation of a Large-Scale Mechanistic Model for Single Cell Proliferation and Death Signaling}, + year = {2021}, + abstract = {Mechanistic models of how single cells respond to different perturbagens can help integrate disparate big data sets or predict response to varied drug combinations. However, the construction and simulation of such models have proved challenging. Our lab previously constructed one of the largest mechanistic models for single mammalian cell regulation of proliferation and death (774 species, 141 genes, 8 ligands, 2400 reactions). However, this, as many other large-scale models, was written using licensed software (MATLAB) with intricate programming structure, impeding alteration, expansion, and sharing. Here, we generated a new foundation for this model, which includes a python-based creation and simulation pipeline converting a few structured text files into an SBML-compatible format. 
This new open-source model (named SPARCED) is high-performance- and cloud-computing compatible and enables the study of virtual cell population responses at the single-cell level. We applied this new model to a subset of the LINCS MCF10A Data Cube, which observed that IFNγ acts as an anti-proliferative factor, but the reasons why were unknown. After expanding the SPARCED model with an IFNγ signaling module (to 950 species, 150 genes, 9 ligands, 2500 reactions), we ran stochastic single-cell simulations for two different putative crosstalk mechanisms and looked at the number of cycling cells in each case. Our model-based analysis suggested, and experiments support that these observations are better explained by IFNγ-induced SOCS1 expression sequestering activated EGF receptors, thereby downregulating AKT activity, as opposed to direct IFNγ-induced upregulation of p21 expression. This work forms a foundation for increased mechanistic model-based data integration on a single-cell level, an important building block for clinically predictive mechanistic models.Competing Interest StatementThe authors have declared no competing interest.}, + doi = {10.1101/2020.11.09.373407}, + elocation-id = {2020.11.09.373407}, + eprint = {https://www.biorxiv.org/content/early/2021/07/15/2020.11.09.373407.full.pdf}, + publisher = {Cold Spring Harbor Laboratory}, + url = {https://www.biorxiv.org/content/early/2021/07/15/2020.11.09.373407}, +} + +@Article{BastBuc2021, + author = {Lisa Bast and Michèle C. Buck and Judith S. Hecker and Robert A.J. Oostendorp and Katharina S. Götze and Carsten Marr}, + journal = {iScience}, + title = {Computational modeling of stem and progenitor cell kinetics identifies plausible hematopoietic lineage hierarchies}, + year = {2021}, + issn = {2589-0042}, + number = {2}, + pages = {102120}, + volume = {24}, + abstract = {Summary +Classically, hematopoietic stem cell (HSC) differentiation is assumed to occur via progenitor compartments of decreasing plasticity and increasing maturity in a specific, hierarchical manner. The classical hierarchy has been challenged in the past by alternative differentiation pathways. We abstracted experimental evidence into 10 differentiation hierarchies, each comprising 7 cell type compartments. By fitting ordinary differential equation models with realistic waiting time distributions to time-resolved data of differentiating HSCs from 10 healthy human donors, we identified plausible lineage hierarchies and rejected others. We found that, for most donors, the classical model of hematopoiesis is preferred. Surprisingly, multipotent lymphoid progenitor differentiation into granulocyte-monocyte progenitors is plausible in 90% of samples. An in silico analysis confirmed that, even for strong noise, the classical model can be identified robustly. 
Our computational approach infers differentiation hierarchies in a personalized fashion and can be used to gain insights into kinetic alterations of diseased hematopoiesis.}, + doi = {https://doi.org/10.1016/j.isci.2021.102120}, + keywords = {stem cells research, in silico biology, systems biology}, + url = {https://www.sciencedirect.com/science/article/pii/S2589004221000882}, +} + +@Article{TomasoniPar2021, + author = {Tomasoni, Danilo and Paris, Alessio and Giampiccolo, Stefano and Reali, Federico and Simoni, Giulia and Marchetti, Luca and Kaddi, Chanchala and Neves-Zaph, Susana and Priami, Corrado and Azer, Karim and Lombardo, Rosario}, + journal = {Communications Biology}, + title = {{QSPcc} reduces bottlenecks in computational model simulations}, + year = {2021}, + issn = {2399-3642}, + number = {1}, + pages = {1022}, + volume = {4}, + abstract = {Mathematical models have grown in size and complexity becoming often computationally intractable. In sensitivity analysis and optimization phases, critical for tuning, validation and qualification, these models may be run thousands of times. Scientific programming languages popular for prototyping, such as MATLAB and R, can be a bottleneck in terms of performance. Here we show a compiler-based approach, designed to be universal at handling engineering and life sciences modeling styles, that automatically translates models into fast C code. At first QSPcc is demonstrated to be crucial in enabling the research on otherwise intractable Quantitative Systems Pharmacology models, such as in rare Lysosomal Storage Disorders. To demonstrate the full value in seamlessly accelerating, or enabling, the R&D efforts in natural sciences, we then benchmark QSPcc against 8 solutions on 24 real-world projects from different scientific fields. With speed-ups of 22000x peak, and 1605x arithmetic mean, our results show consistent superior performances.}, + doi = {10.1038/s42003-021-02553-9}, + refid = {Tomasoni2021}, + url = {https://doi.org/10.1038/s42003-021-02553-9}, +} + +@Misc{MaierHar2020, + author = {Corinna Maier and Niklas Hartung and Charlotte Kloft and Wilhelm Huisinga and Jana de Wiljes}, + title = {Reinforcement learning and Bayesian data assimilation for model-informed precision dosing in oncology}, + year = {2020}, + archiveprefix = {arXiv}, + eprint = {2006.01061}, + primaryclass = {stat.ML}, +} +@phdthesis{Maier2021, + author = {Corinna Maier}, + title = {Bayesian data assimilation and reinforcement learning for model-informed precision dosing in oncology}, + type = {doctoralthesis}, + pages = {x, 138}, + school = {Universit{\"a}t Potsdam}, + doi = {10.25932/publishup-51587}, + year = {2021}, +} @Comment{jabref-meta: databaseType:bibtex;} @Comment{jabref-meta: grouping: diff --git a/deps/AMICI/documentation/cpp_installation.rst b/deps/AMICI/documentation/cpp_installation.rst index bd0ada57b..f0c5ed1c0 100644 --- a/deps/AMICI/documentation/cpp_installation.rst +++ b/deps/AMICI/documentation/cpp_installation.rst @@ -24,8 +24,8 @@ To use AMICI from C++, run the .. code-block:: bash + ./scripts/buildSuiteSparse.sh ./scripts/buildSundials.sh - ./scripts/buildSuitesparse.sh ./scripts/buildAmici.sh script to build the AMICI library. diff --git a/deps/AMICI/documentation/index.rst b/deps/AMICI/documentation/index.rst index 39a6b4b7e..8bfa399ef 100644 --- a/deps/AMICI/documentation/index.rst +++ b/deps/AMICI/documentation/index.rst @@ -2,9 +2,6 @@ Welcome to AMICI's documentation! ================================= -.. 
image:: https://travis-ci.com/AMICI-dev/AMICI.svg?branch=master - :target: https://travis-ci.com/AMICI-dev/AMICI - :alt: Build status .. image:: https://codecov.io/gh/AMICI-dev/AMICI/branch/master/graph/badge.svg :target: https://codecov.io/gh/AMICI-dev/AMICI :alt: Code coverage diff --git a/deps/AMICI/documentation/python_installation.rst b/deps/AMICI/documentation/python_installation.rst index 127309fd6..0526f6133 100644 --- a/deps/AMICI/documentation/python_installation.rst +++ b/deps/AMICI/documentation/python_installation.rst @@ -205,16 +205,16 @@ You will also need to define two environment variables: .. code-block:: text BLAS_LIBS="/LIBPATH:C:\BLAS\lib openblas.lib" - BLAS_CFLAGS="/IC:\BLAS\OpenBLAS-v0.3.10\OpenBLAS-0.3.10" + BLAS_CFLAGS="/IC:/BLAS/OpenBLAS-0.3.12/OpenBLAS-0.3.12" One way to do that is to run a PowerShell script with the following commands: .. code-block:: text - [System.Environment]::SetEnvironmentVariable("BLAS_LIBS", "/LIBPATH:C:\BLAS\lib openblas.lib", [System.EnvironmentVariableTarget]::User) - [System.Environment]::SetEnvironmentVariable("BLAS_LIBS", "/LIBPATH:C:\BLAS\lib openblas.lib", [System.EnvironmentVariableTarget]::Process) - [System.Environment]::SetEnvironmentVariable("BLAS_CFLAGS", "-IC:\BLAS\OpenBLAS-v0.3.10\OpenBLAS-0.3.10", [System.EnvironmentVariableTarget]::User) - [System.Environment]::SetEnvironmentVariable("BLAS_CFLAGS", "-IC:\BLAS\OpenBLAS-v0.3.10\OpenBLAS-0.3.10", [System.EnvironmentVariableTarget]::Process) + [System.Environment]::SetEnvironmentVariable("BLAS_LIBS", "/LIBPATH:C:/BLAS/lib openblas.lib", [System.EnvironmentVariableTarget]::User) + [System.Environment]::SetEnvironmentVariable("BLAS_LIBS", "/LIBPATH:C:/BLAS/lib openblas.lib", [System.EnvironmentVariableTarget]::Process) + [System.Environment]::SetEnvironmentVariable("BLAS_CFLAGS", "-IC:/BLAS/OpenBLAS-0.3.12/OpenBLAS-0.3.12", [System.EnvironmentVariableTarget]::User) + [System.Environment]::SetEnvironmentVariable("BLAS_CFLAGS", "-IC:/BLAS/OpenBLAS-0.3.12/OpenBLAS-0.3.12", [System.EnvironmentVariableTarget]::Process) The call ending in ``Process`` sets the environment variable in the current process, and it is no longer in effect in the next process. The call ending in diff --git a/deps/AMICI/documentation/references.md b/deps/AMICI/documentation/references.md index 3f624bdac..7ef025419 100644 --- a/deps/AMICI/documentation/references.md +++ b/deps/AMICI/documentation/references.md @@ -1,17 +1,26 @@ # References -List of publications using AMICI. Total number is 57. +List of publications using AMICI. Total number is 62. If you applied AMICI in your work and your publication is missing, please let us know via a new Github issue.

2021

+ +
+

Erdem, Cemal, Arnab Mutsuddy, Ethan M. Bensman, William B. Dodd, Michael M. Saint-Antoine, Mehdi Bouhaddou, Robert C. Blake, et al. 2021. “A Scalable, Open-Source Implementation of a Large-Scale Mechanistic Model for Single Cell Proliferation and Death Signaling.” bioRxiv. https://doi.org/10.1101/2020.11.09.373407.

+

Fröhlich, Fabian, and Peter K. Sorger. 2021. “Fides: Reliable Trust-Region Optimization for Parameter Estimation of Ordinary Differential Equation Models.” bioRxiv. https://doi.org/10.1101/2021.05.20.445065.

Gaspari, Erika. 2021. “Model-Driven Design of Mycoplasma as a Vaccine Chassis.” PhD thesis, Wageningen: Wageningen University. https://doi.org/10.18174/539593.

+
+

Maier, Corinna. 2021. “Bayesian Data Assimilation and Reinforcement Learning for Model-Informed Precision Dosing in Oncology.” Doctoralthesis, Universität Potsdam. https://doi.org/10.25932/publishup-51587.

+

Raimúndez, Elba, Erika Dudkin, Jakob Vanhoefer, Emad Alamoudi, Simon Merkt, Lara Fuhrmann, Fan Bai, and Jan Hasenauer. 2021. “COVID-19 Outbreak in Wuhan Demonstrates the Limitations of Publicly Available Case Numbers for Epidemiological Modeling.” Epidemics 34: 100439. https://doi.org/https://doi.org/10.1016/j.epidem.2021.100439.

@@ -24,8 +33,11 @@ If you applied AMICI in your work and your publication is missing, please let us

Sten, Sebastian, Henrik Podéus, Nicolas Sundqvist, Fredrik Elinder, Maria Engström, and Gunnar Cedersund. 2021. “A Multi-Data Based Quantitative Model for the Neurovascular Coupling in the Brain.” bioRxiv. https://doi.org/10.1101/2021.03.25.437053.

+
+

Tomasoni, Danilo, Alessio Paris, Stefano Giampiccolo, Federico Reali, Giulia Simoni, Luca Marchetti, Chanchala Kaddi, et al. 2021. “QSPcc Reduces Bottlenecks in Computational Model Simulations.” Communications Biology 4 (1): 1022. https://doi.org/10.1038/s42003-021-02553-9.

+
-

Vanhoefer, Jakob, Marta R. a. Matos, Dilan Pathirana, Yannik Schälte, and Jan Hasenauer. 2021. “Yaml2sbml: Human-Readable and -Writable Specification of Ode Models and Their Conversion to Sbml.” Journal of Open Source Software 6 (61): 3215. https://doi.org/10.21105/joss.03215.

+

Vanhoefer, Jakob, Marta R. A. Matos, Dilan Pathirana, Yannik Schälte, and Jan Hasenauer. 2021. “Yaml2sbml: Human-Readable and -Writable Specification of Ode Models and Their Conversion to Sbml.” Journal of Open Source Software 6 (61): 3215. https://doi.org/10.21105/joss.03215.

van Rosmalen, R. P., R. W. Smith, V. A. P. Martins dos Santos, C. Fleck, and M. Suarez-Diez. 2021. “Model Reduction of Genome-Scale Metabolic Models as a Basis for Targeted Kinetic Models.” Metabolic Engineering 64: 74–84. https://doi.org/https://doi.org/10.1016/j.ymben.2021.01.008.

@@ -48,6 +60,9 @@ If you applied AMICI in your work and your publication is missing, please let us

Kuritz, Karsten, Alain R Bonny, João Pedro Fonseca, and Frank Allgöwer. 2020. “PDE-Constrained Optimization for Estimating Population Dynamics over Cell Cycle from Static Single Cell Measurements.” bioRxiv. https://doi.org/10.1101/2020.03.30.015909.

+
+

Maier, Corinna, Niklas Hartung, Charlotte Kloft, Wilhelm Huisinga, and Jana de Wiljes. 2020. “Reinforcement Learning and Bayesian Data Assimilation for Model-Informed Precision Dosing in Oncology.” http://arxiv.org/abs/2006.01061.

+

Schälte, Yannik, and Jan Hasenauer. 2020. “Efficient Exact Inference for Dynamical Systems with Noisy Measurements Using Sequential Approximate Bayesian Computation.” bioRxiv. https://doi.org/10.1101/2020.01.30.927004.

diff --git a/deps/AMICI/include/amici/defines.h b/deps/AMICI/include/amici/defines.h index 7e6957617..852b0c437 100644 --- a/deps/AMICI/include/amici/defines.h +++ b/deps/AMICI/include/amici/defines.h @@ -109,6 +109,13 @@ enum class ParameterScaling { log10 }; +/** modes for observable scaling */ +enum class ObservableScaling { + lin, + log, + log10 +}; + /** modes for second order sensitivity analysis */ enum class SecondOrderMode { none, diff --git a/deps/AMICI/include/amici/model.h b/deps/AMICI/include/amici/model.h index 1e4780844..1de282d20 100644 --- a/deps/AMICI/include/amici/model.h +++ b/deps/AMICI/include/amici/model.h @@ -802,6 +802,13 @@ class Model : public AbstractModel, public ModelDimensions { */ void getObservable(gsl::span y, const realtype t, const AmiVector &x); + + /** + * @brief Get scaling type for observable + * @param iy observable index + * @return scaling type + */ + virtual ObservableScaling getObservableScaling(int iy) const; /** * @brief Get sensitivity of time-resolved observables. diff --git a/deps/AMICI/include/amici/steadystateproblem.h b/deps/AMICI/include/amici/steadystateproblem.h index bd7efe47c..550433e79 100644 --- a/deps/AMICI/include/amici/steadystateproblem.h +++ b/deps/AMICI/include/amici/steadystateproblem.h @@ -144,8 +144,8 @@ class SteadystateProblem { * @brief Computes the weighted root mean square of xdot * the weights are computed according to x: * w_i = 1 / ( rtol * x_i + atol ) - * @param x current state - * @param xdot current rhs + * @param x current state (sx[ip] for sensitivities) + * @param xdot current rhs (sxdot[ip] for sensitivities) * @param atol absolute tolerance * @param rtol relative tolerance * @param ewt error weight vector diff --git a/deps/AMICI/include/amici/sundials_matrix_wrapper.h b/deps/AMICI/include/amici/sundials_matrix_wrapper.h index 2ccc7622a..43c1af984 100644 --- a/deps/AMICI/include/amici/sundials_matrix_wrapper.h +++ b/deps/AMICI/include/amici/sundials_matrix_wrapper.h @@ -9,6 +9,8 @@ #include +#include + #include "amici/vector.h" namespace amici { @@ -118,13 +120,25 @@ class SUNMatrixWrapper { * @brief Get the number of rows * @return number of rows */ - sunindextype rows() const; + sunindextype rows() const { + assert(!matrix_ || + (matrix_id() == SUNMATRIX_SPARSE ? + num_rows_ == SM_ROWS_S(matrix_) : + num_rows_ == SM_ROWS_D(matrix_))); + return num_rows_; + } /** * @brief Get the number of columns * @return number of columns */ - sunindextype columns() const; + sunindextype columns() const { + assert(!matrix_ || + (matrix_id() == SUNMATRIX_SPARSE ? 
+ num_columns_ == SM_COLUMNS_S(matrix_) : + num_columns_ == SM_COLUMNS_D(matrix_))); + return num_columns_; + } /** * @brief Get the number of specified non-zero elements (sparse matrices only) @@ -162,7 +176,13 @@ class SUNMatrixWrapper { * @param idx data index * @return idx-th data entry */ - realtype get_data(sunindextype idx) const; + realtype get_data(sunindextype idx) const{ + assert(matrix_); + assert(matrix_id() == SUNMATRIX_SPARSE); + assert(idx < capacity()); + assert(SM_DATA_S(matrix_) == data_); + return data_[idx]; + } /** * @brief Get data entry for a dense matrix @@ -170,14 +190,26 @@ class SUNMatrixWrapper { * @param icol col * @return A(irow,icol) */ - realtype get_data(sunindextype irow, sunindextype icol) const; + realtype get_data(sunindextype irow, sunindextype icol) const{ + assert(matrix_); + assert(matrix_id() == SUNMATRIX_DENSE); + assert(irow < rows()); + assert(icol < columns()); + return SM_ELEMENT_D(matrix_, irow, icol); + } /** * @brief Set data entry for a sparse matrix * @param idx data index * @param data data for idx-th entry */ - void set_data(sunindextype idx, realtype data); + void set_data(sunindextype idx, realtype data) { + assert(matrix_); + assert(matrix_id() == SUNMATRIX_SPARSE); + assert(idx < capacity()); + assert(SM_DATA_S(matrix_) == data_); + data_[idx] = data; + } /** * @brief Set data entry for a dense matrix @@ -185,47 +217,93 @@ class SUNMatrixWrapper { * @param icol col * @param data data for idx-th entry */ - void set_data(sunindextype irow, sunindextype icol, realtype data); + void set_data(sunindextype irow, sunindextype icol, realtype data) { + assert(matrix_); + assert(matrix_id() == SUNMATRIX_DENSE); + assert(irow < rows()); + assert(icol < columns()); + SM_ELEMENT_D(matrix_, irow, icol) = data; + } /** * @brief Get the index value of a sparse matrix * @param idx data index * @return row (CSC) or column (CSR) for idx-th data entry */ - sunindextype get_indexval(sunindextype idx) const; + sunindextype get_indexval(sunindextype idx) const { + assert(matrix_); + assert(matrix_id() == SUNMATRIX_SPARSE); + assert(idx < capacity()); + assert(indexvals_ == SM_INDEXVALS_S(matrix_)); + return indexvals_[idx]; + } /** * @brief Set the index value of a sparse matrix * @param idx data index * @param val row (CSC) or column (CSR) for idx-th data entry */ - void set_indexval(sunindextype idx, sunindextype val); + void set_indexval(sunindextype idx, sunindextype val) { + assert(matrix_); + assert(matrix_id() == SUNMATRIX_SPARSE); + assert(idx < capacity()); + assert(indexvals_ == SM_INDEXVALS_S(matrix_)); + indexvals_[idx] = val; + } /** * @brief Set the index values of a sparse matrix * @param vals rows (CSC) or columns (CSR) for data entries */ - void set_indexvals(const gsl::span vals); + void set_indexvals(const gsl::span vals) { + assert(matrix_); + assert(matrix_id() == SUNMATRIX_SPARSE); + assert(static_cast(vals.size()) == capacity()); + assert(indexvals_ == SM_INDEXVALS_S(matrix_)); + std::copy_n(vals.begin(), capacity(), indexvals_); + } /** * @brief Get the index pointer of a sparse matrix * @param ptr_idx pointer index * @return index where the ptr_idx-th column (CSC) or row (CSR) starts */ - sunindextype get_indexptr(sunindextype ptr_idx) const; + sunindextype get_indexptr(sunindextype ptr_idx) const { + assert(matrix_); + assert(matrix_id() == SUNMATRIX_SPARSE); + assert(ptr_idx <= num_indexptrs()); + assert(indexptrs_ == SM_INDEXPTRS_S(matrix_)); + return indexptrs_[ptr_idx]; + } /** * @brief Set the index pointer of a sparse 
matrix * @param ptr_idx pointer index * @param ptr data-index where the ptr_idx-th column (CSC) or row (CSR) starts */ - void set_indexptr(sunindextype ptr_idx, sunindextype ptr); + void set_indexptr(sunindextype ptr_idx, sunindextype ptr) { + assert(matrix_); + assert(matrix_id() == SUNMATRIX_SPARSE); + assert(ptr_idx <= num_indexptrs()); + assert(ptr <= capacity()); + assert(indexptrs_ == SM_INDEXPTRS_S(matrix_)); + indexptrs_[ptr_idx] = ptr; + if (ptr_idx == num_indexptrs()) + num_nonzeros_ = ptr; + } /** * @brief Set the index pointers of a sparse matrix * @param ptrs starting data-indices where the columns (CSC) or rows (CSR) start */ - void set_indexptrs(const gsl::span ptrs); + void set_indexptrs(const gsl::span ptrs) { + assert(matrix_); + assert(matrix_id() == SUNMATRIX_SPARSE); + assert(static_cast(ptrs.size()) == num_indexptrs() + 1); + assert(indexptrs_ == SM_INDEXPTRS_S(matrix_)); + std::copy_n(ptrs.begin(), num_indexptrs() + 1, indexptrs_); + num_nonzeros_ = indexptrs_[num_indexptrs()]; + } /** * @brief Get the type of sparse matrix diff --git a/deps/AMICI/matlab/mtoc/config/Doxyfile.template b/deps/AMICI/matlab/mtoc/config/Doxyfile.template index a50605b07..ed2f623f1 100644 --- a/deps/AMICI/matlab/mtoc/config/Doxyfile.template +++ b/deps/AMICI/matlab/mtoc/config/Doxyfile.template @@ -875,7 +875,6 @@ WARN_LOGFILE = INPUT = "_SourceDir_/README.md" \ "_SourceDir_/LICENSE.md" \ - "_SourceDir_/INSTALL.md" \ "_SourceDir_/documentation/MATLAB_.md" \ "_SourceDir_/documentation/CPP_.md" \ "_SourceDir_/include" \ diff --git a/deps/AMICI/python/amici/custom_commands.py b/deps/AMICI/python/amici/custom_commands.py index e7f36ebd2..d4c25a348 100644 --- a/deps/AMICI/python/amici/custom_commands.py +++ b/deps/AMICI/python/amici/custom_commands.py @@ -15,7 +15,6 @@ from setuptools.command.install import install from setuptools.command.install_lib import install_lib from setuptools.command.sdist import sdist -from distutils import log # typehints Library = Tuple[str, Dict[str, List[str]]] @@ -24,7 +23,7 @@ class AmiciInstall(install): """Custom install to handle extra arguments""" - log.debug("running AmiciInstall") + print("running AmiciInstall") # Passing --no-clibs allows to install the Python-only part of AMICI user_options = install.user_options + [ @@ -81,7 +80,7 @@ class AmiciBuildCLib(build_clib): """Custom build_clib""" def run(self): - log.debug("running AmiciBuildCLib") + print("running AmiciBuildCLib") # Always force recompilation. 
The way setuptools/distutils check for # whether sources require recompilation is not reliable and may lead @@ -91,7 +90,7 @@ def run(self): build_clib.run(self) def build_libraries(self, libraries: List[Library]): - log.debug("running AmiciBuildCLib.build_libraries") + print("running AmiciBuildCLib.build_libraries") no_clibs = 'develop' in self.distribution.command_obj \ and self.get_finalized_command('develop').no_clibs @@ -139,7 +138,7 @@ def finalize_options(self): develop.finalize_options(self) def run(self): - log.debug("running AmiciDevelop") + print("running AmiciDevelop") if not self.no_clibs: self.get_finalized_command('build_clib').run() @@ -156,7 +155,7 @@ def run(self): Returns: """ - log.debug("running AmiciInstallLib") + print("running AmiciInstallLib") if 'ENABLE_AMICI_DEBUGGING' in os.environ \ and os.environ['ENABLE_AMICI_DEBUGGING'] == 'TRUE' \ @@ -187,7 +186,7 @@ def run(self): the wheel """ - log.debug("running AmiciBuildExt") + print("running AmiciBuildExt") no_clibs = 'develop' in self.distribution.command_obj \ and self.get_finalized_command('develop').no_clibs @@ -220,13 +219,13 @@ def run(self): f"Found unexpected number of files: {libfilenames}" src = libfilenames[0] dest = os.path.join(target_dir, os.path.basename(src)) - log.info(f"copying {src} -> {dest}") + print(f"copying {src} -> {dest}") copyfile(src, dest) swig_outdir = os.path.join(os.path.abspath(build_dir), "amici") generate_swig_interface_files(swig_outdir=swig_outdir) swig_py_module_path = os.path.join(swig_outdir, 'amici.py') - log.debug("updating typehints") + print("updating typehints") fix_typehints(swig_py_module_path, swig_py_module_path) # Always force recompilation. The way setuptools/distutils check for @@ -244,7 +243,7 @@ class AmiciSDist(sdist): def run(self): """Setuptools entry-point""" - log.debug("running AmiciSDist") + print("running AmiciSDist") save_git_version() @@ -266,7 +265,7 @@ def save_git_version(): '--always', '--tags'] subprocess.run(cmd, stdout=f) except Exception as e: - log.warn(e) + print(e) def set_compiler_specific_library_options( @@ -298,7 +297,7 @@ def set_compiler_specific_library_options( for field in ['cflags', 'sources', 'macros']: try: lib[1][field] += lib[1][f'{field}_{compiler_type}'] - log.info(f"Changed {field} for {lib[0]} with {compiler_type} " + print(f"Changed {field} for {lib[0]} with {compiler_type} " f"to {lib[1][field]}") except KeyError: # No compiler-specific options set @@ -322,7 +321,7 @@ def set_compiler_specific_extension_options( new_value = getattr(ext, attr) + \ getattr(ext, f'{attr}_{compiler_type}') setattr(ext, attr, new_value) - log.info(f"Changed {attr} for {compiler_type} to {new_value}") + print(f"Changed {attr} for {compiler_type} to {new_value}") except AttributeError: # No compiler-specific options set pass diff --git a/deps/AMICI/python/amici/import_utils.py b/deps/AMICI/python/amici/import_utils.py index 833e622c4..52c9c793c 100644 --- a/deps/AMICI/python/amici/import_utils.py +++ b/deps/AMICI/python/amici/import_utils.py @@ -4,13 +4,44 @@ from typing import Dict, Union, Optional, Callable import sympy as sp +import enum from toposort import toposort SymbolDef = Dict[sp.Symbol, Union[Dict[str, sp.Expr], sp.Expr]] +class ObservableTransformation(str, enum.Enum): + """ + Different modes of observable transformation. 
+ """ + LOG10 = 'log10' + LOG = 'log' + LIN = 'lin' + + +def noise_distribution_to_observable_transformation( + noise_distribution: Union[str, Callable] +) -> ObservableTransformation: + """ + Parse noise distribution string and extract observable transformation + + :param noise_distribution: + see :func:`noise_distribution_to_cost_function` + + :return: + observable transformation + """ + if isinstance(noise_distribution, str): + if noise_distribution.startswith('log-'): + return ObservableTransformation.LOG + if noise_distribution.startswith('log10-'): + return ObservableTransformation.LOG10 + + return ObservableTransformation.LIN + + def noise_distribution_to_cost_function( - noise_distribution: str + noise_distribution: Union[str, Callable] ) -> Callable[[str], str]: """ Parse noise distribution string to a cost function definition amici can diff --git a/deps/AMICI/python/amici/ode_export.py b/deps/AMICI/python/amici/ode_export.py index b5ec800af..e0376b879 100644 --- a/deps/AMICI/python/amici/ode_export.py +++ b/deps/AMICI/python/amici/ode_export.py @@ -45,7 +45,8 @@ ) from .logging import get_logger, log_execution_time, set_log_level from .constants import SymbolId -from .import_utils import smart_subs_dict, toposort_symbols +from .import_utils import smart_subs_dict, toposort_symbols, \ + ObservableTransformation # Template for model simulation main.cpp file CXX_MAIN_TEMPLATE_FILE = os.path.join(amiciSrcPath, 'main.template.cpp') @@ -555,6 +556,10 @@ class Observable(ModelQuantity): :ivar _measurement_symbol: sympy symbol used in the objective function to represent measurements to this observable + + :ivar trafo: + observable transformation, only applies when evaluating objective + function or residuals """ _measurement_symbol: Union[sp.Symbol, None] = None @@ -563,7 +568,8 @@ def __init__(self, identifier: sp.Symbol, name: str, value: sp.Expr, - measurement_symbol: Optional[sp.Symbol] = None): + measurement_symbol: Optional[sp.Symbol] = None, + transformation: Optional[ObservableTransformation] = 'lin'): """ Create a new Observable instance. 
@@ -575,9 +581,14 @@ def __init__(self, :param value: formula + + :param transformation: + observable transformation, only applies when evaluating objective + function or residuals """ super(Observable, self).__init__(identifier, name, value) self._measurement_symbol = measurement_symbol + self.trafo = transformation def get_measurement_symbol(self) -> sp.Symbol: if self._measurement_symbol is None: @@ -1050,10 +1061,9 @@ def import_from_sbml_importer(self, nexpr = len(symbols[SymbolId.EXPRESSION]) # assemble fluxes and add them as expressions to the model - fluxes = [] - for ir, flux in enumerate(si.flux_vector): - flux_id = generate_flux_symbol(ir) - fluxes.append(flux_id) + assert len(si.flux_ids) == len(si.flux_vector) + fluxes = [generate_flux_symbol(ir, name=flux_id) + for ir, flux_id in enumerate(si.flux_ids)] nr = len(fluxes) # correct time derivatives for compartment changes @@ -1161,6 +1171,8 @@ def transform_dxdt_to_concentration(species_id, dxdt): args += ['value'] if symbol_name == SymbolId.EVENT: args += ['state_update', 'event_observable'] + if symbol_name == SymbolId.OBSERVABLE: + args += ['transformation'] protos = [ { @@ -1216,7 +1228,8 @@ def transform_dxdt_to_concentration(species_id, dxdt): # fill in 'self._sym' based on prototypes and components in ode_model self.generate_basic_variables(from_sbml=True) self._has_quadratic_nllh = all( - llh['dist'] in ['normal', 'lin-normal'] + llh['dist'] in ['normal', 'lin-normal', 'log-normal', + 'log10-normal'] for llh in si.symbols[SymbolId.LLHY].values() ) @@ -1300,6 +1313,17 @@ def add_conservation_law(self, self._states[ix].set_conservation_law(state_expr) + + def get_observable_transformations(self) -> List[ObservableTransformation]: + """ + List of observable transformations + + :return: + list of transformations + """ + return [obs.trafo for obs in self._observables] + + def num_states_rdata(self) -> int: """ Number of states. @@ -2575,9 +2599,6 @@ class ODEExporter: :ivar model: ODE definition - :ivar outdir: - see :meth:`amici.ode_export.ODEExporter.set_paths` - :ivar verbose: more verbose output if True @@ -2621,7 +2642,8 @@ def __init__( assume_pow_positivity: Optional[bool] = False, compiler: Optional[str] = None, allow_reinit_fixpar_initcond: Optional[bool] = True, - generate_sensitivity_code: Optional[bool] = True + generate_sensitivity_code: Optional[bool] = True, + model_name: Optional[str] = 'model' ): """ Generate AMICI C++ files for the ODE provided to the constructor. @@ -2649,19 +2671,21 @@ def __init__( :param generate_sensitivity_code specifies whether code required for sensitivity computation will be generated + + :param model_name: + name of the model to be used during code generation """ set_log_level(logger, verbose) - self.outdir: str = outdir self.verbose: bool = logger.getEffectiveLevel() <= logging.DEBUG self.assume_pow_positivity: bool = assume_pow_positivity self.compiler: str = compiler - self.model_name: str = 'model' - output_dir = os.path.join(os.getcwd(), - f'amici-{self.model_name}') - self.model_path: str = os.path.abspath(output_dir) - self.model_swig_path: str = os.path.join(self.model_path, 'swig') + self.model_path: str = '' + self.model_swig_path: str = '' + + self.set_name(model_name) + self.set_paths(outdir) # Signatures and properties of generated model functions (see # include/amici/model.h for details) @@ -2702,8 +2726,11 @@ def compile_model(self) -> None: def _prepare_model_folder(self) -> None: """ - Remove all files from the model folder. 
+ Create model directory or remove all files if the output directory + already exists. """ + os.makedirs(self.model_path, exist_ok=True) + for file in os.listdir(self.model_path): file_path = os.path.join(self.model_path, file) if os.path.isfile(file_path): @@ -3290,6 +3317,13 @@ def _write_model_header_cpp(self) -> None: self._get_symbol_name_initializer_list('k'), 'OBSERVABLE_NAMES_INITIALIZER_LIST': self._get_symbol_name_initializer_list('y'), + 'OBSERVABLE_TRAFO_INITIALIZER_LIST': + '\n'.join( + f'ObservableScaling::{trafo}, // y[{idx}]' + for idx, trafo in enumerate( + self.model.get_observable_transformations() + ) + ), 'EXPRESSION_NAMES_INITIALIZER_LIST': self._get_symbol_name_initializer_list('w'), 'PARAMETER_IDS_INITIALIZER_LIST': @@ -3472,22 +3506,24 @@ def _write_module_setup(self) -> None: template_data ) - def set_paths(self, output_dir: str) -> None: + def set_paths(self, output_dir: Optional[str] = None) -> None: """ Set output paths for the model and create if necessary :param output_dir: relative or absolute path where the generated model - code is to be placed. will be created if does not exists. + code is to be placed. If ``None``, this will default to + `amici-{self.model_name}` in the current working directory. + will be created if does not exists. """ + if output_dir is None: + output_dir = os.path.join(os.getcwd(), + f'amici-{self.model_name}') + self.model_path = os.path.abspath(output_dir) self.model_swig_path = os.path.join(self.model_path, 'swig') - for directory in [self.model_path, self.model_swig_path]: - if not os.path.exists(directory): - os.makedirs(directory) - def set_name(self, model_name: str) -> None: """ Sets the model name @@ -3873,7 +3909,10 @@ def generate_measurement_symbol(observable_id: Union[str, sp.Symbol]): return symbol_with_assumptions(f'm{observable_id}') -def generate_flux_symbol(reaction_index: int) -> sp.Symbol: +def generate_flux_symbol( + reaction_index: int, + name: Optional[str] = None +) -> sp.Symbol: """ Generate identifier symbol for a reaction flux. 
This function will always return the same unique python object for a @@ -3881,9 +3920,14 @@ def generate_flux_symbol(reaction_index: int) -> sp.Symbol: :param reaction_index: index of the reaction to which the flux corresponds + :param name: + an optional identifier of the reaction to which the flux corresponds :return: identifier symbol """ + if name is not None: + return symbol_with_assumptions(name) + return symbol_with_assumptions(f'flux_r{reaction_index}') diff --git a/deps/AMICI/python/amici/pysb_import.py b/deps/AMICI/python/amici/pysb_import.py index 603eb3ef2..6dab3f8a7 100644 --- a/deps/AMICI/python/amici/pysb_import.py +++ b/deps/AMICI/python/amici/pysb_import.py @@ -11,7 +11,8 @@ ) from .import_utils import ( - noise_distribution_to_cost_function, _get_str_symbol_identifiers + noise_distribution_to_cost_function, _get_str_symbol_identifiers, + noise_distribution_to_observable_transformation ) import logging from .logging import get_logger, log_execution_time, set_log_level @@ -139,13 +140,12 @@ def pysb2amici( exporter = ODEExporter( ode_model, outdir=output_dir, + model_name=model.name, verbose=verbose, assume_pow_positivity=assume_pow_positivity, compiler=compiler, generate_sensitivity_code=generate_sensitivity_code ) - exporter.set_name(model.name) - exporter.set_paths(output_dir) exporter.generate_model_code() if compile: @@ -218,7 +218,7 @@ def ode_model_from_pysb_importer( _process_pysb_expressions(model, ode, observables, sigmas, noise_distributions) ode._has_quadratic_nllh = not noise_distributions or all( - noise_distr in ['normal', 'lin-normal'] + noise_distr in ['normal', 'lin-normal', 'log-normal', 'log10-normal'] for noise_distr in noise_distributions.values() ) @@ -374,8 +374,12 @@ def _add_expression( ) if name in observables: + noise_dist = noise_distributions.get(name, 'normal') \ + if noise_distributions else 'normal' + y = sp.Symbol(f'{name}') - obs = Observable(y, name, sym) + trafo = noise_distribution_to_observable_transformation(noise_dist) + obs = Observable(y, name, sym, transformation=trafo) ode_model.add_component(obs) sigma_name, sigma_value = _get_sigma_name_and_value( @@ -385,8 +389,7 @@ def _add_expression( sigma = sp.Symbol(sigma_name) ode_model.add_component(SigmaY(sigma, f'{sigma_name}', sigma_value)) - noise_dist = noise_distributions.get(name, 'normal') \ - if noise_distributions else 'normal' + cost_fun_str = noise_distribution_to_cost_function(noise_dist)(name) my = generate_measurement_symbol(obs.get_id()) cost_fun_expr = sp.sympify(cost_fun_str, diff --git a/deps/AMICI/python/amici/sbml_import.py b/deps/AMICI/python/amici/sbml_import.py index ab9cfd9ba..c851384d7 100644 --- a/deps/AMICI/python/amici/sbml_import.py +++ b/deps/AMICI/python/amici/sbml_import.py @@ -20,8 +20,8 @@ from .import_utils import ( smart_subs, smart_subs_dict, toposort_symbols, - _get_str_symbol_identifiers, - noise_distribution_to_cost_function + _get_str_symbol_identifiers, noise_distribution_to_cost_function, + noise_distribution_to_observable_transformation ) from .ode_export import ( ODEExporter, ODEModel, generate_measurement_symbol, @@ -87,6 +87,9 @@ class SbmlImporter: :ivar flux_vector: reaction kinetic laws + :ivar flux_ids: + identifiers for elements of flux_vector + :ivar _local_symbols: model symbols for sympy to consider during sympification see `locals`argument in `sympy.sympify` @@ -364,6 +367,7 @@ def sbml2amici(self, self, compute_cls=compute_conservation_laws) exporter = ODEExporter( ode_model, + model_name=model_name, outdir=output_dir, 
verbose=verbose, assume_pow_positivity=assume_pow_positivity, @@ -371,8 +375,6 @@ def sbml2amici(self, allow_reinit_fixpar_initcond=allow_reinit_fixpar_initcond, generate_sensitivity_code=generate_sensitivity_code ) - exporter.set_name(model_name) - exporter.set_paths(output_dir) exporter.generate_model_code() if compile: @@ -406,28 +408,49 @@ def check_support(self) -> None: Also ensures that the SBML contains at least one reaction, or rate rule, or assignment rule, to produce change in the system over time. """ - if hasattr(self.sbml, 'all_elements_from_plugins') \ + + # Check for required but unsupported SBML extensions + if self.sbml_doc.getLevel() != 3 \ + and hasattr(self.sbml, 'all_elements_from_plugins') \ and self.sbml.all_elements_from_plugins.getSize(): raise SBMLException('SBML extensions are currently not supported!') - if any([not rule.isAssignment() and not isinstance( + if self.sbml_doc.getLevel() == 3: + # the "required" attribute is only available in SBML Level 3 + for i_plugin in range(self.sbml.getNumPlugins()): + plugin = self.sbml.getPlugin(i_plugin) + if plugin.getPackageName() in ('layout',): + # 'layout' plugin does not have the 'required' attribute + continue + if hasattr(plugin, 'getRequired') and not plugin.getRequired(): + # if not "required", this has no impact on model + # simulation, and we can safely ignore it + continue + # Check if there are extension elements. If not, we can safely + # ignore the enabled package + if plugin.getListOfAllElements(): + raise SBMLException( + f'Required SBML extension {plugin.getPackageName()} ' + f'is currently not supported!') + + if any(not rule.isAssignment() and not isinstance( self.sbml.getElementBySId(rule.getVariable()), (sbml.Compartment, sbml.Species, sbml.Parameter) - ) for rule in self.sbml.getListOfRules()]): + ) for rule in self.sbml.getListOfRules()): raise SBMLException('Algebraic rules are currently not supported, ' 'and rate rules are only supported for ' 'species, compartments, and parameters.') - if any([not (rule.isAssignment() or rule.isRate()) + if any(not (rule.isAssignment() or rule.isRate()) and isinstance( self.sbml.getElementBySId(rule.getVariable()), (sbml.Compartment, sbml.Species, sbml.Parameter) - ) for rule in self.sbml.getListOfRules()]): + ) for rule in self.sbml.getListOfRules()): raise SBMLException('Only assignment and rate rules are ' 'currently supported for compartments, ' 'species, and parameters!') - if any([r.getFast() for r in self.sbml.getListOfReactions()]): + if any(r.getFast() for r in self.sbml.getListOfReactions()): raise SBMLException('Fast reactions are currently not supported!') # Check events for unsupported functionality @@ -849,6 +872,14 @@ def _process_reactions(self): # stoichiometric matrix self.stoichiometric_matrix = sp.SparseMatrix(sp.zeros(nx, nr)) self.flux_vector = sp.zeros(nr, 1) + # Use reaction IDs as IDs for flux expressions (note that prior to SBML + # level 3 version 2 the ID attribute was not mandatory and may be + # unset) + self.flux_ids = [ + f"flux_{reaction.getId()}" if reaction.isSetId() + else f"flux_r{reaction_idx}" + for reaction_idx, reaction in enumerate(reactions) + ] or ['flux_r0'] reaction_ids = [ reaction.getId() for reaction in reactions @@ -1178,7 +1209,11 @@ def _process_observables( # former. 
'value': self._sympy_from_sbml_math( definition['formula'] - ) + ), + 'transformation': + noise_distribution_to_observable_transformation( + noise_distributions.get(obs, 'normal') + ) } for iobs, (obs, definition) in enumerate(observables.items()) } diff --git a/deps/AMICI/python/amici/setup.template.py b/deps/AMICI/python/amici/setup.template.py index b9b6f69c4..990b5ea23 100644 --- a/deps/AMICI/python/amici/setup.template.py +++ b/deps/AMICI/python/amici/setup.template.py @@ -30,9 +30,9 @@ def build_extension(self, ext): # except for Windows, where this seems to be incompatible with # providing swig files. Not investigated further... if sys.platform != 'win32': - import distutils.ccompiler + import setuptools._distutils.ccompiler self.compiler.compile = compile_parallel.__get__( - self.compiler, distutils.ccompiler.CCompiler) + self.compiler, setuptools._distutils.ccompiler.CCompiler) build_ext.build_extension(self, ext) @@ -64,7 +64,7 @@ def get_amici_libs() -> List[str]: def get_extension() -> Extension: - """Get distutils extension object for this AMICI model package""" + """Get setuptools extension object for this AMICI model package""" cxx_flags = [] linker_flags = [] diff --git a/deps/AMICI/python/amici/setuptools.py b/deps/AMICI/python/amici/setuptools.py index 4c998e318..7cfcf61fe 100644 --- a/deps/AMICI/python/amici/setuptools.py +++ b/deps/AMICI/python/amici/setuptools.py @@ -9,7 +9,6 @@ import shlex import subprocess -from distutils import log from .swig import find_swig, get_swig_version try: @@ -143,7 +142,7 @@ def get_hdf5_config() -> PackageInfo: hdf5_include_dir_found = os.path.isfile( os.path.join(hdf5_include_dir_hint, 'hdf5.h')) if hdf5_include_dir_found: - log.info('hdf5.h found in %s' % hdf5_include_dir_hint) + print(f"hdf5.h found in {hdf5_include_dir_hint}") h5pkgcfg['include_dirs'] = [hdf5_include_dir_hint] break @@ -153,7 +152,7 @@ def get_hdf5_config() -> PackageInfo: hdf5_library_dir_found = os.path.isfile( os.path.join(hdf5_library_dir_hint, lib_filename)) if hdf5_library_dir_found: - log.info(f'{lib_filename} found in {hdf5_library_dir_hint}') + print(f'{lib_filename} found in {hdf5_library_dir_hint}') h5pkgcfg['library_dirs'] = [hdf5_library_dir_hint] break if hdf5_library_dir_found: @@ -192,8 +191,8 @@ def add_coverage_flags_if_required(cxx_flags: List[str], """ if 'ENABLE_GCOV_COVERAGE' in os.environ and \ os.environ['ENABLE_GCOV_COVERAGE'].upper() == 'TRUE': - log.info("ENABLE_GCOV_COVERAGE was set to TRUE." - " Building AMICI with coverage symbols.") + print("ENABLE_GCOV_COVERAGE was set to TRUE." + " Building AMICI with coverage symbols.") cxx_flags.extend(['-g', '-O0', '--coverage']) linker_flags.extend(['--coverage', '-g']) @@ -212,8 +211,8 @@ def add_debug_flags_if_required(cxx_flags: List[str], """ if 'ENABLE_AMICI_DEBUGGING' in os.environ \ and os.environ['ENABLE_AMICI_DEBUGGING'] == 'TRUE': - log.info("ENABLE_AMICI_DEBUGGING was set to TRUE." - " Building AMICI with debug symbols.") + print("ENABLE_AMICI_DEBUGGING was set to TRUE." + " Building AMICI with debug symbols.") cxx_flags.extend(['-g', '-O0', '-UNDEBUG']) linker_flags.extend(['-g']) @@ -237,7 +236,7 @@ def generate_swig_interface_files(swig_outdir: str = None, f'-Iamici{os.sep}include', ] - log.info(f"Found SWIG version {swig_version}") + print(f"Found SWIG version {swig_version}") # Are HDF5 includes available to generate the wrapper? 
if with_hdf5 is None: @@ -258,7 +257,7 @@ def generate_swig_interface_files(swig_outdir: str = None, '-o', os.path.join("amici", "amici_wrap.cxx"), os.path.join("amici", "swig", "amici.i")] - log.info(f"Running SWIG: {' '.join(swig_cmd)}") + print(f"Running SWIG: {' '.join(swig_cmd)}") sp = subprocess.run(swig_cmd, stdout=subprocess.PIPE, stderr=sys.stdout.buffer) if not sp.returncode == 0: @@ -271,15 +270,15 @@ def add_openmp_flags(cxx_flags: List, ldflags: List) -> None: # Enable OpenMP support for Linux / OSX: if sys.platform == 'linux': - log.info("Adding OpenMP flags...") + print("Adding OpenMP flags...") cxx_flags.insert(0, "-fopenmp") ldflags.insert(0, "-fopenmp") elif sys.platform == 'darwin': if os.path.exists('/usr/local/lib/libomp.a'): - log.info("Adding OpenMP flags...") + print("Adding OpenMP flags...") cxx_flags[0:0] = ["-Xpreprocessor", "-fopenmp"] ldflags[0:0] = ["-Xpreprocessor", "-fopenmp", "-lomp"] else: - log.info("Not adding OpenMP flags, because /usr/local/lib/libomp.a" - " does not exist. To enable, run `brew install libomp` " - "or add flags manually.") + print("Not adding OpenMP flags, because /usr/local/lib/libomp.a" + " does not exist. To enable, run `brew install libomp` " + "or add flags manually.") diff --git a/deps/AMICI/python/sdist/LICENSE.md b/deps/AMICI/python/sdist/LICENSE.md new file mode 120000 index 000000000..f0608a63a --- /dev/null +++ b/deps/AMICI/python/sdist/LICENSE.md @@ -0,0 +1 @@ +../../LICENSE.md \ No newline at end of file diff --git a/deps/AMICI/python/sdist/MANIFEST.in b/deps/AMICI/python/sdist/MANIFEST.in index d522f9cf9..f53efb3e6 100644 --- a/deps/AMICI/python/sdist/MANIFEST.in +++ b/deps/AMICI/python/sdist/MANIFEST.in @@ -9,6 +9,7 @@ include amici/src/hdf5.cpp include amici/swig/CMakeLists_model.cmake include setup_clibs.py include version.txt +include LICENSE.md exclude amici/*.so exclude amici/*.dll diff --git a/deps/AMICI/python/sdist/setup.cfg b/deps/AMICI/python/sdist/setup.cfg index 8e8fdbaea..e37885e29 100644 --- a/deps/AMICI/python/sdist/setup.cfg +++ b/deps/AMICI/python/sdist/setup.cfg @@ -27,7 +27,7 @@ package_dir = amici = amici python_requires = >=3.7 install_requires = - sympy>=1.7.1 + sympy>=1.7.1,<1.9 numpy>=1.14.5; python_version=='3.7' numpy>=1.17.5; python_version=='3.8' numpy>=1.19.3; python_version>='3.9' @@ -37,6 +37,7 @@ install_requires = pkgconfig wurlitzer toposort + setuptools>=48 include_package_data = True zip_safe = False diff --git a/deps/AMICI/python/tests/test_preequilibration.py b/deps/AMICI/python/tests/test_preequilibration.py index 1129c8985..2af6da888 100644 --- a/deps/AMICI/python/tests/test_preequilibration.py +++ b/deps/AMICI/python/tests/test_preequilibration.py @@ -236,7 +236,7 @@ def test_parameter_in_expdata(preeq_fixture): def test_raise_presimulation_with_adjoints(preeq_fixture): - """Test data replicates""" + """Test simulation failures with adjoin+presimulation""" model, solver, edata, edata_preeq, \ edata_presim, edata_sim, pscales, plists = preeq_fixture @@ -248,16 +248,15 @@ def test_raise_presimulation_with_adjoints(preeq_fixture): rdata = amici.runAmiciSimulation(model, solver, edata) assert rdata['status'] == amici.AMICI_ERROR - # presimulation and postequilibration with adjoints: - # this also needs to fail + # add postequilibration y = edata.getObservedData() stdy = edata.getObservedDataStdDev() - - # add infty timepoint ts = np.hstack([*edata.getTimepoints(), np.inf]) - edata.setTimepoints(sorted(ts)) + edata.setTimepoints(ts) edata.setObservedData(np.hstack([y, y[0]])) 
edata.setObservedDataStdDev(np.hstack([stdy, stdy[0]])) + + # remove presimulation edata.t_presim = 0 edata.fixedParametersPresimulation = () @@ -267,7 +266,8 @@ def test_raise_presimulation_with_adjoints(preeq_fixture): def test_equilibration_methods_with_adjoints(preeq_fixture): - """Test data replicates""" + """Test different combinations of equilibration and simulation + sensitivity methods""" model, solver, edata, edata_preeq, \ edata_presim, edata_sim, pscales, plists = preeq_fixture diff --git a/deps/AMICI/scripts/README.md b/deps/AMICI/scripts/README.md index 67f64dca5..d656d9d49 100644 --- a/deps/AMICI/scripts/README.md +++ b/deps/AMICI/scripts/README.md @@ -13,7 +13,7 @@ This directory contains a number of build, installation, and CI scripts. * `buildBNGL.sh` Download and build - [BioNetGen](https://www.csb.pitt.edu/Faculty/Faeder/?page_id=409) (required for some tests) + [BioNetGen](https://github.com/RuleWorld/bionetgen) (required for some tests) * `buildSuiteSparse.sh` @@ -85,7 +85,3 @@ This directory contains a number of build, installation, and CI scripts. Run memory leak check using valgrind for all unit and integration tests. Assumes they have been built before in the default location. - -* `travis_wrap.sh` - - Wrapper script for Travis CI to enable output folding in Travis CI logs diff --git a/deps/AMICI/scripts/buildBNGL.sh b/deps/AMICI/scripts/buildBNGL.sh index 6429be7aa..ff2bed8ba 100755 --- a/deps/AMICI/scripts/buildBNGL.sh +++ b/deps/AMICI/scripts/buildBNGL.sh @@ -10,12 +10,12 @@ amici_path=$(cd "$script_path/.." && pwd) mkdir -p "${amici_path}/ThirdParty" cd "${amici_path}/ThirdParty" -if [ ! -d "BioNetGen-2.5.2" ]; then +if [ ! -d "BioNetGen-2.7.0" ]; then if [ ! -e "bionetgen.tar.gz" ]; then if [[ "$OSTYPE" == "linux-gnu" || "$OSTYPE" == "linux" ]]; then - wget -q -O bionetgen.tar.gz https://github.com/RuleWorld/bionetgen/releases/download/BioNetGen-2.5.2/BioNetGen-2.5.2-linux.tgz + wget -q -O bionetgen.tar.gz https://github.com/RuleWorld/bionetgen/releases/download/BioNetGen-2.7.0/BioNetGen-2.7.0-linux.tgz elif [[ "$OSTYPE" == "darwin"* ]]; then - wget -q -O bionetgen.tar.gz https://github.com/RuleWorld/bionetgen/releases/download/BioNetGen-2.5.2/BioNetGen-2.5.2-mac.tgz + wget -q -O bionetgen.tar.gz https://github.com/RuleWorld/bionetgen/releases/download/BioNetGen-2.7.0/BioNetGen-2.7.0-mac.tgz fi fi tar -xf bionetgen.tar.gz diff --git a/deps/AMICI/scripts/run-codecov.sh b/deps/AMICI/scripts/run-codecov.sh index 3db9f0dd8..42d76f7a0 100755 --- a/deps/AMICI/scripts/run-codecov.sh +++ b/deps/AMICI/scripts/run-codecov.sh @@ -8,7 +8,7 @@ source "${amici_path}"/build/venv/bin/activate pip install coverage pytest pytest-cov if [[ -z "${BNGPATH}" ]]; then - export BNGPATH="${amici_path}"/ThirdParty/BioNetGen-2.5.2 + export BNGPATH="${amici_path}"/ThirdParty/BioNetGen-2.7.0 fi pytest \ diff --git a/deps/AMICI/scripts/run-python-tests.sh b/deps/AMICI/scripts/run-python-tests.sh index 045148c63..e0b33eb47 100755 --- a/deps/AMICI/scripts/run-python-tests.sh +++ b/deps/AMICI/scripts/run-python-tests.sh @@ -7,7 +7,7 @@ amici_path=$(cd "$script_path"/.. 
&& pwd) set -e if [[ -z "${BNGPATH}" ]]; then - export BNGPATH=${amici_path}/ThirdParty/BioNetGen-2.5.2 + export BNGPATH=${amici_path}/ThirdParty/BioNetGen-2.7.0 fi cd "${amici_path}"/python/tests diff --git a/deps/AMICI/scripts/run-valgrind-py.sh b/deps/AMICI/scripts/run-valgrind-py.sh index 7d43861a2..241a6bd23 100755 --- a/deps/AMICI/scripts/run-valgrind-py.sh +++ b/deps/AMICI/scripts/run-valgrind-py.sh @@ -7,7 +7,7 @@ amici_path=$(cd "$script_path"/.. && pwd) set -e if [[ -z "${BNGPATH}" ]]; then - export BNGPATH=${amici_path}/ThirdParty/BioNetGen-2.5.2 + export BNGPATH=${amici_path}/ThirdParty/BioNetGen-2.7.0 fi cd "${amici_path}"/python/tests diff --git a/deps/AMICI/scripts/travis_wrap.sh b/deps/AMICI/scripts/travis_wrap.sh deleted file mode 100755 index 58314beea..000000000 --- a/deps/AMICI/scripts/travis_wrap.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -# wrapper script to enable code folding on travis-ci -# -# Note: Expects travis functions exported in before_script -# -# Usage: travis_wrap.sh fold-block-id script [script args] -# fold-block-id should not contain special characters, blanks newlines, ... - -set -e -travis_time_finish -travis_fold start "$1" - travis_time_start - bash -c "${@:2:$#}" - travis_time_finish -travis_fold end "$1" diff --git a/deps/AMICI/src/model.ODE_template.cpp b/deps/AMICI/src/model.ODE_template.cpp index af1e5845c..5c9f69b6a 100644 --- a/deps/AMICI/src/model.ODE_template.cpp +++ b/deps/AMICI/src/model.ODE_template.cpp @@ -21,6 +21,10 @@ std::array observableNames = { TPL_OBSERVABLE_NAMES_INITIALIZER_LIST }; +std::array observableScalings = { + TPL_OBSERVABLE_TRAFO_INITIALIZER_LIST +}; + std::array expressionNames = { TPL_EXPRESSION_NAMES_INITIALIZER_LIST }; diff --git a/deps/AMICI/src/model.cpp b/deps/AMICI/src/model.cpp index 2123be742..e37b7409d 100644 --- a/deps/AMICI/src/model.cpp +++ b/deps/AMICI/src/model.cpp @@ -791,6 +791,10 @@ void Model::getObservable(gsl::span y, const realtype t, writeSlice(derived_state_.y_, y); } +ObservableScaling Model::getObservableScaling(int /*iy*/) const { + return ObservableScaling::lin; +} + void Model::getObservableSensitivity(gsl::span sy, const realtype t, const AmiVector &x, const AmiVectorArray &sx) { diff --git a/deps/AMICI/src/model_header.ODE_template.h b/deps/AMICI/src/model_header.ODE_template.h index 79eec1d51..c35d07918 100644 --- a/deps/AMICI/src/model_header.ODE_template.h +++ b/deps/AMICI/src/model_header.ODE_template.h @@ -19,6 +19,7 @@ extern std::array parameterNames; extern std::array fixedParameterNames; extern std::array stateNames; extern std::array observableNames; +extern std::array observableScalings; extern std::array expressionNames; extern std::array parameterIds; extern std::array fixedParameterIds; @@ -565,6 +566,10 @@ class Model_TPL_MODELNAME : public amici::Model_ODE { virtual bool hasQuadraticLLH() const override { return TPL_QUADRATIC_LLH; } + + virtual ObservableScaling getObservableScaling(int iy) const override { + return observableScalings.at(iy); + } }; diff --git a/deps/AMICI/src/rdata.cpp b/deps/AMICI/src/rdata.cpp index df64d76eb..38fcf8562 100644 --- a/deps/AMICI/src/rdata.cpp +++ b/deps/AMICI/src/rdata.cpp @@ -777,8 +777,18 @@ void ReturnData::initializeObjectiveFunction(bool enable_chi2) { chi2 = 0.0; } -static realtype fres(realtype y, realtype my, realtype sigma_y) { - return (y - my) / sigma_y; +static realtype fres(realtype y, realtype my, realtype sigma_y, + ObservableScaling scale) { + switch (scale) { + case amici::ObservableScaling::lin: + return (y - my) 
/ sigma_y; + case amici::ObservableScaling::log: + return (std::log(y) - std::log(my)) / sigma_y; + case amici::ObservableScaling::log10: + return (std::log10(y) - std::log10(my)) / sigma_y; + default: + throw std::invalid_argument("only lin, log, log10 allowed."); + } } static realtype fres_error(realtype sigma_y, realtype sigma_offset) { @@ -800,8 +810,11 @@ void ReturnData::fres(const int it, Model &model, const ExpData &edata) { int iyt = iy + it * edata.nytrue(); if (!edata.isSetObservedData(it, iy)) continue; + res.at(iyt) = amici::fres(y_it.at(iy), observedData[iy], - sigmay_it.at(iy)); + sigmay_it.at(iy), + model.getObservableScaling(iy)); + if (sigma_res) res.at(iyt + nt * nytrue) = fres_error(sigmay_it.at(iy), sigma_offset); @@ -821,10 +834,20 @@ void ReturnData::fchi2(const int it, const ExpData &edata) { } static realtype fsres(realtype y, realtype sy, realtype my, - realtype sigma_y, realtype ssigma_y) { - return (sy - ssigma_y * fres(y, my, sigma_y)) / sigma_y; + realtype sigma_y, realtype ssigma_y, + ObservableScaling scale) { + auto res = fres(y, my, sigma_y, scale); + switch (scale) { + case amici::ObservableScaling::lin: + return (sy - ssigma_y * res) / sigma_y; + case amici::ObservableScaling::log: + return (sy / y - ssigma_y * res) / sigma_y; + case amici::ObservableScaling::log10: + return (sy / (y * std::log(10)) - ssigma_y * res) / sigma_y; + default: + throw std::invalid_argument("only lin, log, log10 allowed."); + } } - static realtype fsres_error(realtype sigma_y, realtype ssigma_y, realtype sigma_offset) { return ssigma_y / ( fres_error(sigma_y, sigma_offset) * sigma_y); @@ -851,9 +874,12 @@ void ReturnData::fsres(const int it, Model &model, const ExpData &edata) { continue; for (int ip = 0; ip < nplist; ++ip) { int idx = (iy + it * edata.nytrue()) * nplist + ip; + sres.at(idx) = amici::fsres(y_it.at(iy), sy_it.at(iy + ny * ip), observedData[iy], sigmay_it.at(iy), - ssigmay_it.at(iy + ny * ip)); + ssigmay_it.at(iy + ny * ip), + model.getObservableScaling(iy)); + if (sigma_res) { int idx_res = (iy + it * edata.nytrue() + edata.nytrue() * edata.nt()) * @@ -941,18 +967,19 @@ void ReturnData::fFIM(int it, Model &model, const ExpData &edata) { auto y = y_it.at(iy); auto m = observedData[iy]; auto s = sigmay_it.at(iy); + auto os = model.getObservableScaling(iy); // auto r = amici::fres(y, m, s); for (int ip = 0; ip < nplist; ++ip) { auto dy_i = sy_it.at(iy + ny * ip); auto ds_i = ssigmay_it.at(iy + ny * ip); - auto sr_i = amici::fsres(y, dy_i, m, s, ds_i); + auto sr_i = amici::fsres(y, dy_i, m, s, ds_i, os); realtype sre_i = 0.0; if (sigma_res) sre_i = amici::fsres_error(s, ds_i, sigma_offset); for (int jp = 0; jp < nplist; ++jp) { auto dy_j = sy_it.at(iy + ny * jp); auto ds_j = ssigmay_it.at(iy + ny * jp); - auto sr_j = amici::fsres(y, dy_j, m, s, ds_j); + auto sr_j = amici::fsres(y, dy_j, m, s, ds_j, os); FIM.at(ip + nplist * jp) += sr_i*sr_j; if (sigma_res) { auto sre_j = amici::fsres_error(s, ds_j, sigma_offset); diff --git a/deps/AMICI/src/steadystateproblem.cpp b/deps/AMICI/src/steadystateproblem.cpp index 969c5723f..8c433efcc 100644 --- a/deps/AMICI/src/steadystateproblem.cpp +++ b/deps/AMICI/src/steadystateproblem.cpp @@ -198,9 +198,15 @@ void SteadystateProblem::findSteadyStateBySimulation(const Solver *solver, steady_state_status_[1] = SteadyStateStatus::failed_too_long_simulation; break; default: + model->app->warningF("AMICI:newton", + "AMICI newton method failed: %s\n", + ex.what()); steady_state_status_[1] = SteadyStateStatus::failed; } - } catch 
(AmiException const &) { + } catch (AmiException const &ex) { + model->app->warningF("AMICI:equilibration", + "AMICI equilibration failed: %s\n", + ex.what()); steady_state_status_[1] = SteadyStateStatus::failed; } } @@ -462,7 +468,7 @@ bool SteadystateProblem::checkConvergence(const Solver *solver, sx_ = solver->getStateSensitivity(t_); model->fsxdot(t_, x_, dx_, ip, sx_[ip], dx_, xdot_); wrms_ = getWrmsNorm( - x_, xdot_, solver->getAbsoluteToleranceSteadyStateSensi(), + sx_[ip], xdot_, solver->getAbsoluteToleranceSteadyStateSensi(), solver->getRelativeToleranceSteadyStateSensi(), ewt_); converged = wrms_ < RCONST(1.0); } @@ -504,8 +510,8 @@ void SteadystateProblem::applyNewtonsMethod(Model *model, xdot_old_ = xdot_; wrms_ = getWrmsNorm(x_newton_, xdot_, newtonSolver->atol_, - newtonSolver->rtol_, ewt_); - bool converged = wrms_ < RCONST(1.0); + newtonSolver->rtol_, ewt_); + bool converged = newton_retry ? false : wrms_ < RCONST(1.0); while (!converged && i_newtonstep < newtonSolver->max_steps) { /* If Newton steps are necessary, compute the initial search direction */ @@ -594,7 +600,7 @@ void SteadystateProblem::runSteadystateSimulation(const Solver *solver, /* Do we also have to check for convergence of sensitivities? */ SensitivityMethod sensitivityFlag = SensitivityMethod::none; if (solver->getSensitivityOrder() > SensitivityOrder::none && - solver->getSensitivityMethod() > SensitivityMethod::none) + solver->getSensitivityMethod() == SensitivityMethod::forward) sensitivityFlag = SensitivityMethod::forward; /* If flag for forward sensitivity computation by simulation is not set, disable forward sensitivity integration. Sensitivities will be computed diff --git a/deps/AMICI/src/sundials_matrix_wrapper.cpp b/deps/AMICI/src/sundials_matrix_wrapper.cpp index 4e080958c..9fb2fee02 100644 --- a/deps/AMICI/src/sundials_matrix_wrapper.cpp +++ b/deps/AMICI/src/sundials_matrix_wrapper.cpp @@ -6,7 +6,6 @@ #include // bad_alloc #include #include // invalid_argument and domain_error -#include namespace amici { @@ -140,7 +139,7 @@ void SUNMatrixWrapper::reallocate(sunindextype NNZ) { update_ptrs(); capacity_ = NNZ; - assert((NNZ && columns() && rows()) ^ !matrix_); + assert((NNZ && columns() && rows()) || !matrix_); } void SUNMatrixWrapper::realloc() { @@ -153,24 +152,10 @@ void SUNMatrixWrapper::realloc() { update_ptrs(); capacity_ = num_nonzeros_; - assert(capacity() ^ !matrix_); + assert(capacity() || !matrix_); } -sunindextype SUNMatrixWrapper::rows() const { - assert(!matrix_ || - (matrix_id() == SUNMATRIX_SPARSE ? - num_rows_ == SM_ROWS_S(matrix_) : - num_rows_ == SM_ROWS_D(matrix_))); - return num_rows_; -} -sunindextype SUNMatrixWrapper::columns() const { - assert(!matrix_ || - (matrix_id() == SUNMATRIX_SPARSE ? 
- num_columns_ == SM_COLUMNS_S(matrix_) : - num_columns_ == SM_COLUMNS_D(matrix_))); - return num_columns_; -} sunindextype SUNMatrixWrapper::num_indexptrs() const { assert(matrix_id() == SUNMATRIX_SPARSE); @@ -195,40 +180,6 @@ sunindextype SUNMatrixWrapper::num_nonzeros() const { return num_nonzeros_; } -realtype SUNMatrixWrapper::get_data(sunindextype idx) const{ - assert(matrix_); - assert(matrix_id() == SUNMATRIX_SPARSE); - assert(idx < capacity()); - assert(SM_DATA_S(matrix_) == data_); - return data_[idx]; -}; - -realtype SUNMatrixWrapper::get_data(sunindextype irow, sunindextype icol) const{ - assert(matrix_); - assert(matrix_id() == SUNMATRIX_DENSE); - assert(irow < rows()); - assert(icol < columns()); - return SM_ELEMENT_D(matrix_, irow, icol); -}; - - -void SUNMatrixWrapper::set_data(sunindextype idx, realtype data) { - assert(matrix_); - assert(matrix_id() == SUNMATRIX_SPARSE); - assert(idx < capacity()); - assert(SM_DATA_S(matrix_) == data_); - data_[idx] = data; -} - -void SUNMatrixWrapper::set_data(sunindextype irow, sunindextype icol, - realtype data) { - assert(matrix_); - assert(matrix_id() == SUNMATRIX_DENSE); - assert(irow < rows()); - assert(icol < columns()); - SM_ELEMENT_D(matrix_, irow, icol) = data; -} - const realtype *SUNMatrixWrapper::data() const { return data_; } @@ -237,58 +188,6 @@ realtype *SUNMatrixWrapper::data() { return data_; } -sunindextype SUNMatrixWrapper::get_indexval(sunindextype idx) const { - assert(matrix_); - assert(matrix_id() == SUNMATRIX_SPARSE); - assert(idx < capacity()); - assert(indexvals_ == SM_INDEXVALS_S(matrix_)); - return indexvals_[idx]; -} - -void SUNMatrixWrapper::set_indexval(sunindextype idx, sunindextype val) { - assert(matrix_); - assert(matrix_id() == SUNMATRIX_SPARSE); - assert(idx < capacity()); - assert(indexvals_ == SM_INDEXVALS_S(matrix_)); - indexvals_[idx] = val; -} - -void SUNMatrixWrapper::set_indexvals(const gsl::span vals) { - assert(matrix_); - assert(matrix_id() == SUNMATRIX_SPARSE); - assert(static_cast(vals.size()) == capacity()); - assert(indexvals_ == SM_INDEXVALS_S(matrix_)); - std::copy_n(vals.begin(), capacity(), indexvals_); -} - -sunindextype SUNMatrixWrapper::get_indexptr(sunindextype ptr_idx) const { - assert(matrix_); - assert(matrix_id() == SUNMATRIX_SPARSE); - assert(ptr_idx <= num_indexptrs()); - assert(indexptrs_ == SM_INDEXPTRS_S(matrix_)); - return indexptrs_[ptr_idx]; -} - -void SUNMatrixWrapper::set_indexptr(sunindextype ptr_idx, sunindextype ptr) { - assert(matrix_); - assert(matrix_id() == SUNMATRIX_SPARSE); - assert(ptr_idx <= num_indexptrs()); - assert(ptr <= capacity()); - assert(indexptrs_ == SM_INDEXPTRS_S(matrix_)); - indexptrs_[ptr_idx] = ptr; - if (ptr_idx == num_indexptrs()) - num_nonzeros_ = ptr; -} - -void SUNMatrixWrapper::set_indexptrs(const gsl::span ptrs) { - assert(matrix_); - assert(matrix_id() == SUNMATRIX_SPARSE); - assert(static_cast(ptrs.size()) == num_indexptrs() + 1); - assert(indexptrs_ == SM_INDEXPTRS_S(matrix_)); - std::copy_n(ptrs.begin(), num_indexptrs() + 1, indexptrs_); - num_nonzeros_ = indexptrs_[num_indexptrs()]; -} - int SUNMatrixWrapper::sparsetype() const { assert(matrix_); assert(matrix_id() == SUNMATRIX_SPARSE); @@ -297,7 +196,8 @@ int SUNMatrixWrapper::sparsetype() const { void SUNMatrixWrapper::scale(realtype a) { if (matrix_) { - for (sunindextype idx = 0; idx < capacity(); ++idx) + auto cap = capacity(); + for (sunindextype idx = 0; idx < cap; ++idx) data_[idx] *= a; } } @@ -382,17 +282,38 @@ void SUNMatrixWrapper::multiply(gsl::span c, return; /* 
Carry out actual multiplication */ - sunindextype idx; + auto c_ptr = c.data(); + auto b_ptr = b.data(); + if (transpose) { - for (int icols = 0; icols < (int)cols.size(); ++icols) - for (idx = get_indexptr(cols.at(icols)); - idx < get_indexptr(cols.at(icols) + 1); ++idx) - c.at(icols) += get_data(idx) * b.at(get_indexval(idx)); + auto cols_size = cols.size(); + for (std::size_t icols = 0; icols < cols_size; ++icols) { + auto idx_next_col = get_indexptr(cols.at(icols) + 1); + for (sunindextype idx = get_indexptr(cols.at(icols)); + idx < idx_next_col; ++idx) { + + auto idx_val = get_indexval(idx); + assert(icols < c.size()); + assert(static_cast(idx_val) < b.size()); + + c_ptr[icols] += get_data(idx) * b_ptr[idx_val]; + } + } } else { - for (sunindextype icols = 0; icols < columns(); ++icols) - for (idx = get_indexptr(cols.at(icols)); - idx < get_indexptr(cols.at(icols)+1); ++idx) - c.at(get_indexval(idx)) += get_data(idx) * b.at(icols); + auto num_cols = static_cast(columns()); + for (std::size_t icols = 0; icols < num_cols; ++icols) { + auto idx_next_col = get_indexptr(cols.at(icols) + 1); + + for (sunindextype idx = get_indexptr(cols.at(icols)); + idx < idx_next_col; ++idx) { + auto idx_val = get_indexval(idx); + + assert(icols < b.size()); + assert(static_cast(idx_val) < c.size()); + + c_ptr[idx_val] += get_data(idx) * b_ptr[icols]; + } + } } } @@ -512,7 +433,9 @@ void SUNMatrixWrapper::sparse_add(const SUNMatrixWrapper &A, realtype alpha, nnz); // no reallocation should happen here for (cidx = get_indexptr(ccol); cidx < nnz; cidx++) { - set_data(cidx, x.at(get_indexval(cidx))); // copy data to C + auto x_idx = get_indexval(cidx); + assert(x_idx >= 0 && static_cast(x_idx) < x.size()); + set_data(cidx, x[x_idx]); // copy data to C } } set_indexptr(num_indexptrs(), nnz); @@ -522,7 +445,11 @@ void SUNMatrixWrapper::sparse_add(const SUNMatrixWrapper &A, realtype alpha, void SUNMatrixWrapper::sparse_sum(const std::vector &mats) { // matrix_ == nullptr is allowed on the first call - if (std::all_of(mats.begin(), mats.end(), [](const SUNMatrixWrapper &m){return !m.matrix_;})) + auto all_empty = std::all_of(mats.begin(), mats.end(), + [](const SUNMatrixWrapper &m){ + return !m.matrix_; + }); + if (all_empty) return; check_csc(this); @@ -558,13 +485,15 @@ void SUNMatrixWrapper::sparse_sum(const std::vector &mats) { for (acol = 0; acol < columns(); acol++) { - set_indexptr(acol, nnz); /* column j of A starts here */ + set_indexptr(acol, nnz); /* column j of A starts here */ for (auto & mat : mats) nnz = mat.scatter(acol, 1.0, w.data(), gsl::make_span(x), acol+1, this, nnz); // no reallocation should happen here for (aidx = get_indexptr(acol); aidx < nnz; aidx++) { - set_data(aidx, x.at(get_indexval(aidx))); // copy data to C + auto x_idx = get_indexval(aidx); + assert(x_idx >= 0 && static_cast(x_idx) < x.size()); + set_data(aidx, x[x_idx]); // copy data to C } } set_indexptr(num_indexptrs(), nnz); @@ -572,8 +501,6 @@ void SUNMatrixWrapper::sparse_sum(const std::vector &mats) { realloc(); // resize if necessary } -static const std::string scatter_name = "scatter"; - sunindextype SUNMatrixWrapper::scatter(const sunindextype acol, const realtype beta, sunindextype *w, @@ -597,13 +524,15 @@ sunindextype SUNMatrixWrapper::scatter(const sunindextype acol, for (aidx = get_indexptr(acol); aidx < get_indexptr(acol+1); aidx++) { auto arow = get_indexval(aidx); /* A(arow,acol) is nonzero */ + assert(arow >= 0 && static_cast(arow) <= x.size()); if (w && w[arow] < mark) { w[arow] = mark; /* arow is new entry 
in C(:,*) */ if (C) C->set_indexval(nnz++, arow); /* add arow to pattern of C(:,*) */ - x.at(arow) = beta * get_data(aidx); /* x(arow) = beta*A(arow,acol) */ - } else - x.at(arow) += beta * get_data(aidx); /* arow exists in C(:,*) already */ + x[arow] = beta * get_data(aidx); /* x(arow) = beta*A(arow,acol) */ + } else { + x[arow] += beta * get_data(aidx); /* arow exists in C(:,*) already */ + } } assert(!C || nnz <= C->capacity()); return nnz; @@ -612,10 +541,9 @@ sunindextype SUNMatrixWrapper::scatter(const sunindextype acol, // https://github.com/DrTimothyAldenDavis/SuiteSparse/blob/master/CSparse/Source/cs_cumsum.c /* p [0..n] = cumulative sum of c[0..n-1], and then copy p [0..n-1] into c */ static void cumsum(gsl::span p, std::vector &c) { - sunindextype i; sunindextype nz = 0; assert(p.size() == c.size() + 1); - for (i = 0; i < static_cast(c.size()); i++) + for (sunindextype i = 0; i < static_cast(c.size()); i++) { p[i] = nz; nz += c[i]; @@ -652,45 +580,62 @@ void SUNMatrixWrapper::transpose(SUNMatrixWrapper &C, const realtype alpha, // see https://github.com/DrTimothyAldenDavis/SuiteSparse/blob/master/CSparse/Source/cs_transpose.c - std::vector w; auto nrows = rows(); + if (C_matrix_id == SUNMATRIX_SPARSE) { - w = std::vector(columns()); + std::vector w(columns()); + auto w_data = w.data(); + for (sunindextype acol = 0; acol < nrows; acol++) { /* row counts */ auto next_indexptr = get_indexptr(acol+1); + auto widx_offset = (acol/blocksize)*blocksize; for (sunindextype aidx = get_indexptr(acol); aidx < next_indexptr; aidx++) { - sunindextype widx = (acol/blocksize)*blocksize + get_indexval(aidx) % blocksize; + sunindextype widx = widx_offset + get_indexval(aidx) % blocksize; assert(widx >= 0 && widx < (sunindextype)w.size()); - w[widx]++; - assert(w[widx] <= nrows); + w_data[widx]++; + assert(w_data[widx] <= nrows); } } /* row pointers */ cumsum(gsl::make_span(C.indexptrs_, C.columns()+1), w); - } - - for (sunindextype acol = 0; acol < nrows; acol++) - { - auto next_indexptr = get_indexptr(acol+1); - for (sunindextype aidx = get_indexptr(acol); aidx < next_indexptr; aidx++) + for (sunindextype acol = 0; acol < nrows; acol++) { - sunindextype ccol = (acol/blocksize)*blocksize + get_indexval(aidx) % blocksize; - sunindextype crow = (get_indexval(aidx)/blocksize)*blocksize + acol % blocksize; - assert(crow < nrows); - assert(ccol < columns()); - if (C_matrix_id == SUNMATRIX_SPARSE) { + auto next_indexptr = get_indexptr(acol+1); + auto ccol_offset = (acol/blocksize)*blocksize; + auto crow_offset = acol % blocksize; + for (sunindextype aidx = get_indexptr(acol); aidx < next_indexptr; aidx++) + { + auto indexval_aidx = get_indexval(aidx); + sunindextype ccol = ccol_offset + indexval_aidx % blocksize; + sunindextype crow = (indexval_aidx/blocksize)*blocksize + crow_offset; + assert(crow < nrows); + assert(ccol < columns()); assert(aidx < capacity()); assert(ccol >= 0 && ccol < (sunindextype)w.size()); - sunindextype cidx = w[ccol]++; + sunindextype cidx = w_data[ccol]++; C.set_indexval(cidx, crow); /* place A(i,j) as entry C(j,i) */ C.set_data(cidx, alpha * get_data(aidx)); - } else { + } + } + } else { + + for (sunindextype acol = 0; acol < nrows; acol++) + { + auto next_indexptr = get_indexptr(acol+1); + + for (sunindextype aidx = get_indexptr(acol); aidx < next_indexptr; aidx++) + { + sunindextype ccol = (acol/blocksize)*blocksize + get_indexval(aidx) % blocksize; + sunindextype crow = (get_indexval(aidx)/blocksize)*blocksize + acol % blocksize; + assert(crow < nrows); + assert(ccol < 
columns()); C.set_data(crow, ccol, alpha * get_data(aidx)); } } } + } void SUNMatrixWrapper::to_dense(SUNMatrixWrapper &D) const { diff --git a/deps/AMICI/swig/amici.i b/deps/AMICI/swig/amici.i index 80a7c0d4e..7253545d6 100644 --- a/deps/AMICI/swig/amici.i +++ b/deps/AMICI/swig/amici.i @@ -161,6 +161,7 @@ def enum(prefix): values = {k[len(prefix)+1:]:v for k,v in values.items()} return IntEnum(prefix, values) ParameterScaling = enum('ParameterScaling') +ObservableScaling = enum('ObservableScaling') SecondOrderMode = enum('SecondOrderMode') SensitivityOrder = enum('SensitivityOrder') SensitivityMethod = enum('SensitivityMethod') diff --git a/deps/AMICI/tests/petab_test_suite/test_petab_suite.py b/deps/AMICI/tests/petab_test_suite/test_petab_suite.py index 73c7a58db..5e0695c3e 100755 --- a/deps/AMICI/tests/petab_test_suite/test_petab_suite.py +++ b/deps/AMICI/tests/petab_test_suite/test_petab_suite.py @@ -108,14 +108,9 @@ def _test_case(case, model_type): logger.log(logging.DEBUG if simulations_match else logging.ERROR, f"Simulations: match = {simulations_match}") - # FIXME case 7 fails due to #963 - if case not in ['0007', '0016']: - check_derivatives(problem, model) - - # FIXME case 7 fails due to #963 - if not all([llhs_match, simulations_match]) \ - or (not chi2s_match and case not in ['0007', '0016']): - # chi2s_match ignored until fixed in amici + check_derivatives(problem, model) + + if not all([llhs_match, simulations_match]) or not chi2s_match: logger.error(f"Case {case} failed.") raise AssertionError(f"Case {case}: Test results do not match " "expectations") diff --git a/deps/AMICI/version.txt b/deps/AMICI/version.txt index 168a8f63a..8f78d386f 100644 --- a/deps/AMICI/version.txt +++ b/deps/AMICI/version.txt @@ -1 +1 @@ -0.11.18 +0.11.19 From d9d2766f1ad2769152954756885dd14d4cbef597 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 28 Oct 2021 16:54:25 +0200 Subject: [PATCH 06/11] CI: Update sonar scanner (#358) Use updated CI image --- .github/workflows/benchmark_models.yml | 2 +- .github/workflows/parpe_tests.yml | 4 ++-- .github/workflows/petab_testsuite.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/benchmark_models.yml b/.github/workflows/benchmark_models.yml index 6d31f946a..9a60c4fdd 100644 --- a/.github/workflows/benchmark_models.yml +++ b/.github/workflows/benchmark_models.yml @@ -5,7 +5,7 @@ on: [push, pull_request, workflow_dispatch] jobs: container: runs-on: ubuntu-latest - container: dweindl/parpeci:1902967 + container: ghcr.io/icb-dcm/custom_ci_image:master name: benchmark models steps: - uses: actions/checkout@master diff --git a/.github/workflows/parpe_tests.yml b/.github/workflows/parpe_tests.yml index f843d6741..c795970f8 100644 --- a/.github/workflows/parpe_tests.yml +++ b/.github/workflows/parpe_tests.yml @@ -5,7 +5,7 @@ on: [push, pull_request, workflow_dispatch] jobs: container: runs-on: ubuntu-latest - container: dweindl/parpeci:1902967 + container: ghcr.io/icb-dcm/custom_ci_image:master name: parPE tests env: # mpiexec prefix for running tests @@ -22,7 +22,7 @@ jobs: - run: echo "AMICI_PATH=${PARPE_BASE}/deps/AMICI/" >> $GITHUB_ENV # sonar cloud - - run: echo "SONAR_SCANNER_VERSION=4.6.0.2311" >> $GITHUB_ENV + - run: echo "SONAR_SCANNER_VERSION=4.6.1.2450" >> $GITHUB_ENV - run: echo "SONAR_SCANNER_HOME=/root/.sonar/sonar-scanner-$SONAR_SCANNER_VERSION-linux" >> $GITHUB_ENV - run: echo "SONAR_SCANNER_OPTS=-server" >> $GITHUB_ENV - run: echo "${SONAR_SCANNER_HOME}/bin" >> $GITHUB_PATH diff --git 
a/.github/workflows/petab_testsuite.yml b/.github/workflows/petab_testsuite.yml index 7dabfcc05..0cf36659d 100644 --- a/.github/workflows/petab_testsuite.yml +++ b/.github/workflows/petab_testsuite.yml @@ -6,7 +6,7 @@ on: [push, pull_request, workflow_dispatch] jobs: container: runs-on: ubuntu-latest - container: dweindl/parpeci:1902967 + container: ghcr.io/icb-dcm/custom_ci_image:master name: PEtab test suite steps: - uses: actions/checkout@master From cfd0d4202846791aaaedd316980b03f0a3dcb861 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 4 Nov 2021 00:36:30 +0100 Subject: [PATCH 07/11] Various cleanup (#360) * redundant virtual * enum class * using * fix potentially accessing destroyed mutex * Replace getScalarProduct by STL inner_product --- .../steadystate/main_multicondition.cpp | 4 +- include/parpeamici/amiciSimulationRunner.h | 6 +- include/parpeamici/hierarchicalOptimization.h | 12 ++-- .../parpeamici/multiConditionDataProvider.h | 56 +++++++++---------- include/parpeamici/multiConditionProblem.h | 2 +- include/parpeamici/optimizationApplication.h | 4 +- include/parpecommon/hdf5Misc.h | 4 +- include/parpecommon/logging.h | 15 +++-- .../localOptimizationCeres.h | 2 +- .../localOptimizationIpoptTNLP.h | 22 ++++---- .../parpeoptimization/minibatchOptimization.h | 39 ++++++------- sonar-project.properties | 1 + src/parpeamici/hierarchicalOptimization.cpp | 36 ++++++------ src/parpeamici/multiConditionDataProvider.cpp | 4 +- src/parpeamici/multiConditionProblem.cpp | 22 ++++---- src/parpeamici/optimizationApplication.cpp | 20 +++---- src/parpeamici/standaloneSimulator.cpp | 8 +-- src/parpecommon/hdf5Misc.cpp | 8 +-- src/parpecommon/logging.cpp | 30 +++++----- src/parpecommon/misc.cpp | 4 +- src/parpeloadbalancer/loadBalancerMaster.cpp | 6 +- src/parpeloadbalancer/loadBalancerWorker.cpp | 2 +- .../localOptimizationCeres.cpp | 14 ++--- .../localOptimizationFides.cpp | 8 +-- .../localOptimizationFsqp.cpp | 6 +- .../localOptimizationIpopt.cpp | 10 ++-- .../localOptimizationIpoptTNLP.cpp | 30 +++++----- .../localOptimizationToms611.cpp | 4 +- .../minibatchOptimization.cpp | 22 +++----- .../multiStartOptimization.cpp | 22 ++++---- src/parpeoptimization/optimizationOptions.cpp | 6 +- src/parpeoptimization/optimizationProblem.cpp | 20 +++---- .../optimizationResultWriter.cpp | 2 +- templates/main.cpp | 6 +- templates/main_debug.cpp | 2 +- templates/main_nominal.cpp | 8 +-- tests/parpecommon/commonTests.cpp | 2 +- 37 files changed, 225 insertions(+), 244 deletions(-) diff --git a/examples/parpeamici/steadystate/main_multicondition.cpp b/examples/parpeamici/steadystate/main_multicondition.cpp index 1831161f9..e1d317708 100644 --- a/examples/parpeamici/steadystate/main_multicondition.cpp +++ b/examples/parpeamici/steadystate/main_multicondition.cpp @@ -40,8 +40,8 @@ class SteadystateApplication : public parpe::OptimizationApplication { ~SteadystateApplication() override = default; - void initProblem(std::string inFileArgument, - std::string outFileArgument) override + void initProblem(std::string const& inFileArgument, + std::string const& outFileArgument) override { // The same file should only be opened/created once, an then only be reopened diff --git a/include/parpeamici/amiciSimulationRunner.h b/include/parpeamici/amiciSimulationRunner.h index 270fb49bf..771dab670 100644 --- a/include/parpeamici/amiciSimulationRunner.h +++ b/include/parpeamici/amiciSimulationRunner.h @@ -150,8 +150,7 @@ operator==(AmiciSimulationRunner::AmiciResultPackageSimple const& lhs, } // namespace parpe 
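One of the cleanups listed in this commit's summary replaces parPE's hand-rolled getScalarProduct helper with the standard library's std::inner_product. A minimal sketch of the equivalent call (vector names here are illustrative, not taken from the patch):

    #include <numeric>
    #include <vector>

    // Dot product of two equally sized vectors, computed as
    // 0.0 + v[0]*w[0] + v[1]*w[1] + ... by std::inner_product.
    double scalarProduct(const std::vector<double>& v,
                         const std::vector<double>& w) {
        return std::inner_product(v.begin(), v.end(), w.begin(), 0.0);
    }

The same call appears further down in minibatchOptimization.h, where it computes the direction-gradient product for the descent-direction check in the line search.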
-namespace boost { -namespace serialization { +namespace boost::serialization { template void @@ -180,7 +179,6 @@ serialize(Archive& ar, ar& u.status; } -} // namespace boost -} // namespace serialization +} // namespace boost::serialization #endif // PARPE_AMICI_SIMULATIONRUNNER_H diff --git a/include/parpeamici/hierarchicalOptimization.h b/include/parpeamici/hierarchicalOptimization.h index 016cf40cb..c9868cd1b 100644 --- a/include/parpeamici/hierarchicalOptimization.h +++ b/include/parpeamici/hierarchicalOptimization.h @@ -272,11 +272,11 @@ class HierarchicalOptimizationProblemWrapper : public OptimizationProblem HierarchicalOptimizationProblemWrapper( HierarchicalOptimizationProblemWrapper const& other) = delete; - virtual void fillInitialParameters(gsl::span buffer) const override; + void fillInitialParameters(gsl::span buffer) const override; - virtual void fillParametersMin(gsl::span buffer) const override; + void fillParametersMin(gsl::span buffer) const override; - virtual void fillParametersMax(gsl::span buffer) const override; + void fillParametersMax(gsl::span buffer) const override; void fillFilteredParams(std::vector const& fullParams, gsl::span buffer) const; @@ -292,7 +292,7 @@ class HierarchicalOptimizationProblemWrapper : public OptimizationProblem // TODO: need to ensure that this will work with the reduced number of // parameters - virtual std::unique_ptr getReporter() const override; + std::unique_ptr getReporter() const override; private: std::unique_ptr wrapped_problem_; @@ -322,12 +322,12 @@ class HierarchicalOptimizationReporter : public OptimizationReporter // bool starting(gsl::span initialParameters) const override; // TODO: always update final parameters - virtual bool iterationFinished( + bool iterationFinished( gsl::span parameters, double objectiveFunctionValue, gsl::span objectiveFunctionGradient) const override; - virtual bool afterCostFunctionCall( + bool afterCostFunctionCall( gsl::span parameters, double objectiveFunctionValue, gsl::span objectiveFunctionGradient) const override; diff --git a/include/parpeamici/multiConditionDataProvider.h b/include/parpeamici/multiConditionDataProvider.h index f3ae58b29..d3dbe62f6 100644 --- a/include/parpeamici/multiConditionDataProvider.h +++ b/include/parpeamici/multiConditionDataProvider.h @@ -122,7 +122,7 @@ class MultiConditionDataProviderDefault : public MultiConditionDataProvider MultiConditionDataProviderDefault(std::unique_ptr model, std::unique_ptr solver); - virtual ~MultiConditionDataProviderDefault() override = default; + ~MultiConditionDataProviderDefault() override = default; /** * @brief Provides the number of conditions for which data is available and @@ -132,63 +132,63 @@ class MultiConditionDataProviderDefault : public MultiConditionDataProvider * dataset. 
* @return Number of conditions */ - virtual int getNumberOfSimulationConditions() const override; + int getNumberOfSimulationConditions() const override; - virtual std::vector getSimulationToOptimizationParameterMapping( + std::vector getSimulationToOptimizationParameterMapping( int conditionIdx) const override; - virtual void mapSimulationToOptimizationGradientAddMultiply( + void mapSimulationToOptimizationGradientAddMultiply( int conditionIdx, gsl::span simulation, gsl::span optimization, gsl::span parameters, double coefficient = 1.0) const override; - virtual void mapAndSetOptimizationToSimulationVariables( + void mapAndSetOptimizationToSimulationVariables( int conditionIdx, gsl::span optimization, gsl::span simulation, gsl::span optimizationScale, gsl::span simulationScale) const override; - virtual std::vector getParameterScaleOpt() + std::vector getParameterScaleOpt() const override; - virtual amici::ParameterScaling getParameterScaleOpt( + amici::ParameterScaling getParameterScaleOpt( int optimizationParameterIndex) const override; - virtual amici::ParameterScaling getParameterScaleSim( + amici::ParameterScaling getParameterScaleSim( int simulationIdx, int optimizationParameterIndex) const override; - virtual std::vector getParameterScaleSim( + std::vector getParameterScaleSim( int) const override; - virtual void updateSimulationParametersAndScale( + void updateSimulationParametersAndScale( int conditionIndex, gsl::span optimizationParams, amici::Model& model) const override; - virtual std::unique_ptr getExperimentalDataForCondition( + std::unique_ptr getExperimentalDataForCondition( int conditionIdx) const override; - virtual std::vector> getAllMeasurements() + std::vector> getAllMeasurements() const override; - virtual std::vector> getAllSigmas() const override; + std::vector> getAllSigmas() const override; /** * @brief Returns the number of optimization parameters of this problem * @return Number of parameters */ - virtual int getNumOptimizationParameters() const override; + int getNumOptimizationParameters() const override; /** * @brief Returns a pointer to the underlying AMICI model * @return The model */ - virtual std::unique_ptr getModel() const override; + std::unique_ptr getModel() const override; - virtual std::unique_ptr getSolver() const override; + std::unique_ptr getSolver() const override; std::vector getProblemParameterIds() const override; @@ -249,7 +249,7 @@ class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider MultiConditionDataProviderHDF5(MultiConditionDataProviderHDF5 const&) = delete; - virtual ~MultiConditionDataProviderHDF5() override;; + ~MultiConditionDataProviderHDF5() override; /** * @brief Get the number of simulations required for objective function @@ -257,7 +257,7 @@ class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider * of conditions present in the data. 
* @return Number of conditions */ - virtual int getNumberOfSimulationConditions() const override; + int getNumberOfSimulationConditions() const override; /** * @brief Get index vector of length of model parameter with indices of @@ -267,33 +267,33 @@ class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider * @param conditionIdx * @return */ - virtual std::vector getSimulationToOptimizationParameterMapping( + std::vector getSimulationToOptimizationParameterMapping( int conditionIdx) const override; - virtual void mapSimulationToOptimizationGradientAddMultiply( + void mapSimulationToOptimizationGradientAddMultiply( int conditionIdx, gsl::span simulation, gsl::span optimization, gsl::span parameters, double coefficient = 1.0) const override; - virtual void mapAndSetOptimizationToSimulationVariables( + void mapAndSetOptimizationToSimulationVariables( int conditionIdx, gsl::span optimization, gsl::span simulation, gsl::span optimizationScale, gsl::span simulationScale) const override; - virtual std::vector getParameterScaleOpt() + std::vector getParameterScaleOpt() const override; - virtual amici::ParameterScaling getParameterScaleOpt( + amici::ParameterScaling getParameterScaleOpt( int parameterIdx) const override; - virtual std::vector getParameterScaleSim( + std::vector getParameterScaleSim( int simulationIdx) const override; - virtual amici::ParameterScaling getParameterScaleSim( + amici::ParameterScaling getParameterScaleSim( int simulationIdx, int modelParameterIdx) const override; @@ -336,16 +336,16 @@ class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider * @brief Returns the number of optimization parameters of this problem * @return Number of parameters */ - virtual int getNumOptimizationParameters() const override; + int getNumOptimizationParameters() const override; /** * @brief Returns a pointer to a copy of the underlying AMICI model * as provided to the constructor * @return The model */ - virtual std::unique_ptr getModel() const override; + std::unique_ptr getModel() const override; - virtual std::unique_ptr getSolver() const override; + std::unique_ptr getSolver() const override; /** * @brief Based on the array of optimization parameters, set the simulation diff --git a/include/parpeamici/multiConditionProblem.h b/include/parpeamici/multiConditionProblem.h index 4a9d8a07d..101437bf8 100644 --- a/include/parpeamici/multiConditionProblem.h +++ b/include/parpeamici/multiConditionProblem.h @@ -112,7 +112,7 @@ class AmiciSummedGradientFunction : public SummedGradientFunction { LoadBalancerMaster *loadBalancer, OptimizationResultWriter *resultWriter); - virtual ~AmiciSummedGradientFunction() = default; + ~AmiciSummedGradientFunction() override = default; FunctionEvaluationStatus evaluate( gsl::span parameters, diff --git a/include/parpeamici/optimizationApplication.h b/include/parpeamici/optimizationApplication.h index 7b4f86781..8a6e79d9d 100644 --- a/include/parpeamici/optimizationApplication.h +++ b/include/parpeamici/optimizationApplication.h @@ -41,8 +41,8 @@ class OptimizationApplication { * @param inFileArgument * @param outFileArgument */ - virtual void initProblem(std::string inFileArgument, - std::string outFileArgument) = 0; + virtual void initProblem(std::string const& inFileArgument, + std::string const& outFileArgument) = 0; /** * @brief Start the optimization run. Must only be called once. 
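The hunks above drop the redundant `virtual` keyword from member functions that are already declared `override`, one of the items in this commit's summary. A minimal sketch of the convention, with hypothetical class names rather than the parPE types:

    struct DataProviderBase {
        virtual ~DataProviderBase() = default;
        virtual int getNumberOfSimulationConditions() const = 0;
    };

    struct DataProviderImpl : DataProviderBase {
        // `override` already implies the function is virtual; repeating
        // `virtual` adds nothing and is removed throughout this commit.
        int getNumberOfSimulationConditions() const override { return 1; }
    };

The destructor declarations follow the same reasoning: `~MultiConditionDataProviderDefault() override = default;` is sufficient, since `override` on a destructor is only valid when the base class destructor is virtual.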
diff --git a/include/parpecommon/hdf5Misc.h b/include/parpecommon/hdf5Misc.h index f0ceb938f..393d9fd78 100644 --- a/include/parpecommon/hdf5Misc.h +++ b/include/parpecommon/hdf5Misc.h @@ -20,14 +20,14 @@ class HDF5Exception : public std::exception { explicit HDF5Exception(const char *format, ...); - const char* what() const noexcept; + const char* what() const noexcept override; std::string msg; std::string stackTrace; }; -typedef std::recursive_mutex mutexHdfType; +using mutexHdfType = std::recursive_mutex; void initHDF5Mutex(); diff --git a/include/parpecommon/logging.h b/include/parpecommon/logging.h index 219b302df..f53831c4d 100644 --- a/include/parpecommon/logging.h +++ b/include/parpecommon/logging.h @@ -17,14 +17,13 @@ constexpr const char ANSI_COLOR_RESET[] = "\x1b[0m"; std::string printfToString(const char *fmt, va_list ap); -// TODO enum class -typedef enum loglevel_tag { - LOGLVL_CRITICAL = 1, - LOGLVL_ERROR, - LOGLVL_WARNING, - LOGLVL_INFO, - LOGLVL_DEBUG -} loglevel; +enum class loglevel { + critical = 1, + error, + warning, + info, + debug +}; // Minimum log level that will be printed extern loglevel minimumLogLevel; diff --git a/include/parpeoptimization/localOptimizationCeres.h b/include/parpeoptimization/localOptimizationCeres.h index f3b6fba78..6a30a0073 100644 --- a/include/parpeoptimization/localOptimizationCeres.h +++ b/include/parpeoptimization/localOptimizationCeres.h @@ -19,7 +19,7 @@ class OptimizerCeres : public Optimizer { * @param problem the optimization problem * @return Returns 0 on success. */ - virtual std::tuple > + std::tuple > optimize(OptimizationProblem *problem) override; }; diff --git a/include/parpeoptimization/localOptimizationIpoptTNLP.h b/include/parpeoptimization/localOptimizationIpoptTNLP.h index 5f9aff87b..e0f319b83 100644 --- a/include/parpeoptimization/localOptimizationIpoptTNLP.h +++ b/include/parpeoptimization/localOptimizationIpoptTNLP.h @@ -20,7 +20,7 @@ namespace parpe { /** Mutex for managing access to IpOpt routines which are not thread-safe */ -typedef std::recursive_mutex mutexIpOptType; +using mutexIpOptType = std::recursive_mutex; /** * @brief ipoptMutex Ipopt seems not to be thread safe. 
Lock this mutex every @@ -42,25 +42,25 @@ class LocalOptimizationIpoptTNLP : public Ipopt::TNLP { LocalOptimizationIpoptTNLP(OptimizationProblem &problem, OptimizationReporter &reporter); - virtual ~LocalOptimizationIpoptTNLP() override = default; + ~LocalOptimizationIpoptTNLP() override = default; - virtual bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, + bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g, Ipopt::Index &nnz_h_lag, IndexStyleEnum &index_style) override; - virtual bool get_bounds_info(Ipopt::Index n, Ipopt::Number *x_l, + bool get_bounds_info(Ipopt::Index n, Ipopt::Number *x_l, Ipopt::Number *x_u, Ipopt::Index m, Ipopt::Number *g_l, Ipopt::Number *g_u) override; - virtual bool get_starting_point(Ipopt::Index n, bool init_x, + bool get_starting_point(Ipopt::Index n, bool init_x, Ipopt::Number *x, bool init_z, Ipopt::Number *z_L, Ipopt::Number *z_U, Ipopt::Index m, bool init_lambda, Ipopt::Number *lambda) override; - virtual bool eval_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, + bool eval_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number &obj_value) override; /** @@ -76,27 +76,27 @@ class LocalOptimizationIpoptTNLP : public Ipopt::TNLP { * @param grad_f * @return */ - virtual bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, + bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Number *grad_f) override; - virtual bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, + bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Number *g) override; - virtual bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, + bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index *iRow, Ipopt::Index *jCol, Ipopt::Number *values) override; - virtual bool intermediate_callback( + bool intermediate_callback( Ipopt::AlgorithmMode mode, Ipopt::Index iter, Ipopt::Number obj_value, Ipopt::Number inf_pr, Ipopt::Number inf_du, Ipopt::Number mu, Ipopt::Number d_norm, Ipopt::Number regularization_size, Ipopt::Number alpha_du, Ipopt::Number alpha_pr, Ipopt::Index ls_trials, const Ipopt::IpoptData *ip_data, Ipopt::IpoptCalculatedQuantities *ip_cq) override; - virtual void finalize_solution(Ipopt::SolverReturn status, Ipopt::Index n, + void finalize_solution(Ipopt::SolverReturn status, Ipopt::Index n, const Ipopt::Number *x, const Ipopt::Number *z_L, const Ipopt::Number *z_U, Ipopt::Index m, diff --git a/include/parpeoptimization/minibatchOptimization.h b/include/parpeoptimization/minibatchOptimization.h index 931d61188..36bb265a0 100755 --- a/include/parpeoptimization/minibatchOptimization.h +++ b/include/parpeoptimization/minibatchOptimization.h @@ -14,8 +14,11 @@ #include #include #include +#include + #include + namespace parpe { /** @@ -58,7 +61,7 @@ class MinibatchOptimizationProblem: public OptimizationProblem { MinibatchOptimizationProblem(MinibatchOptimizationProblem const& other) = delete; - virtual ~MinibatchOptimizationProblem() override = default; + ~MinibatchOptimizationProblem() override = default; /** vector of training data */ virtual std::vector getTrainingData() const = 0; @@ -374,15 +377,6 @@ std::vector> getBatches(gsl::span data, return batches; } -/** - * @brief Get scalar product of two vectors. - * @param v - * @param w - * @return the scalar product - */ -double getScalarProduct(gsl::span v, - gsl::span w); - /** * @brief Get Euclidean (l2) norm of vector. 
* @param v @@ -464,7 +458,7 @@ class MinibatchOptimizer { std::stringstream ss; ss << ": Cost: " << cost << " |g|2: " << getVectorNorm(gradient) << " Batch: " << batches[batchIdx] << " LearningRate: " << learningRate << std::endl; - batchLogger->logmessage(LOGLVL_DEBUG, ss.str().c_str()); + batchLogger->logmessage(loglevel::debug, ss.str().c_str()); if (status == functionEvaluationFailure) { // Check, if the interceptor should be used (should always be the case, except for study purpose... @@ -551,13 +545,13 @@ class MinibatchOptimizer { if (logger) { switch (status) { case minibatchExitStatus::invalidNumber: - logger->logmessage(LOGLVL_ERROR, "Mini-batch cost function evaluation failed."); + logger->logmessage(loglevel::error, "Mini-batch cost function evaluation failed."); break; case minibatchExitStatus::gradientNormConvergence: - logger->logmessage(LOGLVL_INFO, "Convergence: gradientNormThreshold reached."); + logger->logmessage(loglevel::info, "Convergence: gradientNormThreshold reached."); break; case minibatchExitStatus::maxEpochsExceeded: - logger->logmessage(LOGLVL_INFO, "Number of epochs exceeded."); + logger->logmessage(loglevel::info, "Number of epochs exceeded."); } } @@ -615,7 +609,7 @@ class MinibatchOptimizer { std::vector firstDifference = getVectorDifference(parameters, oldParameters); std::stringstream first_ss; first_ss << " Interceptor is active! Former step size: " << getVectorNorm(firstDifference) << std::endl; - logger->logmessage(LOGLVL_DEBUG, first_ss.str().c_str()); + logger->logmessage(loglevel::debug, first_ss.str().c_str()); } @@ -639,7 +633,7 @@ class MinibatchOptimizer { // If nothing helps and no cold restart wanted: cancel optimization if (initialFail or (finalFail and interceptor != interceptType::reduceStepAndRestart)) { - logger->logmessage(LOGLVL_DEBUG, "Failure at initial point of optimization. Stopping."); + logger->logmessage(loglevel::debug, "Failure at initial point of optimization. Stopping."); return functionEvaluationFailure; } @@ -685,7 +679,7 @@ class MinibatchOptimizer { << "real step length: " << getVectorNorm(parDifference) << std::endl; if (logger) - logger->logmessage(LOGLVL_DEBUG, ss.str().c_str()); + logger->logmessage(loglevel::debug, ss.str().c_str()); } @@ -764,7 +758,7 @@ class MinibatchOptimizer { /* Return on improvement */ if (cost1 <= cost) { - logger->logmessage(LOGLVL_DEBUG, " Line-Search: Step was good right away..."); + logger->logmessage(loglevel::debug, " Line-Search: Step was good right away..."); return; } @@ -782,7 +776,8 @@ class MinibatchOptimizer { direction[i] /= dirNorm; /* Is the step direction a descent direction? 
*/ - double dirGradient = getScalarProduct(direction, gradient); + double dirGradient = std::inner_product( + direction.begin(), direction.end(), gradient.begin(), 0.0); if (dirGradient > 0) { /* No descent direction, no hope for improvement: * Try to do something smart anyway */ @@ -812,7 +807,7 @@ class MinibatchOptimizer { << "), cost2: " << cost2 << " (step length: " << newStepLength << ") " << std::endl; if (logger) - logger->logmessage(LOGLVL_DEBUG, parabola_ss.str().c_str()); + logger->logmessage(loglevel::debug, parabola_ss.str().c_str()); /* We tried all we could */ return; @@ -831,7 +826,7 @@ class MinibatchOptimizer { << "), cost1: " << cost1 << " (step length: " << stepLength << "), cost2: " << cost2 << " (step length: " << newStepLength << ")" << std::endl; - logger->logmessage(LOGLVL_DEBUG, line_ss.str().c_str()); + logger->logmessage(loglevel::debug, line_ss.str().c_str()); } /* If we did improve, return, otherwise iterate */ @@ -848,7 +843,7 @@ class MinibatchOptimizer { std::stringstream line_ss; if (logger) { line_ss << " Line-Search: Need to go to third order approximation, looping... " << std::endl; - logger->logmessage(LOGLVL_DEBUG, line_ss.str().c_str()); + logger->logmessage(loglevel::debug, line_ss.str().c_str()); } performLineSearch(stepLength, newStepLength, diff --git a/sonar-project.properties b/sonar-project.properties index b58b6ab19..f73cc5b70 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -19,3 +19,4 @@ sonar.cfamily.cache.enabled=true sonar.cfamily.cache.path=sonar_cache sonar.python.coverage.reportPaths=build/coverage_py.xml +sonar.python.version=3.7,3.8,3.9,3.10 diff --git a/src/parpeamici/hierarchicalOptimization.cpp b/src/parpeamici/hierarchicalOptimization.cpp index dfb569a73..60caa352b 100644 --- a/src/parpeamici/hierarchicalOptimization.cpp +++ b/src/parpeamici/hierarchicalOptimization.cpp @@ -115,7 +115,7 @@ HierarchicalOptimizationWrapper::init() << " proportionality, " << offsetParameterIndices.size() << " offset, " << sigmaParameterIndices.size() << " sigma\n"; Logger logger; - logger.logmessage(LOGLVL_DEBUG, ss.str()); + logger.logmessage(loglevel::debug, ss.str()); } } @@ -203,10 +203,10 @@ HierarchicalOptimizationWrapper::evaluate( if (logger) { std::stringstream ss; ss << "scalings " << scalings; - logger->logmessage(LOGLVL_DEBUG, ss.str()); + logger->logmessage(loglevel::debug, ss.str()); ss.str(std::string()); ss << "sigmas " << sigmas; - logger->logmessage(LOGLVL_DEBUG, ss.str()); + logger->logmessage(loglevel::debug, ss.str()); } // splice parameter vector we get from optimizer with analytically // computed parameters @@ -733,7 +733,7 @@ computeAnalyticalScalings( timeIdx * numObservables]; // std::cout< -1e-18) { // negative values due to numerical errors // TODO: some outputs may be validly < 0 - logmessage(LOGLVL_WARNING, + logmessage(loglevel::warning, "In computeAnalyticalScalings %d: " "Simulation is %g < 0 for condition %d " "observable %d timepoint %d. 
" @@ -766,7 +766,7 @@ computeAnalyticalScalings( } if (denominator == 0.0) { - logmessage(LOGLVL_WARNING, + logmessage(loglevel::warning, "In computeAnalyticalScalings: denominator is 0.0 for " "scaling parameter " + std::to_string(scalingIdx) + @@ -821,7 +821,7 @@ computeAnalyticalOffsets( [observableIdx + timeIdx * numObservables]; if (std::isnan(sim)) { - logmessage(LOGLVL_WARNING, + logmessage(loglevel::warning, "In computeAnalyticalOffsets %d: " "Simulation is NaN for condition %d " "observable %d timepoint %d", @@ -838,7 +838,7 @@ computeAnalyticalOffsets( } if (denominator == 0.0) { - logmessage(LOGLVL_WARNING, + logmessage(loglevel::warning, "In computeAnalyticalOffsets: denominator is 0.0 " "for offset parameter " + std::to_string(offsetIdx) + @@ -885,7 +885,7 @@ computeAnalyticalSigmas( modelOutputsScaled[conditionIdx][flat_index]; if (std::isnan(scaledSim)) { - logmessage(LOGLVL_WARNING, + logmessage(loglevel::warning, "In computeAnalyticalSigmas %d: " "Simulation is NaN for condition %d " "observable %d timepoint %d", @@ -905,7 +905,7 @@ computeAnalyticalSigmas( } if (denominator == 0.0) { - logmessage(LOGLVL_WARNING, + logmessage(loglevel::warning, "In computeAnalyticalSigmas: Denominator is 0.0 for sigma " "parameter " + std::to_string(sigmaIdx) + @@ -919,7 +919,7 @@ computeAnalyticalSigmas( if (sigma < epsilonAbs) { // Must not return sigma = 0.0 - logmessage(LOGLVL_WARNING, + logmessage(loglevel::warning, "In computeAnalyticalSigmas " + std::to_string(sigmaIdx) + ": Computed sigma < epsilon. Setting to " + std::to_string(epsilonAbs)); @@ -1110,16 +1110,16 @@ computeNegLogLikelihood(std::vector const& measurements, double sigmaSquared = sigmas[i] * sigmas[i]; if (std::isnan(sim)) { logmessage( - LOGLVL_WARNING, "Simulation is NaN for data point %d", i); + loglevel::warning, "Simulation is NaN for data point %d", i); return std::numeric_limits::quiet_NaN(); } if (std::isnan(sigmaSquared)) { - logmessage(LOGLVL_WARNING, "Sigma is NaN for data point %d", i); + logmessage(loglevel::warning, "Sigma is NaN for data point %d", i); return std::numeric_limits::quiet_NaN(); } if (sigmaSquared < 0.0) { logmessage( - LOGLVL_WARNING, "Negative sigma for data point %d", i); + loglevel::warning, "Negative sigma for data point %d", i); return std::numeric_limits::quiet_NaN(); } @@ -1230,13 +1230,13 @@ HierarchicalOptimizationReporter::finished(double optimalCost, std::copy( parameters.begin(), parameters.end(), cached_parameters_.data()); if (logger_) - logger_->logmessage(LOGLVL_INFO, "cachedCost != optimalCost"); + logger_->logmessage(loglevel::info, "cachedCost != optimalCost"); cached_cost_ = NAN; } if (logger_) logger_->logmessage( - LOGLVL_INFO, + loglevel::info, "Optimizer status %d, final llh: %e, time: wall: %f cpu: %f.", exitStatus, cached_cost_, @@ -1267,7 +1267,7 @@ HierarchicalOptimizationReporter::iterationFinished( double wallTimeOptim = wall_timer_.getTotal(); if (logger_) - logger_->logmessage(LOGLVL_INFO, + logger_->logmessage(loglevel::info, "iter: %d cost: %g " "time_iter: wall: %gs cpu: %gs " "time_optim: wall: %gs cpu: %gs", @@ -1357,7 +1357,7 @@ checkGradientForAnalyticalParameters(const std::vector& gradient, auto curGradient = gradient[idx]; // std::cout<<" : "< threshold) - logmessage(LOGLVL_WARNING, + logmessage(loglevel::warning, "Gradient w.r.t. 
analytically computed parameter " "%d is %f, exceeding threshold %g", idx, diff --git a/src/parpeamici/multiConditionDataProvider.cpp b/src/parpeamici/multiConditionDataProvider.cpp index 3557c57de..dc7163750 100644 --- a/src/parpeamici/multiConditionDataProvider.cpp +++ b/src/parpeamici/multiConditionDataProvider.cpp @@ -281,7 +281,7 @@ MultiConditionDataProviderHDF5::readFixedSimulationParameters( buffer); if (H5Eget_num(H5E_DEFAULT)) { - logmessage(LOGLVL_CRITICAL, + logmessage(loglevel::critical, "Problem in readFixedParameters (row %d, nk %d)\n", conditionIdx, model_->nk()); @@ -674,7 +674,7 @@ MultiConditionDataProviderDefault::updateSimulationParametersAndScale( gsl::span optimizationParams, amici::Model& model) const { - logmessage(LOGLVL_WARNING, + logmessage(loglevel::warning, "MultiConditionDataProviderDefault::updateSimulationParameters: " "No proper mapping implemented. Ensure this is correct."); model.setParameters(std::vector(optimizationParams.begin(), diff --git a/src/parpeamici/multiConditionProblem.cpp b/src/parpeamici/multiConditionProblem.cpp index 08657a889..72a2ab4a3 100644 --- a/src/parpeamici/multiConditionProblem.cpp +++ b/src/parpeamici/multiConditionProblem.cpp @@ -209,14 +209,14 @@ void printSimulationResult(Logger *logger, int jobId, if(!rdata) { // This should not happen, but apparently we can't rely on AMICI always // returning some result object - logger->logmessage(LOGLVL_ERROR, + logger->logmessage(loglevel::error, "AMICI simulation failed unexpectedly."); return; } bool with_sensi = rdata->sensi >= amici::SensitivityOrder::first; - logger->logmessage(LOGLVL_DEBUG, "Result for %d: %g (%d) (%d/%d/%.4fs%c)", + logger->logmessage(loglevel::debug, "Result for %d: %g (%d) (%d/%d/%.4fs%c)", jobId, rdata->llh, rdata->status, rdata->numsteps.empty()?-1:rdata->numsteps[rdata->numsteps.size() - 1], rdata->numstepsB.empty()?-1:rdata->numstepsB[0], @@ -228,13 +228,13 @@ void printSimulationResult(Logger *logger, int jobId, if (with_sensi) { for (int i = 0; i < rdata->np; ++i) { if (std::isnan(rdata->sllh[i])) { - logger->logmessage(LOGLVL_DEBUG, + logger->logmessage(loglevel::debug, "Gradient contains NaN at %d", i); break; } if (std::isinf(rdata->sllh[i])) { - logger->logmessage(LOGLVL_DEBUG, + logger->logmessage(loglevel::debug, "Gradient contains Inf at %d", i); break; } @@ -360,19 +360,19 @@ AmiciSimulationRunner::AmiciResultPackageSimple runAndLogSimulation( std::string const& identifier, std::string const& message){ if(!identifier.empty()) { - logger->logmessage(LOGLVL_ERROR, "[" + identifier + "] " + message); + logger->logmessage(loglevel::error, "[" + identifier + "] " + message); } else { - logger->logmessage(LOGLVL_ERROR, message); + logger->logmessage(loglevel::error, message); } }; amiciApp.warning = [logger]( std::string const& identifier, std::string const& message){ if(!identifier.empty()) { - logger->logmessage(LOGLVL_WARNING, + logger->logmessage(loglevel::warning, "[" + identifier + "] " + message); } else { - logger->logmessage(LOGLVL_WARNING, message); + logger->logmessage(loglevel::warning, message); } }; model.app = &amiciApp; // TODO: may dangle need to unset on exit @@ -405,7 +405,7 @@ AmiciSimulationRunner::AmiciResultPackageSimple runAndLogSimulation( } if(trial - 1 == maxNumTrials) { - logger->logmessage(LOGLVL_ERROR, + logger->logmessage(loglevel::error, "Simulation trial %d/%d failed. 
Giving up.", trial, maxNumTrials); break; @@ -445,7 +445,7 @@ AmiciSimulationRunner::AmiciResultPackageSimple runAndLogSimulation( } logger->logmessage( - LOGLVL_WARNING, + loglevel::warning, "Error during simulation (try %d/%d), " "retrying with relaxed error tolerances (*= %g): " "abs: %g rel: %g quadAbs: %g quadRel: %g " @@ -468,7 +468,7 @@ AmiciSimulationRunner::AmiciResultPackageSimple runAndLogSimulation( status = std::to_string(rdata->status); } logger->logmessage( - LOGLVL_WARNING, "Error during simulation: %s (%s)", + loglevel::warning, "Error during simulation: %s (%s)", e.what(), status.c_str()); } diff --git a/src/parpeamici/optimizationApplication.cpp b/src/parpeamici/optimizationApplication.cpp index 49003866a..c6f3aebe5 100644 --- a/src/parpeamici/optimizationApplication.cpp +++ b/src/parpeamici/optimizationApplication.cpp @@ -26,7 +26,7 @@ struct sigaction act; struct sigaction oldact; void signalHandler(int sig) { - logmessage(LOGLVL_CRITICAL, "Caught signal %d ", sig); + logmessage(loglevel::critical, "Caught signal %d ", sig); printBacktrace(); // restore previous @@ -37,7 +37,7 @@ void signalHandler(int sig) { int OptimizationApplication::init(int argc, char **argv) { // reduce verbosity if(std::getenv("PARPE_NO_DEBUG")) - minimumLogLevel = LOGLVL_INFO; + minimumLogLevel = loglevel::info; int status = parseCliOptionsPreMpiInit(argc, argv); if(status) @@ -93,11 +93,9 @@ int OptimizationApplication::parseCliOptionsPostMpiInit(int argc, char **argv) { // restart from first argument optind = 1; - int c; - while (true) { int optionIndex = 0; - c = getopt_long(argc, argv, shortOptions, longOptions, &optionIndex); + int c = getopt_long(argc, argv, shortOptions, longOptions, &optionIndex); if (c == -1) break; // no more options @@ -131,7 +129,7 @@ int OptimizationApplication::parseCliOptionsPostMpiInit(int argc, char **argv) { if (optind < argc) { dataFileName = argv[optind++]; } else { - logmessage(LOGLVL_CRITICAL, + logmessage(loglevel::critical, "Must provide input file as first and only argument to %s.", argv[0]); return 1; @@ -172,7 +170,7 @@ void OptimizationApplication::initMPI(int *argc, char ***argv) { throw std::runtime_error("MPI_THREAD_MULTIPLE not supported?"); if (mpiErr != MPI_SUCCESS) { - logmessage(LOGLVL_CRITICAL, "Problem initializing MPI. Exiting."); + logmessage(loglevel::critical, "Problem initializing MPI. Exiting."); exit(1); } @@ -180,7 +178,7 @@ void OptimizationApplication::initMPI(int *argc, char ***argv) { if (mpiRank == 0) { int commSize = getMpiCommSize(); - logmessage(LOGLVL_INFO, "Running with %d MPI processes.", commSize); + logmessage(loglevel::info, "Running with %d MPI processes.", commSize); } #endif } @@ -195,7 +193,7 @@ int OptimizationApplication::run(int argc, char **argv) { return status; if (dataFileName.empty()) { - logmessage(LOGLVL_CRITICAL, + logmessage(loglevel::critical, "No input file provided. 
Must provide input file as first " "and only argument or set " "OptimizationApplication::inputFileName manually."); @@ -215,7 +213,7 @@ int OptimizationApplication::run(int argc, char **argv) { loadBalancer.terminate(); loadBalancer.sendTerminationSignalToAllWorkers(); finalizeTiming(wallTimer.getTotal(), cpuTimer.getTotal()); - logmessage(LOGLVL_INFO, "Sent termination signal to workers."); + logmessage(loglevel::info, "Sent termination signal to workers."); } else { runWorker(); @@ -300,7 +298,7 @@ void OptimizationApplication::finalizeTiming(double wallTimeSeconds, double cpuT int mpiRank = getMpiRank(); if (mpiRank < 1) { - logmessage(LOGLVL_INFO, "Walltime on master: %fs, CPU time of all processes: %fs", + logmessage(loglevel::info, "Walltime on master: %fs, CPU time of all processes: %fs", wallTimeSeconds, totalCpuTimeInSeconds); saveTotalCpuTime(h5File, totalCpuTimeInSeconds); } diff --git a/src/parpeamici/standaloneSimulator.cpp b/src/parpeamici/standaloneSimulator.cpp index 9f279a14e..8e8da1ff9 100644 --- a/src/parpeamici/standaloneSimulator.cpp +++ b/src/parpeamici/standaloneSimulator.cpp @@ -402,18 +402,18 @@ StandaloneSimulator::runSimulation(int conditionIdx, amiciApp.error = [&logger](std::string const& identifier, std::string const& message) { if (!identifier.empty()) { - logger.logmessage(LOGLVL_ERROR, "[" + identifier + "] " + message); + logger.logmessage(loglevel::error, "[" + identifier + "] " + message); } else { - logger.logmessage(LOGLVL_ERROR, message); + logger.logmessage(loglevel::error, message); } }; amiciApp.warning = [&logger](std::string const& identifier, std::string const& message) { if (!identifier.empty()) { - logger.logmessage(LOGLVL_WARNING, + logger.logmessage(loglevel::warning, "[" + identifier + "] " + message); } else { - logger.logmessage(LOGLVL_WARNING, message); + logger.logmessage(loglevel::warning, message); } }; model.app = &amiciApp; // TODO: may dangle need to unset on exit diff --git a/src/parpecommon/hdf5Misc.cpp b/src/parpecommon/hdf5Misc.cpp index ee3c3074b..a77485d24 100644 --- a/src/parpecommon/hdf5Misc.cpp +++ b/src/parpecommon/hdf5Misc.cpp @@ -65,12 +65,12 @@ herr_t hdf5ErrorStackWalker_cb(unsigned int n, const H5E_error_t *err_desc, std::unique_ptr min_str { H5Eget_minor(err_desc->min_num), &std::free }; - logmessage(LOGLVL_CRITICAL, "%*s#%03d: %s line %u in %s(): %s", indent, "", + logmessage(loglevel::critical, "%*s#%03d: %s line %u in %s(): %s", indent, "", n, err_desc->file_name, err_desc->line, err_desc->func_name, err_desc->desc); - logmessage(LOGLVL_CRITICAL, "%*smajor(%02d): %s", indent * 2, "", + logmessage(loglevel::critical, "%*smajor(%02d): %s", indent * 2, "", err_desc->maj_num, maj_str.get()); - logmessage(LOGLVL_CRITICAL, "%*sminor(%02d): %s", indent * 2, "", + logmessage(loglevel::critical, "%*sminor(%02d): %s", indent * 2, "", err_desc->min_num, min_str.get()); return 0; @@ -701,7 +701,7 @@ H5::H5File hdf5OpenForReading(const std::string &hdf5Filename) H5_RESTORE_ERROR_HANDLER; return file; } catch (...) 
{ - logmessage(LOGLVL_CRITICAL, + logmessage(loglevel::critical, "failed to open HDF5 file '%s'.", hdf5Filename.c_str()); printBacktrace(20); diff --git a/src/parpecommon/logging.cpp b/src/parpecommon/logging.cpp index 6a41291b2..27a48c52a 100644 --- a/src/parpecommon/logging.cpp +++ b/src/parpecommon/logging.cpp @@ -19,7 +19,7 @@ namespace parpe { const char *loglevelShortStr[] = {"", "CRI", "ERR", "WRN", "INF", "DBG"}; -loglevel minimumLogLevel = LOGLVL_DEBUG; +loglevel minimumLogLevel = loglevel::debug; static void printlogmessage(loglevel lvl, const char *message); std::string printfToString(const char *fmt, va_list ap) { @@ -58,7 +58,7 @@ void logProcessStats() while (std::getline(file, line)) { if(line.rfind("Vm", 0) == 0 || line.rfind("Rss", 0) == 0) { - logmessage(LOGLVL_DEBUG, line); + logmessage(loglevel::debug, line); } } file.close(); @@ -78,13 +78,13 @@ void printMPIInfo() { int procNameLen; MPI_Get_processor_name(procName, &procNameLen); - logmessage(LOGLVL_DEBUG, "Rank %d/%d running on %s.", mpiRank, + logmessage(loglevel::debug, "Rank %d/%d running on %s.", mpiRank, mpiCommSize, procName); } else { - logmessage(LOGLVL_DEBUG, "MPI not initialized."); + logmessage(loglevel::debug, "MPI not initialized."); } #else - logmessage(LOGLVL_DEBUG, "MPI support disabled."); + logmessage(loglevel::debug, "MPI support disabled."); #endif } @@ -93,7 +93,7 @@ void printDebugInfoAndWait(int seconds) { //int i = 0; char hostname[256]; gethostname(hostname, sizeof(hostname)); - logmessage(LOGLVL_DEBUG, + logmessage(loglevel::debug, "PID %d on %s ready for attach (will wait for %ds)", getpid(), hostname, seconds); fflush(stdout); @@ -102,11 +102,11 @@ void printDebugInfoAndWait(int seconds) { } void error(const char *message) { // exit? - logmessage(LOGLVL_ERROR, message); + logmessage(loglevel::error, message); } void warning(const char *message) { - logmessage(LOGLVL_WARNING, message); + logmessage(loglevel::warning, message); } void logmessage(loglevel lvl, const std::string &msg) @@ -127,19 +127,19 @@ void printlogmessage(loglevel lvl, const char *message) // TODO: fileLogLevel, consoleLogLevel // Coloring switch (lvl) { - case LOGLVL_CRITICAL: + case loglevel::critical: printf(ANSI_COLOR_MAGENTA); break; - case LOGLVL_ERROR: + case loglevel::error: printf(ANSI_COLOR_RED); break; - case LOGLVL_WARNING: + case loglevel::warning: printf(ANSI_COLOR_YELLOW); break; - case LOGLVL_DEBUG: + case loglevel::debug: printf(ANSI_COLOR_CYAN); break; - case LOGLVL_INFO: + case loglevel::info: printf(ANSI_COLOR_GREEN); break; } @@ -177,9 +177,9 @@ void printlogmessage(loglevel lvl, const char *message) printf("%s\n", ANSI_COLOR_RESET); switch (lvl) { - case LOGLVL_CRITICAL: + case loglevel::critical: [[fallthrough]]; - case LOGLVL_ERROR: + case loglevel::error: fflush(stdout); break; default: diff --git a/src/parpecommon/misc.cpp b/src/parpecommon/misc.cpp index 32eefa008..8806a8092 100644 --- a/src/parpecommon/misc.cpp +++ b/src/parpecommon/misc.cpp @@ -64,9 +64,9 @@ void runInParallelAndWaitForFinish(void *(*function)(void *), void **args, // wait for finish for (int i = 0; i < numArgs; ++i) { pthread_join(threads[i], nullptr); - logmessage(LOGLVL_DEBUG, "Thread i %d finished", i); + logmessage(loglevel::debug, "Thread i %d finished", i); } - logmessage(LOGLVL_DEBUG, "All k threads finished."); + logmessage(loglevel::debug, "All k threads finished."); } void printBacktrace(int nMaxFrames) { diff --git a/src/parpeloadbalancer/loadBalancerMaster.cpp b/src/parpeloadbalancer/loadBalancerMaster.cpp index 
d252a421a..5e66d8ca1 100644 --- a/src/parpeloadbalancer/loadBalancerMaster.cpp +++ b/src/parpeloadbalancer/loadBalancerMaster.cpp @@ -55,6 +55,9 @@ void LoadBalancerMaster::run() { LoadBalancerMaster::~LoadBalancerMaster() { terminate(); + + pthread_mutex_destroy(&mutexQueue); + sem_destroy(&semQueue); } #ifndef QUEUE_MASTER_TEST @@ -215,9 +218,6 @@ void LoadBalancerMaster::terminate() { pthread_cancel(queueThread); // wait until canceled pthread_join(queueThread, nullptr); - - pthread_mutex_destroy(&mutexQueue); - sem_destroy(&semQueue); } int LoadBalancerMaster::handleReply(MPI_Status *mpiStatus) { diff --git a/src/parpeloadbalancer/loadBalancerWorker.cpp b/src/parpeloadbalancer/loadBalancerWorker.cpp index 382bd2234..04be15886 100644 --- a/src/parpeloadbalancer/loadBalancerWorker.cpp +++ b/src/parpeloadbalancer/loadBalancerWorker.cpp @@ -61,7 +61,7 @@ bool LoadBalancerWorker::waitForAndHandleJobs(const messageHandlerFunc& messageH #ifdef LOADBALANCERWORKER_REPORT_WAITING_TIME double endTime = MPI_Wtime(); double waitedSeconds = (endTime - startTime); - logmessage(LOGLVL_DEBUG, "Message received after waiting %fs.", rank, waitedSeconds); + logmessage(loglevel::debug, "Message received after waiting %fs.", rank, waitedSeconds); #endif messageHandler(buffer, mpiStatus.MPI_TAG); diff --git a/src/parpeoptimization/localOptimizationCeres.cpp b/src/parpeoptimization/localOptimizationCeres.cpp index fbf3ac6bd..0ff76b7ba 100644 --- a/src/parpeoptimization/localOptimizationCeres.cpp +++ b/src/parpeoptimization/localOptimizationCeres.cpp @@ -35,19 +35,19 @@ class LogSinkAdapter : public google::LogSink { const char* message, size_t message_len) override { // Map log levels - loglevel lvl = LOGLVL_INFO; + loglevel lvl = loglevel::info; switch (severity) { case google::INFO: - lvl = LOGLVL_INFO; + lvl = loglevel::info; break; case google::WARNING: - lvl = LOGLVL_WARNING; + lvl = loglevel::warning; break; case google::ERROR: - lvl = LOGLVL_ERROR; + lvl = loglevel::error; break; case google::FATAL: - lvl = LOGLVL_CRITICAL; + lvl = loglevel::critical; break; } @@ -263,11 +263,11 @@ void setCeresOption(const std::pair &pair, } else if(key == "minimizer_progress_to_stdout") { options->minimizer_progress_to_stdout = std::stoi(val); } else { - logmessage(LOGLVL_WARNING, "Ignoring unknown optimization option %s.", key.c_str()); + logmessage(loglevel::warning, "Ignoring unknown optimization option %s.", key.c_str()); return; } - logmessage(LOGLVL_DEBUG, "Set optimization option %s to %s.", key.c_str(), val.c_str()); + logmessage(loglevel::debug, "Set optimization option %s to %s.", key.c_str(), val.c_str()); } diff --git a/src/parpeoptimization/localOptimizationFides.cpp b/src/parpeoptimization/localOptimizationFides.cpp index cba2b17ad..bb06d626c 100644 --- a/src/parpeoptimization/localOptimizationFides.cpp +++ b/src/parpeoptimization/localOptimizationFides.cpp @@ -67,7 +67,7 @@ get_optimization_options(OptimizationOptions const& parpe_options) if (result != fides::subspace_dim_to_str.cend()) options.subspace_solver = result->first; else - logmessage(LOGLVL_WARNING, + logmessage(loglevel::warning, "Invalid value %s provided for option " "'subspace_solver'. Ignoring.", val.c_str()); @@ -79,7 +79,7 @@ get_optimization_options(OptimizationOptions const& parpe_options) if (result != fides::step_back_strategy_str.cend()) options.stepback_strategy = result->first; else - logmessage(LOGLVL_WARNING, + logmessage(loglevel::warning, "Invalid value %s provided for option " "'stepback_strategy'. 
Ignoring.", val.c_str()); @@ -98,13 +98,13 @@ get_optimization_options(OptimizationOptions const& parpe_options) } else if (key == "refine_stepback") { options.refine_stepback = std::stoi(val); } else { - logmessage(LOGLVL_WARNING, + logmessage(loglevel::warning, "Ignoring unknown optimization option %s.", key.c_str()); return; } - logmessage(LOGLVL_DEBUG, + logmessage(loglevel::debug, "Set optimization option %s to %s.", key.c_str(), val.c_str()); diff --git a/src/parpeoptimization/localOptimizationFsqp.cpp b/src/parpeoptimization/localOptimizationFsqp.cpp index 7f457808c..482e8bc28 100644 --- a/src/parpeoptimization/localOptimizationFsqp.cpp +++ b/src/parpeoptimization/localOptimizationFsqp.cpp @@ -168,7 +168,7 @@ class FsqpProblem { static_assert(sizeof(doublereal) >= sizeof(&thisthis), ""); memcpy(w.data(), &thisthis, 1 * sizeof(&thisthis)); - logmessage(LOGLVL_DEBUG, "w0 %p", &w.data()[0]); + logmessage(loglevel::debug, "w0 %p", &w.data()[0]); problem->fillInitialParameters(x); problem->fillParametersMin(bl); @@ -324,7 +324,7 @@ FsqpProblem *getProblemFromFj(doublereal &fj, integer nparam, integer j) { parpe::FsqpProblem *problem = nullptr; int nwff = getNwff(nparam, j); - logmessage(LOGLVL_DEBUG, "w0 obj: %p", &fj - nwff + 1); + logmessage(loglevel::debug, "w0 obj: %p", &fj - nwff + 1); memcpy(&problem, &fj - nwff + 1, sizeof(problem)); @@ -344,7 +344,7 @@ FsqpProblem *getProblemFromGradFj(doublereal *gradfj, integer nparam, integer j) parpe::FsqpProblem *problem = nullptr; int nwgrf = getNwgrf(nparam, j); - logmessage(LOGLVL_DEBUG, "w0 gradobj: %p", gradfj - nwgrf + 1); + logmessage(loglevel::debug, "w0 gradobj: %p", gradfj - nwgrf + 1); // NOTE: Will have to change that once we want to include constraints memcpy(&problem, gradfj - nwgrf + 1, sizeof(problem)); diff --git a/src/parpeoptimization/localOptimizationIpopt.cpp b/src/parpeoptimization/localOptimizationIpopt.cpp index 41315dac7..8f2e6506d 100644 --- a/src/parpeoptimization/localOptimizationIpopt.cpp +++ b/src/parpeoptimization/localOptimizationIpopt.cpp @@ -353,13 +353,13 @@ void setIpOptOption(const std::pair &pair, else if(std::find(dblOpts.begin(), dblOpts.end(), key) != dblOpts.end()) success = options->SetNumericValue(key, std::stod(val)); else { - logmessage(LOGLVL_WARNING, "Ignoring unknown optimization option %s.", key.c_str()); + logmessage(loglevel::warning, "Ignoring unknown optimization option %s.", key.c_str()); return; } RELEASE_ASSERT(success, "Problem setting IpOpt option"); - logmessage(LOGLVL_DEBUG, "Set optimization option %s to %s.", key.c_str(), val.c_str()); + logmessage(loglevel::debug, "Set optimization option %s to %s.", key.c_str(), val.c_str()); } void setIpOptOptions(SmartPtr optionsIpOpt, @@ -418,11 +418,11 @@ std::tuple > OptimizerIpOpt::optimize(Optimizat // TODO: print where } } catch (IpoptException& e) { - logmessage(LOGLVL_ERROR, "IpOpt exception: %s", e.Message().c_str()); + logmessage(loglevel::error, "IpOpt exception: %s", e.Message().c_str()); } catch (std::exception& e) { - logmessage(LOGLVL_ERROR, "Unknown exception occurred during optimization: %s", e.what()); + logmessage(loglevel::error, "Unknown exception occurred during optimization: %s", e.what()); } catch (...) 
{ - logmessage(LOGLVL_ERROR, "Unknown exception occurred during optimization"); + logmessage(loglevel::error, "Unknown exception occurred during optimization"); } finalCost = optimizationController->getFinalCost(); finalParameters = optimizationController->getFinalParameters(); diff --git a/src/parpeoptimization/localOptimizationIpoptTNLP.cpp b/src/parpeoptimization/localOptimizationIpoptTNLP.cpp index 559a5b9e3..ba8ea9414 100644 --- a/src/parpeoptimization/localOptimizationIpoptTNLP.cpp +++ b/src/parpeoptimization/localOptimizationIpoptTNLP.cpp @@ -28,7 +28,7 @@ LocalOptimizationIpoptTNLP::get_nlp_info(Index& n, { n = reporter.numParameters(); - m = 0; // number of constrants + m = 0; // number of constraints nnz_jac_g = 0; // numNonZeroElementsConstraintJacobian nnz_h_lag = 0; // numNonZeroElementsLagrangianHessian index_style = TNLP::C_STYLE; // array layout for sparse matrices @@ -91,7 +91,7 @@ LocalOptimizationIpoptTNLP::eval_f(Index n, bool /*new_x*/, Number& obj_value) { - auto unlockIpOpt = ipOptReleaseLock(); + [[maybe_unused]] auto unlockIpOpt = ipOptReleaseLock(); return reporter.evaluate(gsl::make_span(x, n), obj_value, @@ -104,7 +104,7 @@ LocalOptimizationIpoptTNLP::eval_grad_f(Index n, bool /*new_x*/, Number* grad_f) { - auto unlockIpOpt = ipOptReleaseLock(); + [[maybe_unused]] auto unlockIpOpt = ipOptReleaseLock(); double obj_value; return reporter.evaluate(gsl::make_span(x, n), @@ -155,16 +155,16 @@ LocalOptimizationIpoptTNLP::intermediate_callback( IpoptCalculatedQuantities* /*ip_cq*/) { - auto unlockIpOpt = ipOptReleaseLock(); + [[maybe_unused]] auto unlockIpOpt = ipOptReleaseLock(); // get current parameters from IpOpt which are not available directly gsl::span parameters; - auto x = ip_data->curr()->x(); - auto xx = dynamic_cast(Ipopt::GetRawPtr(x)); - if (xx) + + if (auto x = ip_data->curr()->x(); + auto xx = dynamic_cast(Ipopt::GetRawPtr(x))) parameters = gsl::span(xx->Values(), xx->Dim()); else - logmessage(LOGLVL_WARNING, + logmessage(loglevel::warning, "Not Ipopt::DenseVector in " "LocalOptimizationIpoptTNLP::intermediate_callback"); @@ -174,7 +174,7 @@ LocalOptimizationIpoptTNLP::intermediate_callback( #ifdef INSTALL_SIGNAL_HANDLER if (caughtTerminationSignal) { - logmessage(LOGLVL_CRITICAL, "CAUGHT SIGTERM... EXITING."); + logmessage(loglevel::critical, "CAUGHT SIGTERM... EXITING."); return false; } #endif @@ -197,7 +197,7 @@ LocalOptimizationIpoptTNLP::finalize_solution( IpoptCalculatedQuantities* /*ip_cq*/) { - auto unlockIpOpt = ipOptReleaseLock(); + [[maybe_unused]] auto unlockIpOpt = ipOptReleaseLock(); // If we finish with objective value of NAN, IpOpt still passes // obj_value 0.0 along with the respective flag. This does not make too // much sense. Set to NAN. 
@@ -208,16 +208,14 @@ LocalOptimizationIpoptTNLP::finalize_solution( reporter.finished(obj_value, gsl::span(x, n), status); } -std::unique_lock -ipOptGetLock() +auto ipOptGetLock() -> std::unique_lock { - return std::unique_lock(mutexIpOpt); + return std::unique_lock(mutexIpOpt); } -InverseUniqueLock -ipOptReleaseLock() +auto ipOptReleaseLock() -> InverseUniqueLock { - return InverseUniqueLock(&mutexIpOpt); + return InverseUniqueLock(&mutexIpOpt); } } // namespace parpe diff --git a/src/parpeoptimization/localOptimizationToms611.cpp b/src/parpeoptimization/localOptimizationToms611.cpp index 5877dd174..6a8343cc5 100644 --- a/src/parpeoptimization/localOptimizationToms611.cpp +++ b/src/parpeoptimization/localOptimizationToms611.cpp @@ -81,11 +81,11 @@ void setToms611Option(const std::pair &pai } else if(key == "xftol") { v[xftol] = std::stod(val); } else { - logmessage(LOGLVL_WARNING, "Ignoring unknown optimization option %s.", key.c_str()); + logmessage(loglevel::warning, "Ignoring unknown optimization option %s.", key.c_str()); return; } - logmessage(LOGLVL_DEBUG, "Set optimization option %s to %s.", key.c_str(), val.c_str()); + logmessage(loglevel::debug, "Set optimization option %s to %s.", key.c_str(), val.c_str()); } diff --git a/src/parpeoptimization/minibatchOptimization.cpp b/src/parpeoptimization/minibatchOptimization.cpp index 173de8205..ac78dcf9c 100755 --- a/src/parpeoptimization/minibatchOptimization.cpp +++ b/src/parpeoptimization/minibatchOptimization.cpp @@ -4,16 +4,8 @@ namespace parpe { -double getScalarProduct(gsl::span v, - gsl::span w) { - double scalarProduct = 0.0; - for (unsigned int i = 0; i < v.size(); ++i) - scalarProduct += v[i] * w[i]; - return scalarProduct; -} - double getVectorNorm(gsl::span v) { - return std::sqrt(getScalarProduct(v, v)); + return std::sqrt(std::inner_product(v.begin(), v.end(), v.begin(), 0.0)); } std::vector getVectorDifference(gsl::span v, @@ -41,11 +33,11 @@ void setMinibatchOption(const std::pair &p } else if (key == "lineSearchSteps") { optimizer->lineSearchSteps = std::stoi(val); } else if (key == "rescueInterceptor") { - if (val == "none" or val == "0") { + if (val == "none" || val == "0") { optimizer->interceptor = parpe::interceptType::none; - } else if (val == "reduceStep" or val == "1") { + } else if (val == "reduceStep" || val == "1") { optimizer->interceptor = parpe::interceptType::reduceStep; - } else if (val == "reduceStepAndRestart" or val == "2") { + } else if (val == "reduceStepAndRestart" || val == "2") { optimizer->interceptor = parpe::interceptType::reduceStepAndRestart; } } else if (key == "parameterUpdater") { @@ -61,7 +53,7 @@ void setMinibatchOption(const std::pair &p // this might have been set previously if there was an updater-specific option before optimizer->parameterUpdater = std::make_unique(); } else { - logmessage(LOGLVL_WARNING, "Ignoring unknown Minibatch parameterUpdater %s.", val.c_str()); + logmessage(loglevel::warning, "Ignoring unknown Minibatch parameterUpdater %s.", val.c_str()); } } else if (key == "learningRateInterpMode") { if (val == "linear") { @@ -79,11 +71,11 @@ void setMinibatchOption(const std::pair &p } else if (key == "endLearningRate") { optimizer->learningRateUpdater->setEndLearningRate(std::stod(val)); } else { - logmessage(LOGLVL_WARNING, "Ignoring unknown optimization option %s.", key.c_str()); + logmessage(loglevel::warning, "Ignoring unknown optimization option %s.", key.c_str()); return; } - logmessage(LOGLVL_DEBUG, "Set optimization option %s to %s.", key.c_str(), val.c_str()); 
+ logmessage(loglevel::debug, "Set optimization option %s to %s.", key.c_str(), val.c_str()); } std::tuple > runMinibatchOptimization(MinibatchOptimizationProblem *problem) { diff --git a/src/parpeoptimization/multiStartOptimization.cpp b/src/parpeoptimization/multiStartOptimization.cpp index 4eb92c56f..82b6aadd5 100644 --- a/src/parpeoptimization/multiStartOptimization.cpp +++ b/src/parpeoptimization/multiStartOptimization.cpp @@ -33,7 +33,7 @@ void MultiStartOptimization::run() { void MultiStartOptimization::runMultiThreaded() { - logmessage(LOGLVL_DEBUG, + logmessage(loglevel::debug, "Starting runParallelMultiStartOptimization with %d starts", numberOfStarts); @@ -55,7 +55,7 @@ void MultiStartOptimization::runMultiThreaded() for (int ms = 0; ms < numberOfStarts; ++ms) { ++lastStartIdx; - logmessage(LOGLVL_DEBUG, + logmessage(loglevel::debug, "Spawning thread for local optimization #%d (%d)", lastStartIdx, ms); @@ -92,10 +92,10 @@ void MultiStartOptimization::runMultiThreaded() if (*threadStatus == 0 || !restartOnFailure) { if (*threadStatus == 0) { - logmessage(LOGLVL_DEBUG, + logmessage(loglevel::debug, "Thread ms #%d finished successfully", ms); } else { - logmessage(LOGLVL_DEBUG, "Thread ms #%d finished " + logmessage(loglevel::debug, "Thread ms #%d finished " "unsuccessfully. Not trying " "new starting point.", ms); @@ -104,7 +104,7 @@ void MultiStartOptimization::runMultiThreaded() } #ifndef __APPLE__ else { - logmessage(LOGLVL_WARNING, "Thread ms #%d finished " + logmessage(loglevel::warning, "Thread ms #%d finished " "unsuccessfully... trying new " "starting point", ms); @@ -112,7 +112,7 @@ void MultiStartOptimization::runMultiThreaded() localProblems[ms] = msProblem.getLocalProblem(lastStartIdx).release(); logmessage( - LOGLVL_DEBUG, + loglevel::debug, "Spawning thread for local optimization #%d (%d)", lastStartIdx, ms); auto ret = pthread_create( @@ -132,14 +132,14 @@ void MultiStartOptimization::runMultiThreaded() sleep(1); // TODO: replace by condition via ThreadWrapper } - logmessage(LOGLVL_DEBUG, "runParallelMultiStartOptimization finished"); + logmessage(loglevel::debug, "runParallelMultiStartOptimization finished"); pthread_attr_destroy(&threadAttr); } void MultiStartOptimization::runSingleThreaded() { - logmessage(LOGLVL_DEBUG, + logmessage(loglevel::debug, "Starting runParallelMultiStartOptimization with %d starts sequentially", numberOfStarts); @@ -156,17 +156,17 @@ void MultiStartOptimization::runSingleThreaded() auto problem = msProblem.getLocalProblem(first_start_idx + ms); auto result = getLocalOptimum(problem.get()); if(result) { - logmessage(LOGLVL_DEBUG, + logmessage(loglevel::debug, "Start #%d finished successfully", ms); ++numSucceeded; } else { - logmessage(LOGLVL_DEBUG, "Thread ms #%d finished " + logmessage(loglevel::debug, "Thread ms #%d finished " "unsuccessfully.",ms); } ++ms; } - logmessage(LOGLVL_DEBUG, "runParallelMultiStartOptimization finished"); + logmessage(loglevel::debug, "runParallelMultiStartOptimization finished"); } void MultiStartOptimization::setRunParallel(bool runParallel) diff --git a/src/parpeoptimization/optimizationOptions.cpp b/src/parpeoptimization/optimizationOptions.cpp index 2775c6756..7f02cab38 100644 --- a/src/parpeoptimization/optimizationOptions.cpp +++ b/src/parpeoptimization/optimizationOptions.cpp @@ -178,7 +178,7 @@ std::vector OptimizationOptions::getStartingPoint(H5::H5File const& file [[maybe_unused]] auto lock = hdf5MutexGetLock(); if (!file.nameExists(path)) { - logmessage(LOGLVL_DEBUG, "No initial parameters found 
in %s", path); + logmessage(loglevel::debug, "No initial parameters found in %s", path); return startingPoint; } @@ -192,13 +192,13 @@ std::vector OptimizationOptions::getStartingPoint(H5::H5File const& file hsize_t dims[ndims]; dataspace.getSimpleExtentDims(dims); if (dims[1] < static_cast(index)) { - logmessage(LOGLVL_ERROR, + logmessage(loglevel::debug, "Requested starting point index %d out of bounds (%d)", index, static_cast(dims[1])); return startingPoint; } - logmessage(LOGLVL_INFO, "Reading random initial theta %d from %s", + logmessage(loglevel::info, "Reading random initial theta %d from %s", index, path); startingPoint.resize(dims[0]); diff --git a/src/parpeoptimization/optimizationProblem.cpp b/src/parpeoptimization/optimizationProblem.cpp index bc0daf5c7..9ff299802 100644 --- a/src/parpeoptimization/optimizationProblem.cpp +++ b/src/parpeoptimization/optimizationProblem.cpp @@ -124,11 +124,11 @@ void optimizationProblemGradientCheckMultiEps(OptimizationProblem *problem, absErr_best = abs_err; } } - loglevel ll = LOGLVL_INFO; + loglevel ll = loglevel::info; if (fabs(regRelErr_best) > 1e-3) - ll = LOGLVL_WARNING; + ll = loglevel::warning; if (std::isnan(curGrad) || fabs(regRelErr_best) > 1e-2) - ll = LOGLVL_ERROR; + ll = loglevel::error; logmessage(ll, "%-25s (%d) g: %12.6g fd_c: %12.6g |Δ/fd_c|: %.6e " "|Δ|: %12.6g ϵ: %12.6g ", parameter_ids.empty()?"":parameter_ids[curInd].c_str(), @@ -207,11 +207,11 @@ void optimizationProblemGradientCheck(OptimizationProblem *problem, double reg = 1e-5; double regRelError = (curGrad - fd_c) / (ff + reg); - loglevel ll = LOGLVL_INFO; + loglevel ll = loglevel::info; if (fabs(regRelError) > 1e-3) - ll = LOGLVL_WARNING; + ll = loglevel::warning; if (fabs(regRelError) > 1e-2) - ll = LOGLVL_ERROR; + ll = loglevel::error; logmessage(ll, "%5d g: %12.6g fd_c: %12.6g Δ/ff: %.6e f: %12.6g", curInd, curGrad, fd_c, regRelError, ff); @@ -326,7 +326,7 @@ int OptimizationReporter::numParameters() const { void OptimizationReporter::printObjectiveFunctionFailureMessage() const { if (logger_) - logger_->logmessage(LOGLVL_ERROR, "Objective function evaluation failed!"); + logger_->logmessage(loglevel::error, "Objective function evaluation failed!"); } bool OptimizationReporter::starting(gsl::span initialParameters) const { @@ -353,7 +353,7 @@ bool OptimizationReporter::iterationFinished(gsl::span parameters, double wallTimeOptim = wall_timer_.getTotal(); if (logger_) - logger_->logmessage(LOGLVL_INFO, + logger_->logmessage(loglevel::info, "iter: %d cost: %g time_iter: wall: %gs cpu: %gs time_optim: wall: %gs cpu: %gs", num_iterations_, objectiveFunctionValue, wallTimeIter, cpu_time_iteration_sec_, wallTimeOptim, @@ -410,13 +410,13 @@ void OptimizationReporter::finished(double optimalCost, // the optimal value is not from the cached parameters and we did not get // the optimal parameters from the optimizer. since we don't know them, rather set to nan if (logger_) - logger_->logmessage(LOGLVL_INFO, "cachedCost != optimalCost && parameters.empty()"); + logger_->logmessage(loglevel::info, "cachedCost != optimalCost && parameters.empty()"); cached_parameters_.assign(cached_parameters_.size(), NAN); cached_cost_ = optimalCost; } // else: our cached parameters were better. 
use those if (logger_) - logger_->logmessage(LOGLVL_INFO, "Optimizer status %d, final llh: %e, " + logger_->logmessage(loglevel::info, "Optimizer status %d, final llh: %e, " "time: wall: %f cpu: %f.", exitStatus, cached_cost_, timeElapsed, cpu_time_total_sec_); diff --git a/src/parpeoptimization/optimizationResultWriter.cpp b/src/parpeoptimization/optimizationResultWriter.cpp index 85be40c90..97fb10ba1 100644 --- a/src/parpeoptimization/optimizationResultWriter.cpp +++ b/src/parpeoptimization/optimizationResultWriter.cpp @@ -26,7 +26,7 @@ OptimizationResultWriter::OptimizationResultWriter(const std::string &filename, bool overwrite, std::string rootPath) : rootPath(std::move(rootPath)) { - logmessage(LOGLVL_DEBUG, "Writing results to %s.", filename.c_str()); + logmessage(loglevel::debug, "Writing results to %s.", filename.c_str()); file = hdf5CreateFile(filename, overwrite); diff --git a/templates/main.cpp b/templates/main.cpp index e1722bb9e..304e6b1a5 100644 --- a/templates/main.cpp +++ b/templates/main.cpp @@ -19,11 +19,11 @@ class MyOptimizationApplication : public parpe::OptimizationApplication { public: using OptimizationApplication::OptimizationApplication; - virtual void initProblem(std::string inFileArgument, - std::string outFileArgument) override + virtual void initProblem(std::string const& inFileArgument, + std::string const& outFileArgument) override { if (!isWorker()) - parpe::logmessage(parpe::LOGLVL_INFO, + parpe::logmessage(parpe::loglevel::info, "Reading options and data from '%s'.", inFileArgument.c_str()); diff --git a/templates/main_debug.cpp b/templates/main_debug.cpp index 608543326..64129de14 100644 --- a/templates/main_debug.cpp +++ b/templates/main_debug.cpp @@ -20,7 +20,7 @@ int main(int argc, char **argv) { std::string inFileArgument = "/home/dweindl/src/benchmarkProblem/20190205221009_Speedy_v4_Jan2019_generic_degradation_r415549/Speedy_v4_Jan2019_generic_degradation_r415549.bak.h5"; - parpe::logmessage(parpe::LOGLVL_INFO, + parpe::logmessage(parpe::loglevel::info, "Reading options and data from '%s'.", inFileArgument.c_str()); diff --git a/templates/main_nominal.cpp b/templates/main_nominal.cpp index 8927f7b6c..0c0be6047 100644 --- a/templates/main_nominal.cpp +++ b/templates/main_nominal.cpp @@ -38,7 +38,7 @@ int main(int argc, char **argv) { } - parpe::logmessage(parpe::LOGLVL_INFO, + parpe::logmessage(parpe::loglevel::info, "Reading options and data from '%s'.", inFileArgument.c_str()); @@ -69,8 +69,8 @@ int main(int argc, char **argv) { auto maxAbsGradient = *std::max_element(gradient.begin(), gradient.end()); std::for_each(gradient.begin(), gradient.end(), [fval](double &d){ d /= std::fabs(fval); }); auto maxRelGradient = *std::max_element(gradient.begin(), gradient.end()); - parpe::logmessage(LOGLVL_INFO, "Max(abs(grad)) = " + std::to_string(maxAbsGradient)); - parpe::logmessage(LOGLVL_INFO, "Max(abs(grad)/fval) = " + std::to_string(maxRelGradient)); + parpe::logmessage(loglevel::info, "Max(abs(grad)) = " + std::to_string(maxAbsGradient)); + parpe::logmessage(loglevel::info, "Max(abs(grad)/fval) = " + std::to_string(maxRelGradient)); - parpe::logmessage(LOGLVL_INFO, "Likelihood: " + std::to_string(fval)); + parpe::logmessage(loglevel::info, "Likelihood: " + std::to_string(fval)); } diff --git a/tests/parpecommon/commonTests.cpp b/tests/parpecommon/commonTests.cpp index 64b7fba13..1437df30c 100644 --- a/tests/parpecommon/commonTests.cpp +++ b/tests/parpecommon/commonTests.cpp @@ -154,7 +154,7 @@ TEST(Logging, MessageIsPrinted) { 
captureStreamToString([](){ parpe::warning("bla"); parpe::error("bla"); - parpe::logmessage(parpe::LOGLVL_ERROR, "error"); + parpe::logmessage(parpe::loglevel::error, "error"); }, stdout); } From d335847c11a0688d1320216394d20ce3edb26fc0 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 4 Nov 2021 09:28:46 +0100 Subject: [PATCH 08/11] Remove obsolete runInParallelAndWaitForFinish --- include/parpecommon/misc.h | 5 ----- src/parpecommon/misc.cpp | 22 ---------------------- tests/parpecommon/commonTests.cpp | 10 ---------- 3 files changed, 37 deletions(-) diff --git a/include/parpecommon/misc.h b/include/parpecommon/misc.h index 92077d76b..3b6b259f5 100644 --- a/include/parpecommon/misc.h +++ b/include/parpecommon/misc.h @@ -85,11 +85,6 @@ class CpuTimer void strFormatCurrentLocaltime(gsl::span buffer, const char* format); -void -runInParallelAndWaitForFinish(void* (*function)(void*), - void** args, - int numArgs); - void printBacktrace(int nMaxFrames = 20); diff --git a/src/parpecommon/misc.cpp b/src/parpecommon/misc.cpp index 8806a8092..7509c36cd 100644 --- a/src/parpecommon/misc.cpp +++ b/src/parpecommon/misc.cpp @@ -47,28 +47,6 @@ void strFormatCurrentLocaltime(gsl::span buffer, const char *format) { strftime(buffer.data(), buffer.size(), format, &local_time); } -void runInParallelAndWaitForFinish(void *(*function)(void *), void **args, - int numArgs) { - // create threads - pthread_attr_t threadAttr; - pthread_attr_init(&threadAttr); - pthread_attr_setdetachstate(&threadAttr, PTHREAD_CREATE_JOINABLE); - - auto threads = static_cast(alloca(numArgs * sizeof(pthread_t))); - - for (int i = 0; i < numArgs; ++i) { - pthread_create(&threads[i], &threadAttr, function, args[i]); - } - pthread_attr_destroy(&threadAttr); - - // wait for finish - for (int i = 0; i < numArgs; ++i) { - pthread_join(threads[i], nullptr); - logmessage(loglevel::debug, "Thread i %d finished", i); - } - logmessage(loglevel::debug, "All k threads finished."); -} - void printBacktrace(int nMaxFrames) { void *array[nMaxFrames]; size_t size; diff --git a/tests/parpecommon/commonTests.cpp b/tests/parpecommon/commonTests.cpp index 1437df30c..43f12888a 100644 --- a/tests/parpecommon/commonTests.cpp +++ b/tests/parpecommon/commonTests.cpp @@ -126,16 +126,6 @@ TEST(Common, Mpi) { #endif -TEST(Common, RunInParallelAndWaitForFinish) { - captureStreamToString([](){ - const int numThreads = 15; - void* args[numThreads]; - - parpe::runInParallelAndWaitForFinish( - [](void *) -> void* { return nullptr; }, args, numThreads); - }, stdout); -} - TEST(Common, StrFormatCurrentLocaltime) { int buflen = 10; char buf[buflen]; From 82b2ba9778df152dc436c3c8e4bb1f20f2bc8212 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Thu, 4 Nov 2021 10:22:46 +0100 Subject: [PATCH 09/11] Various cleanup --- .../parpeamici/multiConditionDataProvider.h | 2 +- include/parpecommon/misc.h | 2 - include/parpecommon/parpeException.h | 4 +- .../parpeoptimization/minibatchOptimization.h | 2 +- src/parpeamici/hierarchicalOptimization.cpp | 24 ++++---- ...ptimizationAnalyticalParameterProvider.cpp | 12 ++-- src/parpeamici/multiConditionProblem.cpp | 13 ++-- src/parpeamici/standaloneSimulator.cpp | 61 +++++++++---------- src/parpecommon/misc.cpp | 9 --- 9 files changed, 56 insertions(+), 73 deletions(-) diff --git a/include/parpeamici/multiConditionDataProvider.h b/include/parpeamici/multiConditionDataProvider.h index d3dbe62f6..a344659d2 100644 --- a/include/parpeamici/multiConditionDataProvider.h +++ b/include/parpeamici/multiConditionDataProvider.h @@ -308,7 
+308,7 @@ class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider virtual void readFixedSimulationParameters(int conditionIdx, gsl::span buffer) const; - virtual std::unique_ptr getExperimentalDataForCondition( + std::unique_ptr getExperimentalDataForCondition( int conditionIdx) const override; std::vector> getAllMeasurements() const override; diff --git a/include/parpecommon/misc.h b/include/parpecommon/misc.h index 3b6b259f5..c092eccdc 100644 --- a/include/parpecommon/misc.h +++ b/include/parpecommon/misc.h @@ -80,8 +80,6 @@ class CpuTimer abort(); \ } -// void printMatlabArray(const double *buffer, int len); - void strFormatCurrentLocaltime(gsl::span buffer, const char* format); diff --git a/include/parpecommon/parpeException.h b/include/parpecommon/parpeException.h index 0ef006eb0..1a007642a 100644 --- a/include/parpecommon/parpeException.h +++ b/include/parpecommon/parpeException.h @@ -12,9 +12,9 @@ class ParPEException : public std::exception { explicit ParPEException(std::string message); - virtual ~ParPEException() throw() {} + ~ParPEException() throw() override = default; - virtual const char *what() const noexcept override; + const char *what() const noexcept override; private: std::string message; diff --git a/include/parpeoptimization/minibatchOptimization.h b/include/parpeoptimization/minibatchOptimization.h index 36bb265a0..c7bee47a3 100755 --- a/include/parpeoptimization/minibatchOptimization.h +++ b/include/parpeoptimization/minibatchOptimization.h @@ -632,7 +632,7 @@ class MinibatchOptimizer { finalFail = true; // If nothing helps and no cold restart wanted: cancel optimization - if (initialFail or (finalFail and interceptor != interceptType::reduceStepAndRestart)) { + if (initialFail || (finalFail && interceptor != interceptType::reduceStepAndRestart)) { logger->logmessage(loglevel::debug, "Failure at initial point of optimization. 
Stopping."); return functionEvaluationFailure; } diff --git a/src/parpeamici/hierarchicalOptimization.cpp b/src/parpeamici/hierarchicalOptimization.cpp index 60caa352b..2afd76db0 100644 --- a/src/parpeamici/hierarchicalOptimization.cpp +++ b/src/parpeamici/hierarchicalOptimization.cpp @@ -637,11 +637,11 @@ std::unique_ptr HierarchicalOptimizationProblemWrapper::getReporter() const { auto innerReporter = wrapped_problem_->getReporter(); - auto outerReporter = std::unique_ptr( - new HierarchicalOptimizationReporter( - dynamic_cast(cost_fun_.get()), - std::move(innerReporter->result_writer_), - std::make_unique(*logger_))); + auto outerReporter = std::make_unique( + dynamic_cast(cost_fun_.get()), + std::move(innerReporter->result_writer_), + std::make_unique(*logger_) + ); return outerReporter; } @@ -715,7 +715,7 @@ computeAnalyticalScalings( double denominator = 0.0; for (auto const conditionIdx : dependentConditions) { - auto dependentObservables = + auto const& dependentObservables = scalingReader.getObservablesForParameter(scalingIdx, conditionIdx); int numTimepoints = measurements[conditionIdx].size() / numObservables; @@ -808,7 +808,7 @@ computeAnalyticalOffsets( double denominator = 0.0; for (auto const conditionIdx : dependentConditions) { - auto dependentObservables = + auto const& dependentObservables = offsetReader.getObservablesForParameter(offsetIdx, conditionIdx); int numTimepoints = measurements[conditionIdx].size() / numObservables; for (auto const observableIdx : dependentObservables) { @@ -868,7 +868,7 @@ computeAnalyticalSigmas( double maxAbsMeasurement = 0.0; for (auto const conditionIdx : dependentConditions) { - auto dependentObservables = + auto const& dependentObservables = sigmaReader.getObservablesForParameter(sigmaIdx, conditionIdx); int numTimepoints = measurements[conditionIdx].size() / numObservables; for (auto const observableIdx : dependentObservables) { @@ -940,7 +940,7 @@ applyOptimalScaling(int scalingIdx, scalingReader.getConditionsForParameter(scalingIdx); for (auto const conditionIdx : dependentConditions) { int numTimepoints = modelOutputs[conditionIdx].size() / numObservables; - auto dependentObservables = + auto const& dependentObservables = scalingReader.getObservablesForParameter(scalingIdx, conditionIdx); for (auto const observableIdx : dependentObservables) { if (observableIdx >= numObservables) { @@ -970,7 +970,7 @@ applyOptimalOffset(int offsetIdx, offsetReader.getConditionsForParameter(offsetIdx); for (auto const conditionIdx : dependentConditions) { int numTimepoints = modelOutputs[conditionIdx].size() / numObservables; - auto dependentObservables = + auto const& dependentObservables = offsetReader.getObservablesForParameter(offsetIdx, conditionIdx); for (auto const observableIdx : dependentObservables) { if (observableIdx >= numObservables) { @@ -1137,9 +1137,9 @@ HierarchicalOptimizationReporter::HierarchicalOptimizationReporter( HierarchicalOptimizationWrapper* gradFun, std::unique_ptr rw, std::unique_ptr logger) - : OptimizationReporter(gradFun, std::move(rw), std::move(logger)) + : OptimizationReporter(gradFun, std::move(rw), std::move(logger)), + hierarchical_wrapper_(gradFun) { - hierarchical_wrapper_ = gradFun; } FunctionEvaluationStatus diff --git a/src/parpeamici/hierarchicalOptimizationAnalyticalParameterProvider.cpp b/src/parpeamici/hierarchicalOptimizationAnalyticalParameterProvider.cpp index 76fd3887b..8648c8845 100644 --- a/src/parpeamici/hierarchicalOptimizationAnalyticalParameterProvider.cpp +++ 
b/src/parpeamici/hierarchicalOptimizationAnalyticalParameterProvider.cpp @@ -46,8 +46,8 @@ AnalyticalParameterHdf5Reader::getConditionsForParameter( { std::vector result; result.reserve(mapping[parameterIndex].size()); - for (auto const& kvp : mapping[parameterIndex]) - result.push_back(kvp.first); + for (auto const& [k, v] : mapping[parameterIndex]) + result.push_back(k); return result; } @@ -69,8 +69,7 @@ AnalyticalParameterHdf5Reader::getOptimizationParameterIndices() const auto dataset = file.openDataSet(analyticalParameterIndicesPath); auto dataspace = dataset.getSpace(); - auto ndims = dataspace.getSimpleExtentNdims(); - if (ndims != 1) + if (dataspace.getSimpleExtentNdims() != 1) throw ParPEException( "Invalid dimension in getOptimizationParameterIndices."); hsize_t numScalings = 0; @@ -98,8 +97,7 @@ AnalyticalParameterHdf5Reader::getNumAnalyticalParameters() const if (file.nameExists(analyticalParameterIndicesPath)) { auto dataset = file.openDataSet(analyticalParameterIndicesPath); auto dataspace = dataset.getSpace(); - auto ndims = dataspace.getSimpleExtentNdims(); - if (ndims != 1) + if (dataspace.getSimpleExtentNdims() != 1) throw ParPEException( "Invalid dimension in getOptimizationParameterIndices."); dataspace.getSimpleExtentDims(&numAnalyticalParameters); @@ -135,7 +133,7 @@ AnalyticalParameterHdf5Reader::readParameterConditionObservableMappingFromFile() int observableIdx = rawMap[i * nCols + observableCol]; mapping[scalingIdx][conditionIdx].push_back(observableIdx); } - } catch (H5::FileIException&) { + } catch (H5::FileIException const&) { return; } H5_RESTORE_ERROR_HANDLER; diff --git a/src/parpeamici/multiConditionProblem.cpp b/src/parpeamici/multiConditionProblem.cpp index 72a2ab4a3..40303c9fd 100644 --- a/src/parpeamici/multiConditionProblem.cpp +++ b/src/parpeamici/multiConditionProblem.cpp @@ -197,9 +197,8 @@ std::unique_ptr MultiConditionProblemMultiStartOptimization data_provider_->getHdf5File(), multiStartIndex)); if(options_.hierarchicalOptimization) - return std::unique_ptr( - new parpe::HierarchicalOptimizationProblemWrapper( - std::move(problem), data_provider_)); + return std::make_unique( + std::move(problem), data_provider_); return problem; } @@ -540,10 +539,10 @@ FunctionEvaluationStatus getModelOutputsAndSigmas( job->recvBuffer.data(), job->recvBuffer.size()); std::vector().swap(job->recvBuffer); // free buffer - for (auto const& result : results) { - errors += result.second.status; - modelOutputs[result.first] = result.second.modelOutput; - modelSigmas[result.first] = result.second.modelSigmas; + for (auto const& [condition_idx, result]: results) { + errors += result.status; + modelOutputs[condition_idx] = result.modelOutput; + modelSigmas[condition_idx] = result.modelSigmas; } }; diff --git a/src/parpeamici/standaloneSimulator.cpp b/src/parpeamici/standaloneSimulator.cpp index 8e8da1ff9..f8d571e59 100644 --- a/src/parpeamici/standaloneSimulator.cpp +++ b/src/parpeamici/standaloneSimulator.cpp @@ -169,14 +169,14 @@ StandaloneSimulator::run(const std::string& resultFile, AmiciSimulationRunner::AmiciResultPackageSimple>>( job.recvBuffer.data(), job.recvBuffer.size()); std::vector().swap(job.recvBuffer); // free buffer - for (auto& result : results) { - swap(simulationResults[result.first], result.second); - modelOutputs[result.first] = - simulationResults[result.first].modelOutput; - modelSigmas[result.first] = - simulationResults[result.first].modelSigmas; - modelStates[result.first] = - simulationResults[result.first].modelStates; + for (auto& 
[condition_idx, result] : results) { + swap(simulationResults[condition_idx], result); + modelOutputs[condition_idx] = + simulationResults[condition_idx].modelOutput; + modelSigmas[condition_idx] = + simulationResults[condition_idx].modelSigmas; + modelStates[condition_idx] = + simulationResults[condition_idx].modelStates; } } @@ -195,8 +195,7 @@ StandaloneSimulator::run(const std::string& resultFile, auto sigmas = hierarchical.computeAnalyticalSigmas( allMeasurements, modelOutputs); if (!hierarchical.getSigmaParameterIndices().empty()) { - hierarchical.fillInAnalyticalSigmas(modelSigmas, - sigmas); + hierarchical.fillInAnalyticalSigmas(modelSigmas, sigmas); } // save parameters @@ -295,34 +294,31 @@ StandaloneSimulator::run(const std::string& resultFile, job->recvBuffer.data(), job->recvBuffer.size()); std::vector().swap(job->recvBuffer); // free buffer - for (auto const& result : results) { - errors += result.second.status; - int conditionIdx = result.first; + for (auto const& [condition_idx, result] : results) { + errors += result.status; auto edata = dataProvider->getExperimentalDataForCondition( - conditionIdx); + condition_idx); - rw.saveTimepoints(edata->getTimepoints(), - conditionIdx); - if(!result.second.modelStates.empty()) { - rw.saveStates(result.second.modelStates, edata->nt(), - model->nx_rdata, conditionIdx); + rw.saveTimepoints(edata->getTimepoints(), condition_idx); + if(!result.modelStates.empty()) { + rw.saveStates(result.modelStates, edata->nt(), + model->nx_rdata, condition_idx); } rw.saveMeasurements(edata->getObservedData(), edata->nt(), edata->nytrue(), - conditionIdx); - rw.saveModelOutputs(result.second.modelOutput, + condition_idx); + rw.saveModelOutputs(result.modelOutput, edata->nt(), model->nytrue, - conditionIdx); - rw.saveLikelihood(result.second.llh, - conditionIdx); + condition_idx); + rw.saveLikelihood(result.llh, condition_idx); // to save simulation parameters dataProvider->updateSimulationParametersAndScale( - conditionIdx, parameterValues, *model); - rw.saveParameters(model->getParameters(), conditionIdx); + condition_idx, parameterValues, *model); + rw.saveParameters(model->getParameters(), condition_idx); } }; @@ -453,19 +449,20 @@ getFinalParameters(std::string const& startIndex, H5::H5File const& file) } --iteration; // last one did not exist - auto bestPairLast = getFunctionEvaluationWithMinimalCost( + auto [costFunEvaluationIndexLast, costFunValLast] = getFunctionEvaluationWithMinimalCost( iterationPath + std::to_string(iteration) + "/costFunCost", file); - int costFunEvaluationIndex = bestPairLast.first; + int costFunEvaluationIndex = costFunEvaluationIndexLast; if (iteration > 0) { // If job got killed during line search, the final point of the previous // iteration might be better than any line search steps of the current // iteration - auto bestPairSecondLast = getFunctionEvaluationWithMinimalCost( - iterationPath + std::to_string(iteration - 1) + "/costFunCost", file); - if (bestPairSecondLast.second < bestPairLast.second) { + auto [costFunEvaluationIndexSecondLast, costFunValSecondLast] = + getFunctionEvaluationWithMinimalCost( + iterationPath + std::to_string(iteration - 1) + "/costFunCost", file); + if (costFunValSecondLast < costFunValLast) { --iteration; - costFunEvaluationIndex = bestPairSecondLast.first; + costFunEvaluationIndex = costFunEvaluationIndexSecondLast; } } diff --git a/src/parpecommon/misc.cpp b/src/parpecommon/misc.cpp index 7509c36cd..1ac4dc2a1 100644 --- a/src/parpecommon/misc.cpp +++ b/src/parpecommon/misc.cpp @@ 
-1,7 +1,6 @@ #include #include -#include #include #include #include @@ -27,14 +26,6 @@ #include #endif -// void printMatlabArray(const double *buffer, int len) -//{ -// printf("["); -// printfArray(buffer, len - 1, "%e, "); -// printf("%e]\n", buffer[len - 1]); -// fflush(stdout); -//} - namespace parpe { From 9559ce329b98fff3ddefc4237605e79f68cb20c2 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Fri, 5 Nov 2021 09:42:00 +0100 Subject: [PATCH 10/11] Add option to specify maximum number of threads for multi-start optimization (#361) So far, this was equal to the number of local optimizations, but that wouldn't scale well. This decouples the number parallel local optimizations from the total number of optimizations. This is controlled by setting environment variable `PARPE_NUM_PARALLEL_STARTS` to the number of optimizations that should be run in parallel. * Now requires Boost.Thread library * Update dependencies * Remove obsolete functions * getLocalOptimumThreadWrapper * MultiStartOptimization::createLocalOptimizationProblems Closes #359, closes #84 --- README.md | 20 +- doc/optimizationApplication.md | 7 + .../multiStartOptimization.h | 7 +- .../parpeoptimization/optimizationProblem.h | 9 - src/parpeamici/CMakeLists.txt | 2 +- .../multiStartOptimization.cpp | 194 ++++++++---------- src/parpeoptimization/optimizationProblem.cpp | 7 - 7 files changed, 115 insertions(+), 131 deletions(-) diff --git a/README.md b/README.md index 0fe3d277d..80851602d 100644 --- a/README.md +++ b/README.md @@ -55,6 +55,7 @@ For full functionality, parPE requires the following libraries: * IPOPT (>= 1.2.7) (requires coinhsl) * CERES (>=1.13) ([requires Eigen](http://ceres-solver.org/installation.html#dependencies)) +* [Boost](https://www.boost.org/) (serialization, thread) * HDF5 (>= 1.10) * CBLAS compatible BLAS (libcblas, Intel MKL, ...) * [AMICI](https://github.com/ICB-DCM/AMICI) (included in this repository) @@ -64,10 +65,21 @@ For full functionality, parPE requires the following libraries: On Debian-based systems, dependencies can be installed via: ```shell -sudo apt-get install build-essential cmake cmake-curses-gui \ - coinor-libipopt-dev curl gfortran \ - libblas-dev libboost-serialization-dev libceres-dev \ - libmpich-dev libhdf5-dev libpython3-dev python3-pip +sudo apt-get install \ + build-essential \ + cmake \ + cmake-curses-gui \ + coinor-libipopt-dev \ + curl \ + gfortran \ + libblas-dev \ + libboost-serialization-dev \ + libboost-thread-dev \ + libceres-dev \ + libmpich-dev \ + libhdf5-dev \ + libpython3-dev \ + python3-pip ``` Scripts to fetch and build the remaining dependencies are provided in diff --git a/doc/optimizationApplication.md b/doc/optimizationApplication.md index 6a7c1231f..5c8b36557 100644 --- a/doc/optimizationApplication.md +++ b/doc/optimizationApplication.md @@ -44,6 +44,13 @@ Run the created executable with the `-h`/`--help` argument. ## Environment variables +- **PARPE_NUM_PARALLEL_STARTS** + + Setting `PARPE_NUM_PARALLEL_STARTS=n` will create a maximum of `n` threads + for concurrent local optimizations during multi-start optimization. + If unset, this defaults to the number of concurrent threads supported by + hardware. + - **PARPE_LOG_SIMULATIONS** Setting `PARPE_LOG_SIMULATIONS=1` will cause every single AMICI simulation to be saved in the result files. 
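A minimal, self-contained sketch of the thread-pool dispatch pattern that the multi-start changes below introduce (not parPE code: `runStart` and the start count of 16 are placeholders, the restart-on-failure handling is omitted, and it assumes a reasonably recent Boost.Asio providing `thread_pool` and `post` with `std::packaged_task`):

```cpp
#include <boost/asio.hpp>

#include <algorithm>
#include <cstdlib>
#include <future>
#include <string>
#include <thread>
#include <utility>
#include <vector>

// Hypothetical stand-in for one local optimization; returns 0 on success.
static int runStart(int /*start_idx*/) { return 0; }

int main() {
    const int numberOfStarts = 16;  // hypothetical total number of starts

    // Pool size: PARPE_NUM_PARALLEL_STARTS if set, otherwise the number of
    // hardware threads (note that hardware_concurrency() may return 0).
    unsigned num_threads = std::max(std::thread::hardware_concurrency(), 1U);
    if (const char *env = std::getenv("PARPE_NUM_PARALLEL_STARTS"))
        num_threads = static_cast<unsigned>(std::stoi(env));
    num_threads = std::min(num_threads, static_cast<unsigned>(numberOfStarts));

    boost::asio::thread_pool pool(num_threads);

    // Posting a std::packaged_task yields its std::future, so each result
    // can later be matched to the start index it belongs to.
    std::vector<std::future<std::pair<int, int>>> futures;
    futures.reserve(numberOfStarts);
    for (int start_idx = 0; start_idx < numberOfStarts; ++start_idx)
        futures.push_back(boost::asio::post(
            pool, std::packaged_task<std::pair<int, int>()>([start_idx] {
                return std::make_pair(start_idx, runStart(start_idx));
            })));

    // Collect results; only num_threads starts run concurrently.
    std::vector<int> status(numberOfStarts, -1);
    for (auto &future : futures) {
        const auto result = future.get();  // blocks until this start finished
        status[result.first] = result.second;
    }

    pool.join();  // wait for the worker threads to exit

    return std::all_of(status.begin(), status.end(),
                       [](int s) { return s == 0; })
               ? 0
               : 1;
}
```

In the actual `MultiStartOptimization::runMultiThreaded()` below, failed starts are additionally resubmitted to the pool with a fresh start index when `restartOnFailure` is enabled, and per-start outcomes are reported via `logmessage`.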
diff --git a/include/parpeoptimization/multiStartOptimization.h b/include/parpeoptimization/multiStartOptimization.h index a8e75e2a9..eba5d81d4 100644 --- a/include/parpeoptimization/multiStartOptimization.h +++ b/include/parpeoptimization/multiStartOptimization.h @@ -45,7 +45,7 @@ class MultiStartOptimization { /** * @brief Run all optimizations in parallel, each in a dedicated thread */ - void runMultiThreaded(); + void runMultiThreaded() const; /** * @brief Run optimizations sequentially @@ -58,8 +58,11 @@ class MultiStartOptimization { void setRunParallel(bool runParallel); private: + /** + * @brief Optimize local problem for the given start index + */ + int runStart(int start_idx) const; - std::vector createLocalOptimizationProblems(); /** Optimization problem to be solved */ MultiStartOptimizationProblem& msProblem; diff --git a/include/parpeoptimization/optimizationProblem.h b/include/parpeoptimization/optimizationProblem.h index 4e52dcda5..68524c879 100644 --- a/include/parpeoptimization/optimizationProblem.h +++ b/include/parpeoptimization/optimizationProblem.h @@ -217,15 +217,6 @@ class OptimizationProblemImpl: public OptimizationProblem { int getLocalOptimum(OptimizationProblem *problem); -/** - * @brief getLocalOptimumThreadWrapper wrapper for using getLocalOptimum with - * pThreads. - * @param optimizationProblemVp - * @return Pointer to int indicating status. 0: success, != 0: failure - */ - -void *getLocalOptimumThreadWrapper(void *optimizationProblemVp); - void optimizationProblemGradientCheckMultiEps(OptimizationProblem *problem, int numParameterIndicesToCheck); diff --git a/src/parpeamici/CMakeLists.txt b/src/parpeamici/CMakeLists.txt index dd50e3820..e85c2566e 100644 --- a/src/parpeamici/CMakeLists.txt +++ b/src/parpeamici/CMakeLists.txt @@ -9,7 +9,7 @@ else() # For python module we need -fPIC which is only the case with shared libs set(Boost_USE_STATIC_LIBS FALSE) endif() -find_package(Boost COMPONENTS serialization REQUIRED) +find_package(Boost COMPONENTS serialization thread REQUIRED) project(parpeamici) diff --git a/src/parpeoptimization/multiStartOptimization.cpp b/src/parpeoptimization/multiStartOptimization.cpp index 82b6aadd5..8580d4492 100644 --- a/src/parpeoptimization/multiStartOptimization.cpp +++ b/src/parpeoptimization/multiStartOptimization.cpp @@ -1,20 +1,20 @@ +#include + #include #include #include #include #include -#include -#include -#include +#include namespace parpe { MultiStartOptimization::MultiStartOptimization( - MultiStartOptimizationProblem &problem, - bool runParallel, - int first_start_idx) + MultiStartOptimizationProblem &problem, + bool runParallel, + int first_start_idx) : msProblem(problem), numberOfStarts(problem.getNumberOfStarts()), restartOnFailure(problem.restartOnFailure()), @@ -31,110 +31,90 @@ void MultiStartOptimization::run() { runSingleThreaded(); } -void MultiStartOptimization::runMultiThreaded() +void MultiStartOptimization::runMultiThreaded() const { - logmessage(loglevel::debug, - "Starting runParallelMultiStartOptimization with %d starts", - numberOfStarts); - - std::vector localOptimizationThreads(numberOfStarts); - - std::vector localProblems = - createLocalOptimizationProblems(); - - if(localProblems.size() != static_cast::size_type>(numberOfStarts)) { - throw ParPEException("Number of problems does not match number of specific starts."); + // Determine thread pool size + // (note that hardware_concurrency() may return 0) + auto num_threads = std::max(std::thread::hardware_concurrency(), 1U); + if(auto env = 
std::getenv("PARPE_NUM_PARALLEL_STARTS")) { + num_threads = std::stoi(env); } + num_threads = std::min(num_threads, + static_cast(numberOfStarts)); - pthread_attr_t threadAttr; - pthread_attr_init(&threadAttr); - pthread_attr_setdetachstate(&threadAttr, PTHREAD_CREATE_JOINABLE); - - int lastStartIdx = -1; - // launch threads for required number of starts - for (int ms = 0; ms < numberOfStarts; ++ms) { + logmessage(loglevel::debug, + "Running %d starts using %d threads", + numberOfStarts, num_threads); + + boost::asio::thread_pool pool(num_threads); + + auto num_successful_starts = 0; + auto num_finished_starts = 0; + auto lastStartIdx = -1; + + // submit the minimum number of starts + std::vector>> futures; + futures.reserve(numberOfStarts); + for (int start_idx = 0; start_idx < numberOfStarts; ++start_idx) { + futures.push_back( + boost::asio::post( + pool, + std::packaged_task()>([this, start_idx] { + return std::make_pair(start_idx, runStart(start_idx)); + }))); ++lastStartIdx; - - logmessage(loglevel::debug, - "Spawning thread for local optimization #%d (%d)", - lastStartIdx, ms); - - auto ret = pthread_create( - &localOptimizationThreads.at(ms), &threadAttr, - getLocalOptimumThreadWrapper, - static_cast(localProblems[ms])); - if(ret) { - throw ParPEException("Failure during thread creation: " - + std::to_string(ret)); - } } - int numCompleted = 0; + // Report finished runs and restart if necessary + while ((restartOnFailure && num_successful_starts < numberOfStarts) + || (!restartOnFailure && num_finished_starts < numberOfStarts)) { + for (auto &future: futures) { + // future value might have been retrieved before + if(!future.valid()) { + continue; + } - while (numCompleted < numberOfStarts) { - for (int ms = 0; ms < numberOfStarts; ++ms) { - // problem still running? - if (!localProblems[ms]) + if(auto status = future.wait_for(std::chrono::milliseconds(1)); + status != std::future_status::ready) { continue; + } - int *threadStatus = nullptr; -#ifndef __APPLE__ - // TODO(#84) pthread_tryjoin_np is not available on macOS. can replace easily by pthread_join, but this would only allow restarting failed threads rather late, so we disable the retry option for now. - int joinStatus = pthread_tryjoin_np(localOptimizationThreads[ms], - reinterpret_cast(&threadStatus)); -#else - int joinStatus = pthread_join(localOptimizationThreads[ms], - reinterpret_cast(&threadStatus)); -#endif - if (joinStatus == 0) { // joined successful - delete localProblems[ms]; - localProblems[ms] = nullptr; - - if (*threadStatus == 0 || !restartOnFailure) { - if (*threadStatus == 0) { - logmessage(loglevel::debug, - "Thread ms #%d finished successfully", ms); - } else { - logmessage(loglevel::debug, "Thread ms #%d finished " - "unsuccessfully. Not trying " - "new starting point.", - ms); - } - ++numCompleted; - } -#ifndef __APPLE__ - else { - logmessage(loglevel::warning, "Thread ms #%d finished " - "unsuccessfully... 
trying new " - "starting point", - ms); - ++lastStartIdx; - - localProblems[ms] = msProblem.getLocalProblem(lastStartIdx).release(); - logmessage( - loglevel::debug, - "Spawning thread for local optimization #%d (%d)", - lastStartIdx, ms); - auto ret = pthread_create( - &localOptimizationThreads[ms], &threadAttr, - getLocalOptimumThreadWrapper, - static_cast(localProblems[ms])); - if(ret) { - throw ParPEException("Failure during thread creation: " - + std::to_string(ret)); - } - } -#endif - delete threadStatus; + ++num_finished_starts; + auto [start_idx, retval] = future.get(); + + if (retval == 0) { + // success + logmessage(loglevel::debug, + "Optimization #%d finished successfully", + start_idx); + ++num_successful_starts; + } else if (!restartOnFailure) { + // failure, no new start + logmessage(loglevel::debug, + "Optimization ms #%d finished " + "unsuccessfully. Not trying " + "new starting point.", + start_idx); + } else { + // failure, new start + logmessage(loglevel::debug, + "Thread ms #%d finished unsuccessfully... " + "trying new starting point", start_idx); + ++lastStartIdx; + + future = boost::asio::post( + pool, + std::packaged_task()>( + [this, start_idx=lastStartIdx] { + return std::make_pair(start_idx, runStart(start_idx)); + })); } } - - sleep(1); // TODO: replace by condition via ThreadWrapper } - logmessage(loglevel::debug, "runParallelMultiStartOptimization finished"); + pool.join(); - pthread_attr_destroy(&threadAttr); + logmessage(loglevel::debug, "Multi-start optimization finished."); } void MultiStartOptimization::runSingleThreaded() @@ -153,15 +133,15 @@ void MultiStartOptimization::runSingleThreaded() if(ms == numberOfStarts) break; - auto problem = msProblem.getLocalProblem(first_start_idx + ms); - auto result = getLocalOptimum(problem.get()); + auto result = runStart(ms); + if(result) { logmessage(loglevel::debug, "Start #%d finished successfully", ms); ++numSucceeded; } else { - logmessage(loglevel::debug, "Thread ms #%d finished " - "unsuccessfully.",ms); + logmessage(loglevel::debug, "Start ms #%d finished " + "unsuccessfully.",ms); } ++ms; } @@ -174,15 +154,13 @@ void MultiStartOptimization::setRunParallel(bool runParallel) this->runParallel = runParallel; } -std::vector -MultiStartOptimization::createLocalOptimizationProblems() { - std::vector localProblems(numberOfStarts); - - for (int ms = 0; ms < numberOfStarts; ++ms) { - localProblems[ms] = msProblem.getLocalProblem(first_start_idx + ms).release(); - } +int MultiStartOptimization::runStart(int start_idx) const +{ + logmessage(loglevel::debug, + "Starting local optimization #%d", start_idx); - return localProblems; + auto problem = msProblem.getLocalProblem(first_start_idx + start_idx); + return getLocalOptimum(problem.get()); } } // namespace parpe diff --git a/src/parpeoptimization/optimizationProblem.cpp b/src/parpeoptimization/optimizationProblem.cpp index 9ff299802..d138dcb78 100644 --- a/src/parpeoptimization/optimizationProblem.cpp +++ b/src/parpeoptimization/optimizationProblem.cpp @@ -47,13 +47,6 @@ int getLocalOptimum(OptimizationProblem *problem) { } -void *getLocalOptimumThreadWrapper(void *optimizationProblemVp) { - auto problem = static_cast(optimizationProblemVp); - auto *result = new int; - *result = getLocalOptimum(problem); - return result; -} - void optimizationProblemGradientCheckMultiEps(OptimizationProblem *problem, int numParameterIndicesToCheck ) { From 944f4839613a5ee9b87f71efd4f14f8ab5589e76 Mon Sep 17 00:00:00 2001 From: Daniel Weindl Date: Fri, 5 Nov 2021 11:14:00 +0100 
From 944f4839613a5ee9b87f71efd4f14f8ab5589e76 Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Fri, 5 Nov 2021 11:14:00 +0100
Subject: [PATCH 11/11] Move CMake modules to cmake/

---
 CMakeLists.txt                                 | 6 +++---
 {CMakeModules => cmake}/BuildOptimized.cmake   | 0
 {CMakeModules => cmake}/BuildType.cmake        | 0
 {CMakeModules => cmake}/CodeCoverage.cmake     | 0
 {CMakeModules => cmake}/ConfigureVersion.cmake | 0
 {CMakeModules => cmake}/ParPEConfig.cmake.in   | 0
 {CMakeModules => cmake}/clang-tools.cmake      | 0
 {CMakeModules => cmake}/getVersion.cmake       | 0
 {CMakeModules => cmake}/split_version.cmake    | 0
 9 files changed, 3 insertions(+), 3 deletions(-)
 rename {CMakeModules => cmake}/BuildOptimized.cmake (100%)
 rename {CMakeModules => cmake}/BuildType.cmake (100%)
 rename {CMakeModules => cmake}/CodeCoverage.cmake (100%)
 rename {CMakeModules => cmake}/ConfigureVersion.cmake (100%)
 rename {CMakeModules => cmake}/ParPEConfig.cmake.in (100%)
 rename {CMakeModules => cmake}/clang-tools.cmake (100%)
 rename {CMakeModules => cmake}/getVersion.cmake (100%)
 rename {CMakeModules => cmake}/split_version.cmake (100%)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 229f6e32e..5211a6896 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -8,7 +8,7 @@ endif(POLICY CMP0074)
 
 project(parpe)
 
-set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/CMakeModules/")
+set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake/")
 
 set(CMAKE_CXX_STANDARD 17)
 set(CMAKE_CXX_STANDARD_REQUIRED True)
@@ -35,7 +35,7 @@ add_custom_target(get_version ALL
     COMMAND "${CMAKE_COMMAND}"
     "-DINFILE=${PROJECT_SOURCE_DIR}/src/parpecommon/parpeVersion.h.in"
     "-DOUTFILE=${CMAKE_BINARY_DIR}/src/parpecommon/parpeVersion.h"
-    "-P" "${PROJECT_SOURCE_DIR}/CMakeModules/ConfigureVersion.cmake")
+    "-P" "${PROJECT_SOURCE_DIR}/cmake/ConfigureVersion.cmake")
 
 include("getVersion")
 include("split_version")
@@ -234,7 +234,7 @@ install(FILES "${CMAKE_CURRENT_BINARY_DIR}/ParPETargets.cmake"
 set(LIB_INSTALL_DIR lib)
 include(CMakePackageConfigHelpers)
 configure_package_config_file(
-    "CMakeModules/ParPEConfig.cmake.in"
+    "cmake/ParPEConfig.cmake.in"
     "${CMAKE_CURRENT_BINARY_DIR}/ParPEConfig.cmake"
     INSTALL_DESTINATION "${LIB_INSTALL_DIR}/cmake/")
 
diff --git a/CMakeModules/BuildOptimized.cmake b/cmake/BuildOptimized.cmake
similarity index 100%
rename from CMakeModules/BuildOptimized.cmake
rename to cmake/BuildOptimized.cmake
diff --git a/CMakeModules/BuildType.cmake b/cmake/BuildType.cmake
similarity index 100%
rename from CMakeModules/BuildType.cmake
rename to cmake/BuildType.cmake
diff --git a/CMakeModules/CodeCoverage.cmake b/cmake/CodeCoverage.cmake
similarity index 100%
rename from CMakeModules/CodeCoverage.cmake
rename to cmake/CodeCoverage.cmake
diff --git a/CMakeModules/ConfigureVersion.cmake b/cmake/ConfigureVersion.cmake
similarity index 100%
rename from CMakeModules/ConfigureVersion.cmake
rename to cmake/ConfigureVersion.cmake
diff --git a/CMakeModules/ParPEConfig.cmake.in b/cmake/ParPEConfig.cmake.in
similarity index 100%
rename from CMakeModules/ParPEConfig.cmake.in
rename to cmake/ParPEConfig.cmake.in
diff --git a/CMakeModules/clang-tools.cmake b/cmake/clang-tools.cmake
similarity index 100%
rename from CMakeModules/clang-tools.cmake
rename to cmake/clang-tools.cmake
diff --git a/CMakeModules/getVersion.cmake b/cmake/getVersion.cmake
similarity index 100%
rename from CMakeModules/getVersion.cmake
rename to cmake/getVersion.cmake
diff --git a/CMakeModules/split_version.cmake b/cmake/split_version.cmake
similarity index 100%
rename from CMakeModules/split_version.cmake
rename to cmake/split_version.cmake
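For reference, the renamed modules are still located through CMAKE_MODULE_PATH: include() with a bare module name appends the .cmake suffix and searches that path, so only the path entry itself had to change. A minimal sketch of the mechanism, reusing the BuildType module name from the renames above; the project name is hypothetical and not part of the patch:

cmake_minimum_required(VERSION 3.10)
project(module_path_demo)

# Make <source dir>/cmake/ searchable for CMake modules
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake")

# Resolves to cmake/BuildType.cmake; OPTIONAL avoids an error if it is absent
include(BuildType OPTIONAL)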