diff --git a/README.rst b/README.rst index 6c296fbd0..0693d0627 100644 --- a/README.rst +++ b/README.rst @@ -37,23 +37,23 @@ Installation The easiest way to install Sire is using our `conda channel `__. Sire is built using dependencies from `conda-forge `__, so please ensure that the channel takes strict priority. We recommend using -`mambaforge `__. +`miniforge3 `__. To create a new environment: .. code-block:: bash - mamba create -n openbiosim "python<3.11" - mamba activate openbiosim - mamba install -c conda-forge -c openbiosim sire + conda create -n openbiosim "python<3.12" + conda activate openbiosim + conda install -c conda-forge -c openbiosim sire To install the latest development version you can use: .. code-block:: bash - mamba create -n openbiosim-dev "python<3.11" - mamba activate openbiosim-dev - mamba install -c conda-forge -c openbiosim/label/dev sire + conda create -n openbiosim-dev "python<3.12" + conda activate openbiosim-dev + conda install -c conda-forge -c openbiosim/label/dev sire However, as you are here, it is likely you want to download the latest, greatest version of the code, which you will need to compile. To compile @@ -65,32 +65,32 @@ First, you need to create and activate a conda environment, e.g. .. code-block:: bash - mamba create -n openbiosim-dev "python<3.11" - mamba activate openbiosim-dev + conda create -n openbiosim-dev "python<3.12" + conda activate openbiosim-dev Next, you need to install the Sire build dependencies. .. code-block:: bash - mamba install cmake pip-requirements-parser + conda install cmake pip-requirements-parser You will also need to install compilers, e.g. on Linux use .. code-block:: bash - mamba install gcc gxx + conda install gcc gxx on MacOS use .. code-block:: bash - mamba install clang clangxx + conda install clang clangxx and on Windows use .. code-block:: bash - mamba install conda-build + conda install conda-build Next, you can clone the sire source code and compile and install sire:: diff --git a/SECURITY.md b/SECURITY.md index 99c80fafd..a084c17cc 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -4,13 +4,13 @@ As we have limited resource, we only support the latest major release of sire with security updates. For example, if the current version -is 2023.1.0, then only versions 2023.1.0 to 2023.1.X wil have updates, -which will be released as 2023.1.X+1. +is 2023.5.0, then only versions 2023.5.0 to 2023.5.X will have updates, +which will be released as 2023.5.X+1.
| Version | Supported | | ------- | ------------------ | -| 2023.1.x | :white_check_mark: | -| < 2023.1.x| :x: | +| 2023.5.x | :white_check_mark: | +| < 2023.5.x| :x: | ## Reporting a Vulnerability diff --git a/corelib/src/libs/SireCAS/lambdaschedule.cpp b/corelib/src/libs/SireCAS/lambdaschedule.cpp index 168a0a158..bd3ccfa3c 100644 --- a/corelib/src/libs/SireCAS/lambdaschedule.cpp +++ b/corelib/src/libs/SireCAS/lambdaschedule.cpp @@ -56,6 +56,13 @@ QDataStream &operator<<(QDataStream &ds, const LambdaSchedule &schedule) return ds; } +Symbol LambdaSchedule::lambda_symbol("λ"); +Symbol LambdaSchedule::initial_symbol("initial"); +Symbol LambdaSchedule::final_symbol("final"); + +Expression LambdaSchedule::default_morph_equation = (1.0 - LambdaSchedule::lam()) * LambdaSchedule::initial() + + LambdaSchedule::lam() * LambdaSchedule::final(); + QDataStream &operator>>(QDataStream &ds, LambdaSchedule &schedule) { VersionID v = readHeader(ds, r_schedule); @@ -68,6 +75,21 @@ QDataStream &operator>>(QDataStream &ds, LambdaSchedule &schedule) schedule.lever_names >> schedule.stage_names >> schedule.default_equations >> schedule.stage_equations >> static_cast(schedule); + + for (auto &expression : schedule.default_equations) + { + if (expression == LambdaSchedule::default_morph_equation) + expression = LambdaSchedule::default_morph_equation; + } + + for (auto &stage_equations : schedule.stage_equations) + { + for (auto &expression : stage_equations) + { + if (expression == LambdaSchedule::default_morph_equation) + expression = LambdaSchedule::default_morph_equation; + } + } } else throw version_error(v, "1", r_schedule, CODELOC); @@ -203,10 +225,6 @@ LambdaSchedule LambdaSchedule::charge_scaled_morph(double scale) return l; } -Symbol LambdaSchedule::lambda_symbol("λ"); -Symbol LambdaSchedule::initial_symbol("initial"); -Symbol LambdaSchedule::final_symbol("final"); - /** Return the symbol used to represent the :lambda: coordinate. * This symbol is used to represent the per-stage :lambda: * variable that goes from 0.0-1.0 within that stage. @@ -438,8 +456,7 @@ void LambdaSchedule::clear() */ void LambdaSchedule::addMorphStage(const QString &name) { - this->addStage(name, (this->lam() * this->final()) + - ((1 - this->lam()) * this->initial())); + this->addStage(name, default_morph_equation); } /** Append a morph stage onto this schedule. 
The morph stage is a @@ -501,9 +518,14 @@ void LambdaSchedule::addChargeScaleStages(double scale) void LambdaSchedule::prependStage(const QString &name, const SireCAS::Expression &equation) { + auto e = equation; + + if (e == default_morph_equation) + e = default_morph_equation; + if (this->nStages() == 0) { - this->appendStage(name, equation); + this->appendStage(name, e); return; } @@ -514,7 +536,7 @@ void LambdaSchedule::prependStage(const QString &name, CODELOC); this->stage_names.prepend(name); - this->default_equations.prepend(equation); + this->default_equations.prepend(e); this->stage_equations.prepend(QHash()); } @@ -532,8 +554,13 @@ void LambdaSchedule::appendStage(const QString &name, .arg(name), CODELOC); + auto e = equation; + + if (e == default_morph_equation) + e = default_morph_equation; + this->stage_names.append(name); - this->default_equations.append(equation); + this->default_equations.append(e); this->stage_equations.append(QHash()); } @@ -546,14 +573,19 @@ void LambdaSchedule::insertStage(int i, const QString &name, const SireCAS::Expression &equation) { + auto e = equation; + + if (e == default_morph_equation) + e = default_morph_equation; + if (i == 0) { - this->prependStage(name, equation); + this->prependStage(name, e); return; } else if (i >= this->nStages()) { - this->appendStage(name, equation); + this->appendStage(name, e); return; } @@ -564,7 +596,7 @@ void LambdaSchedule::insertStage(int i, CODELOC); this->stage_names.insert(i, name); - this->default_equations.insert(i, equation); + this->default_equations.insert(i, e); this->stage_equations.insert(i, QHash()); } @@ -605,7 +637,12 @@ int LambdaSchedule::find_stage(const QString &stage) const void LambdaSchedule::setDefaultEquation(const QString &stage, const Expression &equation) { - this->default_equations[this->find_stage(stage)] = equation; + auto e = equation; + + if (e == default_morph_equation) + e = default_morph_equation; + + this->default_equations[this->find_stage(stage)] = e; } /** Set the custom equation used to control the specified @@ -617,12 +654,17 @@ void LambdaSchedule::setEquation(const QString &stage, const QString &lever, const Expression &equation) { + auto e = equation; + + if (e == default_morph_equation) + e = default_morph_equation; + auto &lever_expressions = this->stage_equations[this->find_stage(stage)]; if (not this->lever_names.contains(lever)) this->addLever(lever); - lever_expressions[lever] = equation; + lever_expressions[lever] = e; } /** Remove the custom equation for the specified `lever` at the @@ -875,21 +917,31 @@ QVector LambdaSchedule::morph(const QString &lever_name, const auto equation = this->stage_equations[stage].value( lever_name, this->default_equations[stage]); - Values input_values = this->constant_values; - input_values.set(this->lam(), std::get<1>(resolved)); - QVector morphed(nparams); - auto morphed_data = morphed.data(); const auto initial_data = initial.constData(); const auto final_data = final.constData(); - for (int i = 0; i < nparams; ++i) + if (equation == default_morph_equation) { - input_values.set(this->initial(), initial_data[i]); - input_values.set(this->final(), final_data[i]); + for (int i = 0; i < nparams; ++i) + { + morphed_data[i] = (1.0 - lambda_value) * initial_data[i] + + lambda_value * final_data[i]; + } + } + else + { + Values input_values = this->constant_values; + input_values.set(this->lam(), std::get<1>(resolved)); + + for (int i = 0; i < nparams; ++i) + { + input_values.set(this->initial(), initial_data[i]); + 
input_values.set(this->final(), final_data[i]); - morphed_data[i] = equation(input_values); + morphed_data[i] = equation(input_values); + } } return morphed; diff --git a/corelib/src/libs/SireCAS/lambdaschedule.h b/corelib/src/libs/SireCAS/lambdaschedule.h index 2026899f9..412d0b8c5 100644 --- a/corelib/src/libs/SireCAS/lambdaschedule.h +++ b/corelib/src/libs/SireCAS/lambdaschedule.h @@ -204,6 +204,9 @@ namespace SireCAS /** The symbol used to represent the final value */ static SireCAS::Symbol final_symbol; + + /** The default morph equation */ + static SireCAS::Expression default_morph_equation; }; } diff --git a/corelib/src/libs/SireIO/pdbx.cpp b/corelib/src/libs/SireIO/pdbx.cpp index 5994d377d..e1717d927 100644 --- a/corelib/src/libs/SireIO/pdbx.cpp +++ b/corelib/src/libs/SireIO/pdbx.cpp @@ -149,7 +149,7 @@ PDBx::PDBx(const SireSystem::System &system, const PropertyMap &map) : ConcreteP throw SireError::unsupported( "No PDBx writer function has been registered. You need to " "install a library to write PDBx/mmCIF files, e.g. gemmi. " - "Do this by running 'mamba install -c conda-forge gemmi' " + "Do this by running 'conda install -c conda-forge gemmi' " "and then re-running this script.", CODELOC); @@ -274,7 +274,7 @@ QVector PDBx::toLines() const throw SireError::unsupported( "No PDBx writer function has been registered. You need to " "install a library to write PDBx/mmCIF files, e.g. gemmi. " - "Do this by running 'mamba install -c conda-forge gemmi' " + "Do this by running 'conda install -c conda-forge gemmi' " "and then re-running this script.", CODELOC); @@ -321,7 +321,7 @@ void PDBx::parseLines(const PropertyMap &map) throw SireError::unsupported( "No PDBx reader function has been registered. You need to " "install a library to read PDBx/mmCIF files, e.g. gemmi. " - "Do this by running 'mamba install -c conda-forge gemmi' " + "Do this by running 'conda install -c conda-forge gemmi' " "and then re-running this script.", CODELOC); diff --git a/doc/source/acknowledgements.rst b/doc/source/acknowledgements.rst index d30b33f6e..104c8a9da 100644 --- a/doc/source/acknowledgements.rst +++ b/doc/source/acknowledgements.rst @@ -119,11 +119,11 @@ does not affect :mod:`sire`. CMake is excellent. You can read more about it Anaconda --------- -:mod:`sire` uses Anaconda Python (specifically mambaforge and conda-forge) to +:mod:`sire` uses Anaconda Python (specifically miniforge and conda-forge) to simplify the management and installation of Python and the various modules on which :mod:`sire` depends. -Anaconda (and miniconda) are distributed as +Anaconda (and miniforge) are distributed as `open source projects `__. As :mod:`sire` does not explicitly link with them, the license is not an issue. You can find out more about Anaconda `from here `__. diff --git a/doc/source/changelog.rst b/doc/source/changelog.rst index 3b909ece3..7a72fb42d 100644 --- a/doc/source/changelog.rst +++ b/doc/source/changelog.rst @@ -12,6 +12,20 @@ Development was migrated into the `OpenBioSim `__ organisation on `GitHub `__. +`2023.5.1 `__ - January 2024 +-------------------------------------------------------------------------------------------- + +* Added a ``.dynamics().step(num_steps)`` function to make it easier to quickly run + steps of OpenMM dynamics with minimal overhead (this directly called ``Integrator.step()``) + +* Some optimisations to the OpenMM layer that make full use of the + experimental "updateSomeParametersInContext" functions. 
+ +* Updated gemmi to 0.6.4, so that it can be default-enabled on all supported platforms. + As part of this, had to change the version of the compilers used on Windows and Linux + to make the conda packages. Windows now uses Visual Studio 2022 instead of 2017, + and Linux now uses GCC 12.3.0 instead of GCC 13. + `2023.5.0 `__ - December 2023 --------------------------------------------------------------------------------------------- @@ -918,14 +932,14 @@ Here is the changelog for this stage of development. only pythonize the C++ layer, and avoid the circular dependencies that were causing random import errors (particularly on Windows). - [2023.0.2] December 2023: Fix multiple distance restraint bug in SOMD + [2023.0.2] December 2022: Fix multiple distance restraint bug in SOMD (@fjclark). Add support for PME FEP with SOMD and fix associated bugs (@halx, @jmichel80). Fix CI issues so that PRs use the correct URL when triggered by external forks. Exclude dummy atoms when repartitioning hydrogen masses. Deprecate py37. - [2023.0.1] November 2023: Improve handling of HETATM and TER records in + [2023.0.1] November 2022: Improve handling of HETATM and TER records in PDB files. Fix SOMD selection issues following update to the 2023 API. Fix writing of steps to SOMD simfile.dat (@fjclark). Throw exception when CHAMBER format AMBER topology files are @@ -940,7 +954,7 @@ Here is the changelog for this stage of development. order. Ensure Sire is built against packages with the "dev" label. - [2023.0.0] July 2023 - Updated Sire's API to a more pythonic style. + [2023.0.0] July 2022 - Updated Sire's API to a more pythonic style. Module names are in lower case, e.g. `import Sire` becomes `import sire`, or `import sire as sr`. Functions are in underscore_case. This change is not backwards compatible. To diff --git a/doc/source/contributing/development.rst b/doc/source/contributing/development.rst index 36292fbe5..275f5c4f7 100644 --- a/doc/source/contributing/development.rst +++ b/doc/source/contributing/development.rst @@ -9,12 +9,11 @@ Setting up your computer ========================= You first need to create an anaconda, miniconda or -mambaforce environment as described in +miniforge environment as described in the :doc:`installation page <../install>`. -We recommend using mambaforge, as this sets the right -priority for the conda-forge channel, and it bundles -mamba, which we find to be much faster than conda. +We recommend using miniforge, as this sets the right +priority for the conda-forge channel. Virtual environments -------------------- @@ -42,7 +41,7 @@ or by running of your environment - e.g. ``openbiosim``). This will update your shell so that all python commands (such as -``python``, ``mamba`` etc.) will use the virtual environment. You can +``python``, ``conda`` etc.) will use the virtual environment. You can deactivate the environment and return to the "standard" Python using; .. code-block:: bash @@ -445,7 +444,7 @@ additional packages as described in the .. 
code-block:: bash - mamba install sphinx sphinxcontrib-programoutput sphinx_issues furo + conda install sphinx sphinxcontrib-programoutput sphinx_issues furo Then move to the ``doc`` directory and run: diff --git a/doc/source/contributing/packaging.rst b/doc/source/contributing/packaging.rst index 297b43cd1..0509e11d7 100644 --- a/doc/source/contributing/packaging.rst +++ b/doc/source/contributing/packaging.rst @@ -127,7 +127,7 @@ There are a number of stages to go through to create a release: the Python versions that :mod:`sire` should support. Activate this environment, and then checkout the ``main`` branch, run ``python actions/update_recipe.py`` and then run ``conda-build`` - via the command ``conda mambabuild -c conda-forge -c openbiosim/label/dev recipes/sire``. + via the command ``conda build -c conda-forge -c openbiosim/label/dev recipes/sire``. This will result in a conda package in the ``conda-bld`` directory in the root directory of your conda environment. You then need to upload these packages to anaconda.org, e.g. via the command diff --git a/doc/source/install.rst b/doc/source/install.rst index 4c5e2f2bc..97db15d3f 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -52,27 +52,25 @@ The easiest way to install :mod:`sire` is in a new `conda environment `__. You can use any conda environment or installation. We recommend using -`mambaforge `__, -as this is pre-configured to use `conda-forge `__, -and bundles `mamba `__, which -is a fast drop-in replacement for `conda `__. +`miniforge3 `__, +as this is pre-configured to use `conda-forge `__. -.. _Install_Mambaforge: -Either... Install a new copy of ``mambaforge`` +.. _Install_miniforge: +Either... Install a new copy of ``miniforge`` ---------------------------------------------- To install a new copy of -`mambaforge `__, -first download a ``Mambaforge`` from -`this page `__ that +`miniforge `__, +first download a ``miniforge3`` from +`this page `__ that matches your operating system and processor. -Install ``Mambaforge`` following the +Install ``miniforge`` following the `instructions here `__. -Once installed, you should be able to run the ``mamba`` command to -install other packages (e.g. ``mamba -h`` will print out help on -how to use the ``mamba`` command). +Once installed, you should be able to run the ``conda`` command to +install other packages (e.g. ``conda -h`` will print out help on +how to use the ``conda`` command). Or... Use an existing anaconda/miniconda install ------------------------------------------------ @@ -86,21 +84,7 @@ the full path to your anaconda or miniconda installation. You should now be able to run the ``conda`` command to install other packages (e.g. ``conda -h`` will print out help on how to use the -``conda`` command). We highly recommend that you use ``mamba`` as a -drop-in replacement for ``conda``, so first install ``mamba``. - -.. code-block:: bash - - $ conda install -c conda-forge mamba - -This should install mamba. If this fails, then your anaconda or miniconda -environment is likely quite full, or else it is outdated. We recommend -going back and following `the instructions <_Install_Mambaforge>` -to install a new copy of ``mambaforge``. - -If this works, then you should now be able to run the ``mamba`` command -to install other packages (e.g. ``mamba -h`` will print out help -on how to use the ``mamba`` command). +``conda`` command). And then... 
Install sire into a new environment ----------------------------------------------- @@ -113,7 +97,7 @@ by creating a Python 3.11 environment that we will call ``openbiosim``. .. code-block:: bash - $ mamba create -n openbiosim "python<3.12" + $ conda create -n openbiosim "python<3.12" .. note:: @@ -124,29 +108,29 @@ We can now install :mod:`sire` into that environment by typing .. code-block:: bash - $ mamba install -n openbiosim -c conda-forge -c openbiosim sire + $ conda install -n openbiosim -c conda-forge -c openbiosim sire .. note:: - The option ``-n openbiosim`` tells ``mamba`` to install :mod:`sire` + The option ``-n openbiosim`` tells ``conda`` to install :mod:`sire` into the ``openbiosim`` environment. The option ``-c conda-forge`` - tells ``mamba`` to use the ``conda-forge`` channel for all + tells ``conda`` to use the ``conda-forge`` channel for all dependencies. The option ``-c openbiosim`` - tells ``mamba`` to install :mod:`sire` from the ``openbiosim`` + tells ``conda`` to install :mod:`sire` from the ``openbiosim`` conda channel. If you want the latest development release, then install by typing .. code-block:: bash - $ mamba install -n openbiosim -c conda-forge -c "openbiosim/label/dev" sire + $ conda install -n openbiosim -c conda-forge -c "openbiosim/label/dev" sire You may (optionally) want to install additional tools such as ``ipython`` and ``jupyterlab``. To do this, type .. code-block:: bash - $ mamba install -n openbiosim ipython jupyterlab + $ conda install -n openbiosim ipython jupyterlab To run :mod:`sire`, you must now activate the ``openbiosim`` environment. You can do this by typing @@ -242,7 +226,7 @@ branch if you are a developer). You compile :mod:`sire` into an existing anaconda / miniconda environment. Please create and activate an environment, e.g. by following -`the instructions <_Install_Mambaforge>` to install a fresh ``mambaforge`` and +`the instructions <_Install_miniforge>` to install a fresh ``miniforge`` and then creating and activating Python 3.11 environment called ``openbiosim``. @@ -259,8 +243,8 @@ this directory (e.g. ``cd sire``). .. note:: This will fail if ``git`` is not installed on your computer. - You can easily install ``git`` using ``mamba``, e.g. - run ``mamba install git``. + You can easily install ``git`` using ``conda``, e.g. + run ``conda install git``. You can change to a different branch using the ``git checkout BRANCH`` command, e.g. @@ -299,24 +283,23 @@ type $ python setup.py install -This will download and install all of the dependencies via ``mamba`` -(or ``conda`` if you haven't installed ``mamba``). It will then compile +This will download and install all of the dependencies via ``conda``. It will then compile the :mod:`sire` C++ libraries, and then the Python wrappers. Be patient, as compilation can take quite a while! .. note:: - You need to have Visual Studio 2017 C++ compiler installed to compile on Windows. - The easiest way to do this is to `install chocolatey `__ - and then install the compilers using the command - ``choco install visualstudio2017-workload-vctools``. This is all free, but - you will need admin access to install chocolatey. If this doesn't work, then - go to `this page `__ - and download the "Build Tools for Visual Studio 2017". Use the installer - to select and install only the build tools. You will need a free Microsoft - developer account to access this page. If this doesn't work, then - follow the `excellent guidance here `__ - to set up your Windows computer for compiling conda packages. 
+ You need to have Visual Studio C++ (2017 or newer) installed to compile on Windows. + The easiest way to do this is to install the free + `Visual Studio 2022 Community Edition `__. + Make sure to install "Desktop development with C++", + including the options "MSVC v143 - VS 2022 C++ x64/x86 build tools (v14.30)", + "C++ CMake tools for Windows", and at least one of "Windows 11 SDK" and/or + "Windows 10 SDK" (any version will do). You can, optionally, install the + older C++ compilers too, e.g. "MSVC v142 - VS 2019 C++ x64/x86 build tools (v14.29)", + and/or "MSVC v141 - VS 2017 C++ x64/x86 build tools (v14.16)". Currently + only the X64 compilers have been tested - we are interested to try + Windows/ARM64 once more of the dependencies are available. If you plan to install `BioSimSpace `__ on top of :mod:`sire`, then you should install using; @@ -325,7 +308,7 @@ top of :mod:`sire`, then you should install using; $ python --install-bss-deps install -This will use ``mamba`` (or ``conda``) to download and install all of +This will use ``conda`` to download and install all of BioSimSpace's dependencies as well. This ensures that incompatible versions of shared dependencies are not accidentally installed. @@ -418,8 +401,7 @@ And then install the tools needed to run conda-build .. code-block:: bash - $ conda install -y -c conda-forge mamba - $ mamba install -y -c conda-forge boa anaconda-client packaging=21 pip-requirements-parser + $ conda install -y -c conda-forge boa anaconda-client packaging=21 pip-requirements-parser D. Create the conda recipe -------------------------- @@ -455,7 +437,7 @@ You can now run ``conda-build`` to create the package. .. code-block:: bash - $ conda mambabuild -c conda-forge -c openbiosim/label/dev recipes/sire + $ conda build -c conda-forge -c openbiosim/label/dev recipes/sire This will take a while. At the end, it will print out the location of the sire conda package, e.g. @@ -465,7 +447,7 @@ sire conda package, e.g. The above command assumes that you don't need any other channels included to install all of the packages included in your ``environment.yml``. The ``actions/update_recipe.py`` script will print out the correct - ``conda mambabuild`` command at the end, which includes any extra + ``conda build`` command at the end, which includes any extra channels that are needed. :: @@ -473,7 +455,7 @@ sire conda package, e.g. # To have conda build upload to anaconda.org automatically, use # conda config --set anaconda_upload yes anaconda upload \ - /path/to/mambaforge/envs/build_sire/conda-bld/osx-64/sire-2023.3.0-py310hf95ea87_25.tar.bz2 + /path/to/miniforge/envs/build_sire/conda-bld/osx-64/sire-2023.3.0-py310hf95ea87_25.tar.bz2 anaconda_upload is not set. Not uploading wheels: [] INFO :: The inputs making up the hashes for the built packages are as follows: @@ -489,7 +471,7 @@ sire conda package, e.g. } In this case, you can see that the package is the file -``/path/to/mambaforge/envs/build_sire/conda-bld/osx-64/sire-2023.3.0-py310hf95ea87_25.tar.bz2``. +``/path/to/miniforge/envs/build_sire/conda-bld/osx-64/sire-2023.3.0-py310hf95ea87_25.tar.bz2``. Copy this conda package to wherever you need (e.g. into a channel, upload to conda, etc.). diff --git a/doc/source/tutorial/part01/02_loading_a_molecule.rst b/doc/source/tutorial/part01/02_loading_a_molecule.rst index f59e2e5e6..0e7ca0bfd 100644 --- a/doc/source/tutorial/part01/02_loading_a_molecule.rst +++ b/doc/source/tutorial/part01/02_loading_a_molecule.rst @@ -39,7 +39,7 @@ e.g. 
You may see a warning instructing you to install ``nglview``. If you see this, install ``nglview`` by typing - ``mamba install nglview`` at the command line. + ``conda install nglview`` at the command line. This uses `nglviewer `__ to view the molecule. There are lots of options to the :func:`~sire.mol.SelectorMol.view` diff --git a/doc/source/tutorial/part04/02_trajectory.rst b/doc/source/tutorial/part04/02_trajectory.rst index 34902b743..362d09fb2 100644 --- a/doc/source/tutorial/part04/02_trajectory.rst +++ b/doc/source/tutorial/part04/02_trajectory.rst @@ -319,8 +319,8 @@ this data by passing it into the ``DataFrame`` constructor, e.g. .. note:: - You may need to install pandas. You can do this with ``conda`` or ``mamba``, - e.g. ``mamba install pandas`` + You may need to install pandas. You can do this with ``conda``, + e.g. ``conda install pandas`` .. note:: @@ -501,7 +501,7 @@ plots. .. note:: - You can install jupyter using mamba via ``mamba install jupyter jupyterlab``. + You can install jupyter using conda via ``conda install jupyter jupyterlab``. Once installed, you can start a jupyter lab instance by running ``jupyter lab`` @@ -509,7 +509,7 @@ plots. You must also install matplotlib if you want to use pandas to generate plots. You can install matplotlib using the - command ``mamba install matplotlib`` + command ``conda install matplotlib`` For example, you could plot all of the bond lengths using diff --git a/doc/source/tutorial/part05/01_convert.rst b/doc/source/tutorial/part05/01_convert.rst index 7565f8065..fd4a829e7 100644 --- a/doc/source/tutorial/part05/01_convert.rst +++ b/doc/source/tutorial/part05/01_convert.rst @@ -139,7 +139,7 @@ with support for that package. .. note:: If ``rdkit`` isn't listed, then you should quit Python and install - it, e.g. using the command ``mamba install -c conda-forge rdkit``. + it, e.g. using the command ``conda install -c conda-forge rdkit``. If it still isn't listed then please raise an issue on the `sire GitHub repository `__. @@ -224,7 +224,7 @@ with support for that package. .. note:: If ``openmm`` isn't listed, then you should quit Python and install - it, e.g. using the command ``mamba install -c conda-forge openmm``. + it, e.g. using the command ``conda install -c conda-forge openmm``. If it still isn't listed then please raise an issue on the `sire GitHub repository `__. diff --git a/doc/source/tutorial/part05/02_view.rst b/doc/source/tutorial/part05/02_view.rst index aa62e7d09..bee0c9515 100644 --- a/doc/source/tutorial/part05/02_view.rst +++ b/doc/source/tutorial/part05/02_view.rst @@ -37,7 +37,7 @@ e.g. ``.png`` for PNG, ``.pdf`` for PDF and ``.svg`` for SVG. to PNG or PDF format. You will see a warning printed to the screen if this package is needed, and the image will instead be saved in SVG format. You can install ``cairosvg`` using - ``mamba install cairosvg``. + ``conda install cairosvg``. You can control the height and width of the image using the ``height`` and ``width`` arguments (measured in pixels), e.g. diff --git a/doc/source/tutorial/part06/05_free_energy_perturbation.rst b/doc/source/tutorial/part06/05_free_energy_perturbation.rst index 643b5ca70..0023f2d1c 100644 --- a/doc/source/tutorial/part06/05_free_energy_perturbation.rst +++ b/doc/source/tutorial/part06/05_free_energy_perturbation.rst @@ -126,8 +126,8 @@ First, we need to import alchemlyb .. note:: If you see an error then you may need to install (or reinstall) - alchemlyb. You can do this using conda or mamba, e.g. 
- ``mamba install -c conda-forge alchemlyb``. + alchemlyb. You can do this using conda e.g. + ``conda install -c conda-forge alchemlyb``. Next, we will load all of the :class:`~sire.maths.EnergyTrajectory` objects for each λ-window, and will convert them into pandas DataFrames arranged diff --git a/recipes/sire/conda_build_config.yaml b/recipes/sire/conda_build_config.yaml index 630daa995..f45e1ecfa 100644 --- a/recipes/sire/conda_build_config.yaml +++ b/recipes/sire/conda_build_config.yaml @@ -1,3 +1,9 @@ +c_compiler_version: + - 12.3.0 # [linux] + +cxx_compiler_version: + - 12.3.0 # [linux] + pin_run_as_build: boost: max_pin: x.x diff --git a/requirements_bss.txt b/requirements_bss.txt index fde780ff4..f1435328f 100644 --- a/requirements_bss.txt +++ b/requirements_bss.txt @@ -30,7 +30,7 @@ pydot pygtail pyyaml rdkit >=2023.0.0 -gemmi >=0.6.3,<0.7.0 +gemmi >=0.6.4 # The below are packages that aren't available on all # platforms/OSs and so need to be conditionally included diff --git a/requirements_build.txt b/requirements_build.txt index c550ad6d2..c2b7173ea 100644 --- a/requirements_build.txt +++ b/requirements_build.txt @@ -18,5 +18,5 @@ rdkit-dev >=2023.0.0 # These packages are needed to compile # the SireGemmi plugin -gemmi >=0.6.3,<0.7.0 +gemmi >=0.6.4 pybind11 diff --git a/requirements_host.txt b/requirements_host.txt index 3b9c2a2a3..13cd912df 100644 --- a/requirements_host.txt +++ b/requirements_host.txt @@ -12,6 +12,6 @@ qt-main rich tbb tbb-devel -gemmi >=0.6.3,<0.7.0 +gemmi >=0.6.4 rdkit >=2023.0.0 diff --git a/requirements_test.txt b/requirements_test.txt index 55ed43d38..dbd4a6cdd 100644 --- a/requirements_test.txt +++ b/requirements_test.txt @@ -2,4 +2,4 @@ # enable test to run to validate advanced functionality rdkit >=2023.0.0 -gemmi >=0.6.3,<0.7.0 +gemmi >=0.6.4 diff --git a/setup.py b/setup.py index de547447a..03cd5bc46 100644 --- a/setup.py +++ b/setup.py @@ -232,6 +232,15 @@ def parse_args(): default=[], help="pass CMake generator", ) + parser.add_argument( + "-A", + "--architecture", + action="append", + nargs=1, + metavar=("ARCHITECTURE",), + default=[], + help="pass CMake generator architecture, e.g. WIN64", + ) parser.add_argument( "-n", "--ncores", @@ -332,9 +341,7 @@ def conda_install(dependencies, install_bss_reqs=False): if not _is_conda_prepped: if install_bss_reqs: - cmd = ( - "%s config --prepend channels openbiosim/label/dev" % conda_exe - ) + cmd = "%s config --prepend channels openbiosim/label/dev" % conda_exe print("Activating openbiosim channel channel using: '%s'" % cmd) status = subprocess.run(cmd.split()) if status.returncode != 0: @@ -393,9 +400,7 @@ def install_requires(install_bss_reqs=False): print(f"Installing requirements for {platform_string}") if not os.path.exists(conda): - print( - "\nSire can only be installed into a conda or miniconda environment." 
- ) + print("\nSire can only be installed into a conda or miniconda environment.") print( "Please install conda, miniconda, miniforge or similar, then " "activate the conda environment, then rerun this installation " @@ -422,9 +427,7 @@ def install_requires(install_bss_reqs=False): from parse_requirements import parse_requirements except ImportError as e: print("\n\n[ERROR] ** You need to install pip-requirements-parser") - print( - "Run `conda install -c conda-forge pip-requirements-parser\n\n" - ) + print("Run `conda install -c conda-forge pip-requirements-parser\n\n") raise e reqs = parse_requirements("requirements_host.txt") @@ -481,10 +484,7 @@ def _get_build_ext(): else: ext = "" - return ( - os.path.basename(conda_base.replace(" ", "_").replace(".", "_")) - + ext - ) + return os.path.basename(conda_base.replace(" ", "_").replace(".", "_")) + ext def _get_bin_dir(): @@ -533,12 +533,8 @@ def build(ncores: int = 1, npycores: int = 1, coredefs=[], pydefs=[]): print(f"{CC} => {CC_bin}") if CXX_bin is None or CC_bin is None: - print( - "Cannot find the compilers requested by conda-build in the PATH" - ) - print( - "Please check that the compilers are installed and available." - ) + print("Cannot find the compilers requested by conda-build in the PATH") + print("Please check that the compilers are installed and available.") sys.exit(-1) # use the full paths, in case CMake struggles @@ -565,7 +561,9 @@ def build(ncores: int = 1, npycores: int = 1, coredefs=[], pydefs=[]): CXX = glob.glob(os.path.join(bindir, "*-g++"))[0] CC = glob.glob(os.path.join(bindir, "*-gcc"))[0] except Exception: - conda_install(["gcc", "gxx"], False) + # Need this version of gcc to stay compatible with conda-forge + # (i.e. gemmi needs the exact same compiler version) + conda_install(["gcc==12.3.0", "gxx==12.3.0"], False) try: CXX = glob.glob(os.path.join(bindir, "*-g++"))[0] CC = glob.glob(os.path.join(bindir, "*-gcc"))[0] @@ -632,6 +630,7 @@ def build(ncores: int = 1, npycores: int = 1, coredefs=[], pydefs=[]): cmake, *sum([["-D", d[0]] for d in args.corelib], []), *sum([["-G", g[0]] for g in args.generator], []), + *sum([["-A", a[0]] for a in args.architecture], []), sourcedir, ] @@ -669,9 +668,7 @@ def build(ncores: int = 1, npycores: int = 1, coredefs=[], pydefs=[]): # Compile and install, as need to install to compile the wrappers make_args = make_cmd(ncores, True) - print( - 'NOW RUNNING "%s" --build . --target %s' % (cmake, " ".join(make_args)) - ) + print('NOW RUNNING "%s" --build . --target %s' % (cmake, " ".join(make_args))) sys.stdout.flush() status = subprocess.run([cmake, "--build", ".", "--target", *make_args]) @@ -721,6 +718,7 @@ def build(ncores: int = 1, npycores: int = 1, coredefs=[], pydefs=[]): cmake, *sum([["-D", d[0]] for d in args.wrapper], []), *sum([["-G", g[0]] for g in args.generator], []), + *sum([["-A", a[0]] for a in args.architecture], []), sourcedir, ] @@ -735,9 +733,7 @@ def build(ncores: int = 1, npycores: int = 1, coredefs=[], pydefs=[]): # Just compile the wrappers make_args = make_cmd(npycores, False) - print( - 'NOW RUNNING "%s" --build . --target %s' % (cmake, " ".join(make_args)) - ) + print('NOW RUNNING "%s" --build . 
--target %s' % (cmake, " ".join(make_args))) sys.stdout.flush() status = subprocess.run([cmake, "--build", ".", "--target", *make_args]) @@ -797,6 +793,7 @@ def install_module(ncores: int = 1): cmake, *sum([["-D", d[0]] for d in args.wrapper], []), *sum([["-G", g[0]] for g in args.generator], []), + *sum([["-A", a[0]] for a in args.architecture], []), sourcedir, ] print(" ".join(cmake_cmd)) @@ -810,9 +807,7 @@ def install_module(ncores: int = 1): make_args = make_cmd(ncores, True) # Now that cmake has run, we can compile and install wrapper - print( - 'NOW RUNNING "%s" --build . --target %s' % (cmake, " ".join(make_args)) - ) + print('NOW RUNNING "%s" --build . --target %s' % (cmake, " ".join(make_args))) sys.stdout.flush() status = subprocess.run([cmake, "--build", ".", "--target", *make_args]) @@ -852,9 +847,7 @@ def install(ncores: int = 1, npycores: int = 1): # Now install the wrappers make_args = make_cmd(npycores, True) - print( - 'NOW RUNNING "%s" --build . --target %s' % (cmake, " ".join(make_args)) - ) + print('NOW RUNNING "%s" --build . --target %s' % (cmake, " ".join(make_args))) sys.stdout.flush() status = subprocess.run([cmake, "--build", ".", "--target", *make_args]) @@ -879,10 +872,8 @@ def install(ncores: int = 1, npycores: int = 1): action = args.action[0] if is_windows and (args.generator is None or len(args.generator) == 0): - # args.generator = [["Visual Studio 17 2022"]] - args.generator = [ - ["Visual Studio 15 2017 Win64"] - ] # preferred VC version for conda + args.generator = [["Visual Studio 17 2022"]] + args.architecture = [["x64"]] elif is_macos: # fix compile bug when INSTALL_NAME_TOOL is not set if "INSTALL_NAME_TOOL" not in os.environ: diff --git a/src/sire/convert/__init__.py b/src/sire/convert/__init__.py index 7b932d64d..8ddd7aa25 100644 --- a/src/sire/convert/__init__.py +++ b/src/sire/convert/__init__.py @@ -332,7 +332,7 @@ def openmm_to_sire(obj, map=None): except Exception: raise ModuleNotFoundError( "openmm is not available. Please install via " - "'mamba install -c conda-forge openmm'" + "'conda install -c conda-forge openmm'" ) from ..base import create_map @@ -383,7 +383,7 @@ def sire_to_openmm(obj, map=None): except Exception: raise ModuleNotFoundError( "openmm is not available. Please install via " - "'mamba install -c conda-forge openmm'" + "'conda install -c conda-forge openmm'" ) mols = _sire_to_openmm(obj, map=map) @@ -404,7 +404,7 @@ def rdkit_to_sire(obj, map=None): except Exception: raise ModuleNotFoundError( "rdkit is not available. Please install via " - "'mamba install -c conda-forge rdkit'" + "'conda install -c conda-forge rdkit'" ) from ..base import create_map @@ -435,7 +435,7 @@ def sire_to_rdkit(obj, map=None): except Exception: raise ModuleNotFoundError( "rdkit is not available. Please install via " - "'mamba install -c conda-forge rdkit'" + "'conda install -c conda-forge rdkit'" ) from ..base import create_map @@ -459,7 +459,7 @@ def gemmi_to_sire(obj, map=None): except Exception: raise ModuleNotFoundError( "gemmi is not available. Please install via " - "'mamba install -c conda-forge gemmi'" + "'conda install -c conda-forge gemmi'" ) from ..base import create_map @@ -514,7 +514,7 @@ def sire_to_gemmi(obj, map=None): except Exception: raise ModuleNotFoundError( "rdkit is not available. 
Please install via " - "'mamba install -c conda-forge rdkit'" + "'conda install -c conda-forge rdkit'" ) if not isinstance(obj, list): diff --git a/src/sire/mol/_dynamics.py b/src/sire/mol/_dynamics.py index f4816197b..575cd5108 100644 --- a/src/sire/mol/_dynamics.py +++ b/src/sire/mol/_dynamics.py @@ -528,6 +528,21 @@ def current_kinetic_energy(self): def energy_trajectory(self): return self._energy_trajectory.clone() + def step(self, num_steps: int = 1): + """ + Just perform 'num_steps' steps of dynamics, without saving + anything or running anything in a background thread. This is + designed for times when we want minimal overhead, e.g. + when we want to run a small number of steps quickly. + """ + if self._is_running: + raise SystemError("Cannot step dynamics while it is already running!") + + self._omm_state = None + self._omm_state_has_cv = (False, False) + + self._omm_mols.getIntegrator().step(num_steps) + def run_minimisation(self, max_iterations: int): """ Internal method that runs minimisation on the molecules. @@ -1008,6 +1023,18 @@ def minimise( return self + def step(self, num_steps: int = 1): + """ + Simple function that performs `num_steps` steps of dynamics. + This does not save any frames or energies - it is designed for + times when you want to run a small number of steps quickly + with minimal overhead. + """ + if not self._d.is_null(): + self._d.step(num_steps=num_steps) + + return self + def run( self, time, diff --git a/src/sire/mol/_smiles.py b/src/sire/mol/_smiles.py index f7a8af5c8..72eae3308 100644 --- a/src/sire/mol/_smiles.py +++ b/src/sire/mol/_smiles.py @@ -313,7 +313,7 @@ def _selector_view2d( "any other format than SVG. As " "this is not available, we will save the file as a " "SVG. To install `cairosvg` run the command " - "'mamba install -c conda-forge cairosvg'" + "'conda install -c conda-forge cairosvg'" ) format = "svg" @@ -530,7 +530,7 @@ def _view2d( "any other format than SVG. As " "this is not available, we will save the file as a " "SVG. To install `cairosvg` run the command " - "'mamba install -c conda-forge cairosvg'" + "'conda install -c conda-forge cairosvg'" ) format = "svg" @@ -596,7 +596,7 @@ def _view2d(obj, *args, **kwargs): raise ImportError( "You need to install rdkit to be able to generate " "2D views of molecules. Do this by typing, e.g. " - "'mamba install -c conda-forge rdkit' and then restarting " + "'conda install -c conda-forge rdkit' and then restarting " "Python and running this script/notebook again." ) @@ -604,7 +604,7 @@ def _to_smiles(obj, *args, **kwargs): raise ImportError( "You need to install rdkit to be able to generate " "smiles strings Do this by typing, e.g. " - "'mamba install -c conda-forge rdkit' and then restarting " + "'conda install -c conda-forge rdkit' and then restarting " "Python and running this script/notebook again." ) @@ -612,7 +612,7 @@ def _to_smarts(obj, *args, **kwargs): raise ImportError( "You need to install rdkit to be able to generate " "smarts strings Do this by typing, e.g. " - "'mamba install -c conda-forge rdkit' and then restarting " + "'conda install -c conda-forge rdkit' and then restarting " "Python and running this script/notebook again." ) @@ -623,7 +623,7 @@ def _selector_to_smarts(obj, *args, **kwargs): raise ImportError( "You need to install rdkit to be able to generate " "smarts strings Do this by typing, e.g. 
" - "'mamba install -c conda-forge rdkit' and then restarting " + "'conda install -c conda-forge rdkit' and then restarting " "Python and running this script/notebook again." ) diff --git a/src/sire/mol/_view.py b/src/sire/mol/_view.py index b5749b70c..2a668d373 100644 --- a/src/sire/mol/_view.py +++ b/src/sire/mol/_view.py @@ -649,7 +649,7 @@ def view(obj, *args, **kwargs): f"when nglview was loaded ({_nglview_import_error}). One " "possibility is that nglview is incompatible with the installed " "version of ipywidgets. Try to downgrade ipywidgets, e.g. " - "\"mamba install 'ipywidgets>=7.6.0,<8'\". You will need to " + "\"conda install 'ipywidgets>=7.6.0,<8'\". You will need to " "restart Python and run this script/notebook again." ) @@ -659,6 +659,6 @@ def view(obj, *args, **kwargs): raise ImportError( "You need to install nglview to be able to view " "molecules. Do this by typing, e.g. " - "'mamba install nglview' and then restarting Python " + "'conda install nglview' and then restarting Python " "and running this script/notebook again." ) diff --git a/tests/conftest.py b/tests/conftest.py index 2335dd95a..3232687fc 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -185,3 +185,20 @@ def neopentane_methane(): @pytest.fixture(scope="session") def zero_lj_mols(): return sr.load_test_files("zero_lj.prm7", "zero_lj.rst7") + + +@pytest.fixture(scope="session") +def openmm_platform(): + if "openmm" not in sr.convert.supported_formats(): + return None + + mols = sr.load_test_files("ala.top", "ala.crd") + + for platform in ["CUDA", "OpenCL", "CPU"]: + try: + mols.dynamics(platform=platform) + return platform + except Exception: + pass + + return "Reference" diff --git a/tests/convert/test_openmm.py b/tests/convert/test_openmm.py index 82d3bca1c..fa0056fc9 100644 --- a/tests/convert/test_openmm.py +++ b/tests/convert/test_openmm.py @@ -6,7 +6,7 @@ "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_single_energy_neopentane(neopentane_methane): +def test_openmm_single_energy_neopentane(neopentane_methane, openmm_platform): mol = neopentane_methane[0] # this function will extract the lambda0 or lambda1 end state @@ -27,7 +27,7 @@ def get_end_state(mol, state, remove_state): map = { "space": sr.vol.Cartesian(), - "platform": "Reference", + "platform": openmm_platform, "constraint": "none", "ignore_perturbations": True, } @@ -61,14 +61,14 @@ def get_end_state(mol, state, remove_state): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_single_energy(kigaki_mols): +def test_openmm_single_energy(kigaki_mols, openmm_platform): mols = kigaki_mols mol = mols[0] map = { "space": sr.vol.Cartesian(), - "platform": "Reference", + "platform": openmm_platform, "constraint": "bonds-h-angles", } @@ -101,13 +101,13 @@ def test_openmm_single_energy(kigaki_mols): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_multi_energy_small_cart(kigaki_mols): +def test_openmm_multi_energy_small_cart(kigaki_mols, openmm_platform): # first, try just 50 molecules in a cartesian space mols = kigaki_mols[0:50] map = { "space": sr.vol.Cartesian(), - "platform": "Reference", + "platform": openmm_platform, "constraint": "bonds-h-angles", } @@ -131,7 +131,7 @@ def test_openmm_multi_energy_small_cart(kigaki_mols): reason="openmm support is not available", ) @pytest.mark.slow -def test_openmm_multi_energy_all_cart(kigaki_mols): +def 
test_openmm_multi_energy_all_cart(kigaki_mols, openmm_platform): # use all of the molecules mols = kigaki_mols @@ -140,7 +140,7 @@ def test_openmm_multi_energy_all_cart(kigaki_mols): "cutoff": 10000 * sr.units.angstrom, "cutoff_type": "REACTION_FIELD", "dielectric": 1.0, - "platform": "cpu", + "platform": openmm_platform, "constraint": "bonds-h-angles", } @@ -163,7 +163,7 @@ def test_openmm_multi_energy_all_cart(kigaki_mols): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_multi_energy_all_cart_cutoff(kigaki_mols): +def test_openmm_multi_energy_all_cart_cutoff(kigaki_mols, openmm_platform): # use all of the molecules mols = kigaki_mols @@ -172,7 +172,7 @@ def test_openmm_multi_energy_all_cart_cutoff(kigaki_mols): "cutoff": 10 * sr.units.angstrom, "cutoff_type": "REACTION_FIELD", "dielectric": 78.0, - "platform": "cpu", + "platform": openmm_platform, "constraint": "bonds-h-angles", } @@ -195,7 +195,7 @@ def test_openmm_multi_energy_all_cart_cutoff(kigaki_mols): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_multi_energy_all_periodic_cutoff(kigaki_mols): +def test_openmm_multi_energy_all_periodic_cutoff(kigaki_mols, openmm_platform): # use all of the molecules mols = kigaki_mols @@ -203,7 +203,7 @@ def test_openmm_multi_energy_all_periodic_cutoff(kigaki_mols): "cutoff": 10 * sr.units.angstrom, "cutoff_type": "REACTION_FIELD", "dielectric": 78.0, - "platform": "Reference", + "platform": openmm_platform, "constraint": "bonds-h-angles", } @@ -227,7 +227,7 @@ def test_openmm_multi_energy_all_periodic_cutoff(kigaki_mols): reason="openmm support is not available", ) @pytest.mark.slow -def test_openmm_dynamics(ala_mols): +def test_openmm_dynamics(ala_mols, openmm_platform): mols = ala_mols map = { @@ -235,7 +235,7 @@ def test_openmm_dynamics(ala_mols): "cutoff_type": "REACTION_FIELD", "dielectric": 78.0, "temperature": 25 * sr.units.celsius, - "platform": "Reference", + "platform": openmm_platform, "constraint": "bonds-h-angles", # "pressure": 1 * sr.units.atm, # currently disagree with energies for NPT... 
} @@ -284,7 +284,7 @@ def test_openmm_dynamics(ala_mols): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_options(ala_mols): +def test_openmm_options(ala_mols, openmm_platform): mols = ala_mols mol = mols[0] @@ -294,7 +294,7 @@ def test_openmm_options(ala_mols): "temperature": 25 * sr.units.celsius, "pressure": 1 * sr.units.atm, "friction": 5 / sr.units.picosecond, - "platform": "Reference", + "platform": openmm_platform, "constraint": "bonds-h-angles", } @@ -314,7 +314,7 @@ def test_openmm_options(ala_mols): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_ignore_constrained(ala_mols): +def test_openmm_ignore_constrained(ala_mols, openmm_platform): mols = ala_mols mol = mols[0] @@ -322,7 +322,7 @@ def test_openmm_ignore_constrained(ala_mols): d = mol.dynamics( constraint="bonds-h-angles", include_constrained_energies=True, - platform="Reference", + platform=openmm_platform, ) nrg1 = d.current_potential_energy() @@ -330,7 +330,7 @@ def test_openmm_ignore_constrained(ala_mols): d = mol.dynamics( constraint="bonds-h-angles", include_constrained_energies=False, - platform="Reference", + platform=openmm_platform, ) nrg2 = d.current_potential_energy() @@ -344,11 +344,11 @@ def test_openmm_ignore_constrained(ala_mols): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_no_zero_sigmas(zero_lj_mols): +def test_openmm_no_zero_sigmas(zero_lj_mols, openmm_platform): mols = zero_lj_mols omm = sr.convert.to( - mols, "openmm", map={"constraint": "h-bonds", "platform": "Reference"} + mols, "openmm", map={"constraint": "h-bonds", "platform": openmm_platform} ) from openmm import XmlSerializer @@ -363,7 +363,7 @@ def test_openmm_no_zero_sigmas(zero_lj_mols): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_skipped_constrained_bonds(zero_lj_mols): +def test_openmm_skipped_constrained_bonds(zero_lj_mols, openmm_platform): mols = zero_lj_mols omm1 = sr.convert.to( @@ -372,7 +372,7 @@ def test_openmm_skipped_constrained_bonds(zero_lj_mols): map={ "constraint": "h-bonds", "include_constrained_energies": True, - "platform": "Reference", + "platform": openmm_platform, }, ) @@ -382,7 +382,7 @@ def test_openmm_skipped_constrained_bonds(zero_lj_mols): map={ "constraint": "h-bonds", "include_constrained_energies": False, - "platform": "Reference", + "platform": openmm_platform, }, ) diff --git a/tests/convert/test_openmm_lambda.py b/tests/convert/test_openmm_lambda.py index 7fb7428fc..e992452b6 100644 --- a/tests/convert/test_openmm_lambda.py +++ b/tests/convert/test_openmm_lambda.py @@ -2,7 +2,7 @@ import pytest -def _run_test(mols, is_slow=False, use_taylor=False, precision=1e-3): +def _run_test(mols, is_slow=False, use_taylor=False, precision=1e-3, platform="CPU"): c = mols.cursor() # can only get the same energies if they have the same coordinates @@ -56,7 +56,7 @@ def get_end_state(mol, state, remove_state): # need to use the reference platform on GH Actions map = { - "platform": "cpu", + "platform": platform, "schedule": l, "constraint": "bonds-h-angles", } @@ -115,16 +115,16 @@ def get_end_state(mol, state, remove_state): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_scale_lambda_simple(merged_ethane_methanol): - _run_test(merged_ethane_methanol.clone(), False) +def 
test_openmm_scale_lambda_simple(merged_ethane_methanol, openmm_platform): + _run_test(merged_ethane_methanol.clone(), False, platform=openmm_platform) @pytest.mark.skipif( "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_scale_lambda_taylor_simple(merged_ethane_methanol): - _run_test(merged_ethane_methanol.clone(), False, True) +def test_openmm_scale_lambda_taylor_simple(merged_ethane_methanol, openmm_platform): + _run_test(merged_ethane_methanol.clone(), False, True, platform=openmm_platform) @pytest.mark.veryslow @@ -132,16 +132,16 @@ def test_openmm_scale_lambda_taylor_simple(merged_ethane_methanol): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_big_openmm_scale_lambda_simple(merged_ethane_methanol): - _run_test(merged_ethane_methanol.clone(), True) +def test_big_openmm_scale_lambda_simple(merged_ethane_methanol, openmm_platform): + _run_test(merged_ethane_methanol.clone(), True, platform=openmm_platform) @pytest.mark.skipif( "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_scale_lambda_ligand(merged_zan_ose): - _run_test(merged_zan_ose.clone(), False) +def test_openmm_scale_lambda_ligand(merged_zan_ose, openmm_platform): + _run_test(merged_zan_ose.clone(), False, platform=openmm_platform) @pytest.mark.veryslow @@ -149,27 +149,27 @@ def test_openmm_scale_lambda_ligand(merged_zan_ose): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_big_openmm_scale_lambda_ligand(merged_zan_ose): - _run_test(merged_zan_ose.clone(), True) +def test_big_openmm_scale_lambda_ligand(merged_zan_ose, openmm_platform): + _run_test(merged_zan_ose.clone(), True, platform=openmm_platform) @pytest.mark.skipif( "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_scale_lambda_dichloroethane(ethane_12dichloroethane): - _run_test(ethane_12dichloroethane.clone(), False) +def test_openmm_scale_lambda_dichloroethane(ethane_12dichloroethane, openmm_platform): + _run_test(ethane_12dichloroethane.clone(), False, platform=openmm_platform) @pytest.mark.skipif( "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_scale_lambda_cyclopentane(pentane_cyclopentane): +def test_openmm_scale_lambda_cyclopentane(pentane_cyclopentane, openmm_platform): mols = pentane_cyclopentane.clone() for mol in mols.molecules("property is_perturbable"): mol = mol.edit().add_link("connectivity", "connectivity0").commit() mols.update(mol) - _run_test(mols, False) + _run_test(mols, False, platform=openmm_platform) diff --git a/tests/convert/test_openmm_minimise.py b/tests/convert/test_openmm_minimise.py index cab4a4247..bb26f0687 100644 --- a/tests/convert/test_openmm_minimise.py +++ b/tests/convert/test_openmm_minimise.py @@ -6,12 +6,12 @@ "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_simple_minimise(ala_mols): +def test_openmm_simple_minimise(ala_mols, openmm_platform): mols = ala_mols nrg = mols.energy() - mols = mols.minimisation(platform="cpu").run(5).commit() + mols = mols.minimisation(platform=openmm_platform).run(5).commit() nrg2 = mols.energy() @@ -22,7 +22,7 @@ def test_openmm_simple_minimise(ala_mols): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def 
test_openmm_minimise_lambda(merged_ethane_methanol): +def test_openmm_minimise_lambda(merged_ethane_methanol, openmm_platform): mols = merged_ethane_methanol.clone() for mol in mols.molecules("molecule property is_perturbable"): @@ -30,15 +30,15 @@ def test_openmm_minimise_lambda(merged_ethane_methanol): # this blows up because of incompatible exceptions/exclusions mol = ( - mols[0].minimisation(platform="cpu", lambda_value=1.0).run(5).commit() + mols[0].minimisation(platform=openmm_platform, lambda_value=1.0).run(5).commit() ) mols = ( - mols[0].minimisation(platform="cpu", lambda_value=0.0).run(5).commit() + mols[0].minimisation(platform=openmm_platform, lambda_value=0.0).run(5).commit() ) mols = ( - mols[0].minimisation(platform="cpu", lambda_value=0.5).run(5).commit() + mols[0].minimisation(platform=openmm_platform, lambda_value=0.5).run(5).commit() ) @@ -46,14 +46,14 @@ def test_openmm_minimise_lambda(merged_ethane_methanol): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_minimise_unbonded_water(kigaki_mols): +def test_openmm_minimise_unbonded_water(kigaki_mols, openmm_platform): mols = kigaki_mols atoms = mols[100].atoms() # the water molecules have no internal bonds, so this tests # whether or not constraints have been added correctly - mols = mols.minimisation(platform="cpu").run(10).commit() + mols = mols.minimisation(platform=openmm_platform).run(10).commit() new_atoms = mols[100].atoms() @@ -62,21 +62,13 @@ def test_openmm_minimise_unbonded_water(kigaki_mols): # value) assert ( sr.measure(atoms[0], atoms[1]).value() - == pytest.approx( - sr.measure(new_atoms[0], new_atoms[1]).value(), abs=1e-2 - ) - ) or ( - sr.measure(new_atoms[0], new_atoms[2]).value() == pytest.approx(0.9572) - ) + == pytest.approx(sr.measure(new_atoms[0], new_atoms[1]).value(), abs=1e-2) + ) or (sr.measure(new_atoms[0], new_atoms[2]).value() == pytest.approx(0.9572)) assert ( sr.measure(atoms[0], atoms[2]).value() - == pytest.approx( - sr.measure(new_atoms[0], new_atoms[2]).value(), abs=1e-2 - ) - ) or ( - sr.measure(new_atoms[0], new_atoms[2]).value() == pytest.approx(0.9572) - ) + == pytest.approx(sr.measure(new_atoms[0], new_atoms[2]).value(), abs=1e-2) + ) or (sr.measure(new_atoms[0], new_atoms[2]).value() == pytest.approx(0.9572)) assert sr.measure(atoms[1], atoms[2]).value() == pytest.approx( sr.measure(new_atoms[1], new_atoms[2]).value(), abs=1e-2 @@ -87,10 +79,10 @@ def test_openmm_minimise_unbonded_water(kigaki_mols): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_minimise_vacuum(kigaki_mols): +def test_openmm_minimise_vacuum(kigaki_mols, openmm_platform): mols = kigaki_mols - mols = mols.minimisation(platform="cpu", vacuum=True).run(10).commit() + mols = mols.minimisation(platform=openmm_platform, vacuum=True).run(10).commit() assert not mols.property("space").is_periodic() @@ -98,6 +90,6 @@ def test_openmm_minimise_vacuum(kigaki_mols): mols.add_shared_property("space", sr.vol.Cartesian()) - mols = mols.minimisation(platform="cpu", vacuum=True).run(10).commit() + mols = mols.minimisation(platform=openmm_platform, vacuum=True).run(10).commit() assert not mols.property("space").is_periodic() diff --git a/tests/convert/test_openmm_restraints.py b/tests/convert/test_openmm_restraints.py index 9aed7f679..3aeaf1832 100644 --- a/tests/convert/test_openmm_restraints.py +++ b/tests/convert/test_openmm_restraints.py @@ -6,12 +6,12 @@ "openmm" not in sr.convert.supported_formats(), 
reason="openmm support is not available", ) -def test_openmm_positional_restraints(kigaki_mols): +def test_openmm_positional_restraints(kigaki_mols, openmm_platform): mols = kigaki_mols mol = mols[0] - map = {"space": sr.vol.Cartesian(), "platform": "Reference"} + map = {"space": sr.vol.Cartesian(), "platform": openmm_platform} # test restraining all C atoms restraints = sr.restraints.positional(mol, atoms="element C") @@ -40,12 +40,12 @@ def test_openmm_positional_restraints(kigaki_mols): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_distance_restraints(ala_mols): +def test_openmm_distance_restraints(ala_mols, openmm_platform): mols = ala_mols mols = mols[0:2] - map = {"space": sr.vol.Cartesian(), "platform": "Reference"} + map = {"space": sr.vol.Cartesian(), "platform": openmm_platform} # test restraining the distance between the first and last molecule restraints = sr.restraints.distance( @@ -77,21 +77,19 @@ def test_openmm_distance_restraints(ala_mols): new_coords = [mols[0][0].coordinates(), mols[-1][0].coordinates()] - assert (new_coords[0] - new_coords[1]).length().value() == pytest.approx( - 5.0, 1e-2 - ) + assert (new_coords[0] - new_coords[1]).length().value() == pytest.approx(5.0, 1e-2) @pytest.mark.skipif( "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_fixed_atoms(kigaki_mols): +def test_openmm_fixed_atoms(kigaki_mols, openmm_platform): mols = kigaki_mols mol = mols[0] - map = {"space": sr.vol.Cartesian(), "platform": "Reference"} + map = {"space": sr.vol.Cartesian(), "platform": openmm_platform} # test fixing all C atoms coords = [] @@ -106,21 +104,19 @@ def test_openmm_fixed_atoms(kigaki_mols): mol = d.commit() for atom, coords in zip(mol.atoms("element C"), coords): - assert ( - atom.coordinates() - coords - ).length() < 0.001 * sr.units.angstrom + assert (atom.coordinates() - coords).length() < 0.001 * sr.units.angstrom @pytest.mark.skipif( "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_alchemical_restraints(ala_mols): +def test_openmm_alchemical_restraints(ala_mols, openmm_platform): mols = ala_mols mol = mols[0] - map = {"space": sr.vol.Cartesian(), "platform": "Reference"} + map = {"space": sr.vol.Cartesian(), "platform": openmm_platform} # test scaling a positional restraint restraints = sr.restraints.positional(mol, atoms="element C") @@ -143,21 +139,15 @@ def test_openmm_alchemical_restraints(ala_mols): l.add_stage("restraints", l.lam() * l.initial()) l.set_equation("restraints", "restraint", l.lam() * l.initial()) - d = mol.dynamics( - timestep="1fs", restraints=restraints, schedule=l, map=map - ) + d = mol.dynamics(timestep="1fs", restraints=restraints, schedule=l, map=map) d.set_lambda(0) - assert d.current_potential_energy().value() == pytest.approx( - nrg_0.value(), 1e-6 - ) + assert d.current_potential_energy().value() == pytest.approx(nrg_0.value(), 1e-6) d.set_lambda(1) - assert d.current_potential_energy().value() == pytest.approx( - nrg_1.value(), 1e-6 - ) + assert d.current_potential_energy().value() == pytest.approx(nrg_1.value(), 1e-6) d.set_lambda(0.3) @@ -170,17 +160,15 @@ def test_openmm_alchemical_restraints(ala_mols): "openmm" not in sr.convert.supported_formats(), reason="openmm support is not available", ) -def test_openmm_named_restraints(ala_mols): +def test_openmm_named_restraints(ala_mols, openmm_platform): mols = ala_mols mol = mols[0] - map = {"space": 
sr.vol.Cartesian(), "platform": "Reference"} + map = {"space": sr.vol.Cartesian(), "platform": openmm_platform} # test using named restraints, that we can scale these independently - posrests = sr.restraints.positional( - mol, atoms="element C", name="positional" - ) + posrests = sr.restraints.positional(mol, atoms="element C", name="positional") dstrests = sr.restraints.distance( mol, atoms0=mol[0], atoms1=mol[-1], name="distance", r0="5A" @@ -224,21 +212,15 @@ def test_openmm_named_restraints(ala_mols): l.set_equation("3", "positional", l.lam() * l.initial()) l.set_equation("3", "distance", l.lam() * l.initial()) - d = mol.dynamics( - timestep="1fs", restraints=restraints, schedule=l, map=map - ) + d = mol.dynamics(timestep="1fs", restraints=restraints, schedule=l, map=map) d.set_lambda(0) - assert d.current_potential_energy().value() == pytest.approx( - nrg_0.value(), 1e-6 - ) + assert d.current_potential_energy().value() == pytest.approx(nrg_0.value(), 1e-6) d.set_lambda(1) - assert d.current_potential_energy().value() == pytest.approx( - nrg_1_1.value(), 1e-6 - ) + assert d.current_potential_energy().value() == pytest.approx(nrg_1_1.value(), 1e-6) d.set_lambda(0.99999999999 / 3.0) @@ -248,9 +230,7 @@ def test_openmm_named_restraints(ala_mols): d.set_lambda(1.0 / 3.0) - assert d.current_potential_energy().value() == pytest.approx( - nrg_0.value(), 1e-6 - ) + assert d.current_potential_energy().value() == pytest.approx(nrg_0.value(), 1e-6) d.set_lambda(1.99999999999 / 3.0) @@ -260,6 +240,4 @@ def test_openmm_named_restraints(ala_mols): d.set_lambda(2.0 / 3.0) - assert d.current_potential_energy().value() == pytest.approx( - nrg_0.value(), 1e-6 - ) + assert d.current_potential_energy().value() == pytest.approx(nrg_0.value(), 1e-6) diff --git a/version.txt b/version.txt index 42ae83e93..dd41647cb 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -2023.5.0 +2023.5.1 diff --git a/wrapper/Convert/SireGemmi/CMakeLists.txt b/wrapper/Convert/SireGemmi/CMakeLists.txt index c1e572a64..69173463a 100644 --- a/wrapper/Convert/SireGemmi/CMakeLists.txt +++ b/wrapper/Convert/SireGemmi/CMakeLists.txt @@ -8,37 +8,12 @@ find_package(gemmi) find_package(pybind11) -if (${gemmi_FOUND}) - # We can only link against the dynamic library - static doesn't work - get_target_property(GEMMI_LIBRARY gemmi::gemmi_cpp LOCATION) - - # Test if we can link to Gemmi on Windows - if (WIN32) - # test if GEMMI_LIBRARY ends with .lib - if (${GEMMI_LIBRARY} MATCHES "\\.lib$") - message( STATUS "GEMMI_LIBRARY is a static library." ) - message( STATUS "We can only link against the dynamic library." ) - message( STATUS "Disabling gemmi PDBx/mmCIF support.") - set(gemmi_FOUND FALSE) - endif() - else() - # test if GEMMI_LIBRARY ends with .a - if (${GEMMI_LIBRARY} MATCHES "\\.a$") - message( STATUS "GEMMI_LIBRARY is a static library." ) - message( STATUS "We can only link against the dynamic library." 
) - message( STATUS "Disabling gemmi PDBx/mmCIF support.") - set(gemmi_FOUND FALSE) - endif() - endif() -endif() - if (${gemmi_FOUND} AND ${pybind11_FOUND}) # Only compile and install if we have gemmi message( STATUS "Compiling SireGemmi converter" ) - get_target_property(GEMMI_LIBRARY gemmi::gemmi_cpp LOCATION) - - message( STATUS "GEMMI LIBRARY ${GEMMI_LIBRARY}" ) + get_target_property(GEMMI_LOCATION gemmi::gemmi_cpp IMPORTED_LOCATION_RELEASE) + message( STATUS "gemmi location: ${GEMMI_LOCATION}" ) message( STATUS "PYBIND11 INCLUDE ${pybind11_INCLUDE_DIR}") message( STATUS "PYBIND11 LIBRARY ${pybind11_LIBRARIES}" ) message( STATUS "ZLIB LIBRARIES ${ZLIB_LIBRARIES}" ) @@ -83,7 +58,7 @@ if (${gemmi_FOUND} AND ${pybind11_FOUND}) SIRE_SireBase SIRE_SireStream SIRE_SireError - ${GEMMI_LIBRARY} + gemmi::gemmi_cpp ${pybind11_LIBRARIES} ) diff --git a/wrapper/Convert/SireGemmi/_SireGemmi.main.cpp b/wrapper/Convert/SireGemmi/_SireGemmi.main.cpp index 7c9857c14..482f668d7 100644 --- a/wrapper/Convert/SireGemmi/_SireGemmi.main.cpp +++ b/wrapper/Convert/SireGemmi/_SireGemmi.main.cpp @@ -12,6 +12,8 @@ namespace bp = boost::python; using namespace SireGemmi; +#include + BOOST_PYTHON_MODULE(_SireGemmi) { bp::def("sire_to_gemmi", diff --git a/wrapper/Convert/SireGemmi/sire_gemmi.cpp b/wrapper/Convert/SireGemmi/sire_gemmi.cpp index c67e9f9a5..e2c2eb89a 100644 --- a/wrapper/Convert/SireGemmi/sire_gemmi.cpp +++ b/wrapper/Convert/SireGemmi/sire_gemmi.cpp @@ -6,6 +6,7 @@ #include "gemmi/polyheur.hpp" #include "gemmi/to_cif.hpp" #include "gemmi/to_mmcif.hpp" +#include "gemmi/mmcif.hpp" #include "SireIO/pdbx.h" diff --git a/wrapper/Convert/SireGemmi/sire_gemmi.h b/wrapper/Convert/SireGemmi/sire_gemmi.h index 99888b775..c1eb80e17 100644 --- a/wrapper/Convert/SireGemmi/sire_gemmi.h +++ b/wrapper/Convert/SireGemmi/sire_gemmi.h @@ -1,7 +1,10 @@ #ifndef SIRE_GEMMI_H #define SIRE_GEMMI_H -#include "gemmi/mmcif.hpp" +namespace gemmi +{ + struct Structure; +} #include "SireSystem/system.h" diff --git a/wrapper/Convert/SireOpenMM/lambdalever.cpp b/wrapper/Convert/SireOpenMM/lambdalever.cpp index a3cfccff6..774749f72 100644 --- a/wrapper/Convert/SireOpenMM/lambdalever.cpp +++ b/wrapper/Convert/SireOpenMM/lambdalever.cpp @@ -375,9 +375,18 @@ double LambdaLever::setLambda(OpenMM::Context &context, std::vector custom_params = {0.0, 0.0, 0.0, 0.0}; - // record the range of indicies of the atoms which change + // record the range of indicies of the atoms, bonds, angles, + // torsions which change int start_change_atom = -1; int end_change_atom = -1; + int start_change_14 = -1; + int end_change_14 = -1; + int start_change_bond = -1; + int end_change_bond = -1; + int start_change_angle = -1; + int end_change_angle = -1; + int start_change_torsion = -1; + int end_change_torsion = -1; // change the parameters for all of the perturbable molecules for (int i = 0; i < this->perturbable_mols.count(); ++i) @@ -571,6 +580,20 @@ double LambdaLever::setLambda(OpenMM::Context &context, {std::get<2>(p), std::get<3>(p), 4.0 * std::get<4>(p), std::get<5>(p)}; + if (start_change_14 == -1) + { + start_change_14 = nbidx; + end_change_14 = nbidx + 1; + } + else + { + if (nbidx < start_change_14) + start_change_14 = nbidx; + + if (nbidx + 1 > end_change_14) + end_change_14 = nbidx + 1; + } + ghost_14ff->setBondParameters(nbidx, std::get<0>(p), std::get<1>(p), @@ -596,6 +619,21 @@ double LambdaLever::setLambda(OpenMM::Context &context, { const int nparams = morphed_bond_k.count(); + if (start_change_bond == -1) + { + start_change_bond = 
start_index; + end_change_bond = start_index + nparams; + } + else if (start_index < start_change_bond) + { + start_change_bond = start_index; + } + + if (start_index + nparams > end_change_bond) + { + end_change_bond = start_index + nparams; + } + for (int j = 0; j < nparams; ++j) { const int index = start_index + j; @@ -618,6 +656,21 @@ double LambdaLever::setLambda(OpenMM::Context &context, { const int nparams = morphed_angle_k.count(); + if (start_change_angle == -1) + { + start_change_angle = start_index; + end_change_angle = start_index + nparams; + } + else if (start_index < start_change_angle) + { + start_change_angle = start_index; + } + + if (start_index + nparams > end_change_angle) + { + end_change_angle = start_index + nparams; + } + for (int j = 0; j < nparams; ++j) { const int index = start_index + j; @@ -642,6 +695,21 @@ double LambdaLever::setLambda(OpenMM::Context &context, { const int nparams = morphed_torsion_k.count(); + if (start_change_torsion == -1) + { + start_change_torsion = start_index; + end_change_torsion = start_index + nparams; + } + else if (start_index < start_change_torsion) + { + start_change_torsion = start_index; + } + + if (start_index + nparams > end_change_torsion) + { + end_change_torsion = start_index + nparams; + } + for (int j = 0; j < nparams; ++j) { const int index = start_index + j; @@ -666,34 +734,63 @@ double LambdaLever::setLambda(OpenMM::Context &context, } // update the parameters in the context - if (cljff) - cljff->updateParametersInContext(context); + const auto num_changed_atoms = end_change_atom - start_change_atom; + const auto num_changed_bonds = end_change_bond - start_change_bond; + const auto num_changed_angles = end_change_angle - start_change_angle; + const auto num_changed_torsions = end_change_torsion - start_change_torsion; + const auto num_changed_14 = end_change_14 - start_change_14; - if (ghost_ghostff) + if (num_changed_atoms > 0) + { + if (cljff) #ifdef SIRE_HAS_UPDATE_SOME_IN_CONTEXT - ghost_ghostff->updateSomeParametersInContext(start_change_atom, end_change_atom - start_change_atom, context); + cljff->updateSomeParametersInContext(start_change_atom, num_changed_atoms, context); #else - ghost_ghostff->updateParametersInContext(context); + cljff->updateParametersInContext(context); #endif - if (ghost_nonghostff) + if (ghost_ghostff) #ifdef SIRE_HAS_UPDATE_SOME_IN_CONTEXT - ghost_nonghostff->updateSomeParametersInContext(start_change_atom, end_change_atom - start_change_atom, context); + ghost_ghostff->updateSomeParametersInContext(start_change_atom, num_changed_atoms, context); #else - ghost_nonghostff->updateParametersInContext(context); + ghost_ghostff->updateParametersInContext(context); #endif - if (ghost_14ff) + if (ghost_nonghostff) +#ifdef SIRE_HAS_UPDATE_SOME_IN_CONTEXT + ghost_nonghostff->updateSomeParametersInContext(start_change_atom, num_changed_atoms, context); +#else + ghost_nonghostff->updateParametersInContext(context); +#endif + } + + if (ghost_14ff and num_changed_14 > 0) +#ifdef SIRE_HAS_UPDATE_SOME_IN_CONTEXT + ghost_14ff->updateSomeParametersInContext(start_change_14, num_changed_14, context); +#else ghost_14ff->updateParametersInContext(context); +#endif - if (bondff) + if (bondff and num_changed_bonds > 0) +#ifdef SIRE_HAS_UPDATE_SOME_IN_CONTEXT + bondff->updateSomeParametersInContext(start_change_bond, num_changed_bonds, context); +#else bondff->updateParametersInContext(context); +#endif - if (angff) + if (angff and num_changed_angles > 0) +#ifdef SIRE_HAS_UPDATE_SOME_IN_CONTEXT + 
angff->updateSomeParametersInContext(start_change_angle, num_changed_angles, context); +#else angff->updateParametersInContext(context); +#endif - if (dihff) + if (dihff and num_changed_torsions > 0) +#ifdef SIRE_HAS_UPDATE_SOME_IN_CONTEXT + dihff->updateSomeParametersInContext(start_change_torsion, num_changed_torsions, context); +#else dihff->updateParametersInContext(context); +#endif // now update any restraints that are scaled for (const auto &restraint : this->name_to_restraintidx.keys()) diff --git a/wrapper/Convert/__init__.py b/wrapper/Convert/__init__.py index e49865b76..2385c329b 100644 --- a/wrapper/Convert/__init__.py +++ b/wrapper/Convert/__init__.py @@ -39,7 +39,7 @@ def _no_rdkit(): print(_rdkit_import_error) raise ModuleNotFoundError( "Unable to convert to/from RDKit as it is not installed. " - "Please install using `mamba install -c conda-forge rdkit` " + "Please install using `conda install -c conda-forge rdkit` " "and then re-run this script." ) @@ -497,7 +497,7 @@ def _no_gemmi(): print(_gemmi_import_error) raise ModuleNotFoundError( "Unable to convert to/from Gemmi as it is not installed. " - "Please install using `mamba install -c conda-forge gemmi` " + "Please install using `conda install -c conda-forge gemmi` " "and then re-run this script." )
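The test hunks above replace every hard-coded ``platform="cpu"`` / ``"Reference"`` string with an ``openmm_platform`` pytest fixture, so the OpenMM platform used by the test suite can be chosen per run. The fixture definition itself is not part of this diff; the snippet below is only a minimal sketch of how such a session-scoped fixture could be provided from a shared ``conftest.py`` (the ``OPENMM_PLATFORM`` environment variable and the ``"Reference"`` default are illustrative assumptions, not taken from this patch).

.. code-block:: python

    # tests/conftest.py -- hypothetical sketch, not part of this patch
    import os

    import pytest


    @pytest.fixture(scope="session")
    def openmm_platform():
        # Let the environment (e.g. CI) select a faster OpenMM platform such as
        # "CUDA" or "OpenCL", falling back to the always-available "Reference".
        return os.environ.get("OPENMM_PLATFORM", "Reference")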