diff --git a/CMakeLists.txt b/CMakeLists.txt
index b2b0bdc2c9c..c95d9f8ffe0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -90,6 +90,7 @@ option(ESPRESSO_BUILD_TESTS "Enable tests" ON)
option(ESPRESSO_BUILD_WITH_SCAFACOS "Build with ScaFaCoS support" OFF)
option(ESPRESSO_BUILD_WITH_STOKESIAN_DYNAMICS "Build with Stokesian Dynamics"
OFF)
+option(ESPRESSO_BUILD_WITH_METATENSOR "Build with Metatensor support" OFF)
option(ESPRESSO_BUILD_WITH_WALBERLA
"Build with waLBerla lattice-Boltzmann support" OFF)
option(ESPRESSO_BUILD_WITH_WALBERLA_AVX
@@ -923,6 +924,138 @@ if(ESPRESSO_BUILD_BENCHMARKS)
add_subdirectory(maintainer/benchmarks)
endif()
+#
+# Metatensor
+#
+
+if(ESPRESSO_BUILD_WITH_METATENSOR)
+ if (BUILD_OMP AND APPLE)
+ message(FATAL_ERROR
+      "Cannot enable both BUILD_OMP and ESPRESSO_BUILD_WITH_METATENSOR on Apple systems, "
+ "since this results in two different versions of the OpenMP library (one "
+ "from the system and one from Torch) being linked to the final "
+ "executable, which then crashes"
+ )
+ endif()
+
+ # Bring the `torch` target in scope to allow evaluation of cmake generator
+ # expression from `metatensor_torch`
+ find_package(Torch REQUIRED)
+
+ # # cmake-format: off set(METATENSOR_URL_BASE
+ # "https://github.com/lab-cosmo/metatensor/releases/download")
+ # set(METATENSOR_CORE_VERSION "0.1.8") set(METATENSOR_TORCH_VERSION "0.5.3")
+ #
+ # include(FetchContent) set(BUILD_SHARED_LIBS on CACHE BOOL "")
+ # FetchContent_Declare( metatensor URL
+ # "${METATENSOR_URL_BASE}/metatensor-core-v${METATENSOR_CORE_VERSION}/metatensor-core-cxx-${METATENSOR_CORE_VERSION}.tar.gz"
+ # URL_HASH SHA1=3ed389770e5ec6dbb8cbc9ed88f84d6809b552ef )
+ # set(BUILD_SHARED_LIBS on CACHE BOOL "")
+ #
+ # # workaround for https://gitlab.kitware.com/cmake/cmake/-/issues/21146
+ # if(NOT DEFINED metatensor_SOURCE_DIR OR NOT EXISTS
+ # "${metatensor_SOURCE_DIR}") message(STATUS "Fetching metatensor
+ # v${METATENSOR_CORE_VERSION} from github") FetchContent_Populate(metatensor)
+ # endif() set(BUILD_SHARED_LIBS on CACHE BOOL "")
+ #
+ # FetchContent_Declare( metatensor_torch URL
+ # "${METATENSOR_URL_BASE}/metatensor-torch-v${METATENSOR_TORCH_VERSION}/metatensor-torch-cxx-${METATENSOR_TORCH_VERSION}.tar.gz"
+ # ) set(BUILD_SHARED_LIBS on CACHE BOOL "") if(NOT DEFINED
+ # metatensor_torch_SOURCE_DIR OR NOT EXISTS "${metatensor_torch_SOURCE_DIR}")
+ # message(STATUS "Fetching metatensor torch v${METATENSOR_CORE_VERSION} from
+ # github") FetchContent_Populate(metatensor_torch) endif() # cmake-format: on
+ # set(BUILD_SHARED_LIBS on CACHE BOOL "")
+ #
+ # set(METATENSOR_INSTALL_BOTH_STATIC_SHARED on CACHE BOOL "")
+ # add_subdirectory("${metatensor_SOURCE_DIR}")
+ # add_subdirectory("${metatensor_torch_SOURCE_DIR}")
+
+  # The caffe2::mkl target contains MKL_INCLUDE_DIR in its
+ # INTERFACE_INCLUDE_DIRECTORIES even if MKL was not found, causing a build
+ # failure with "Imported target "torch" includes non-existent path" down the
+ # line. This code removes the missing path from INTERFACE_INCLUDE_DIRECTORIES,
+ # allowing the build to continue further.
+ # if (TARGET caffe2::mkl)
+ # get_target_property(CAFFE2_MKL_INCLUDE_DIRECTORIES caffe2::mkl INTERFACE_INCLUDE_DIRECTORIES)
+ # set(MKL_INCLUDE_DIR_NOTFOUND "")
+ # foreach(_include_dir_ ${CAFFE2_MKL_INCLUDE_DIRECTORIES})
+ # if ("${_include_dir_}" MATCHES "MKL_INCLUDE_DIR-NOTFOUND")
+ # set(MKL_INCLUDE_DIR_NOTFOUND "${_include_dir_}")
+ # endif()
+ # endforeach()
+ #
+ # if (NOT "${MKL_INCLUDE_DIR_NOTFOUND}" STREQUAL "")
+ # list(REMOVE_ITEM CAFFE2_MKL_INCLUDE_DIRECTORIES "${MKL_INCLUDE_DIR_NOTFOUND}")
+ # endif()
+ # set_target_properties(caffe2::mkl PROPERTIES
+ # INTERFACE_INCLUDE_DIRECTORIES "${CAFFE2_MKL_INCLUDE_DIRECTORIES}"
+ # )
+ # endif()
+
+ set(METATENSOR_CORE_VERSION "0.1.17")
+ set(METATENSOR_CORE_SHA256 "42119e11908239915ccc187d7ca65449b461f1d4b5af4d6df1fb613d687da76a")
+
+ set(METATENSOR_TORCH_VERSION "0.8.1")
+ set(METATENSOR_TORCH_SHA256 "9da124e8e09dc1859700723a76ff29aef7a216b84a19d38746cc45bf45bc599b")
+
+ set(METATOMIC_TORCH_VERSION "0.1.5")
+ set(METATOMIC_TORCH_SHA256 "8ecd1587797fe1cf6b2162ddc10cc84c558fdfd55ab225bc5de4fe15ace8fc3d")
+
+ set(DOWNLOAD_METATENSOR_DEFAULT ON)
+ find_package(metatensor_torch QUIET ${METATENSOR_TORCH_VERSION})
+ if (metatensor_torch_FOUND)
+ set(DOWNLOAD_METATENSOR_DEFAULT OFF)
+ endif()
+
+ set(DOWNLOAD_METATOMIC_DEFAULT ON)
+ find_package(metatomic_torch QUIET ${METATOMIC_TORCH_VERSION})
+ if (metatomic_torch_FOUND)
+ set(DOWNLOAD_METATOMIC_DEFAULT OFF)
+ endif()
+
+ option(DOWNLOAD_METATENSOR "Download metatensor package instead of using an already installed one" ${DOWNLOAD_METATENSOR_DEFAULT})
+ option(DOWNLOAD_METATOMIC "Download metatomic package instead of using an already installed one" ${DOWNLOAD_METATOMIC_DEFAULT})
+ if (DOWNLOAD_METATENSOR)
+ include(FetchContent)
+
+ set(URL_BASE "https://github.com/metatensor/metatensor/releases/download")
+ FetchContent_Declare(metatensor
+ URL ${URL_BASE}/metatensor-core-v${METATENSOR_CORE_VERSION}/metatensor-core-cxx-${METATENSOR_CORE_VERSION}.tar.gz
+ URL_HASH SHA256=${METATENSOR_CORE_SHA256}
+ )
+
+ message(STATUS "Fetching metatensor v${METATENSOR_CORE_VERSION} from github")
+ FetchContent_MakeAvailable(metatensor)
+
+ FetchContent_Declare(metatensor-torch
+ URL ${URL_BASE}/metatensor-torch-v${METATENSOR_TORCH_VERSION}/metatensor-torch-cxx-${METATENSOR_TORCH_VERSION}.tar.gz
+ URL_HASH SHA256=${METATENSOR_TORCH_SHA256}
+ )
+
+ message(STATUS "Fetching metatensor-torch v${METATENSOR_TORCH_VERSION} from github")
+ FetchContent_MakeAvailable(metatensor-torch)
+ else()
+ # make sure to fail the configuration if cmake can not find metatensor-torch
+ find_package(metatensor_torch REQUIRED ${METATENSOR_TORCH_VERSION})
+ endif()
+
+ if (DOWNLOAD_METATOMIC)
+ include(FetchContent)
+
+ set(URL_BASE "https://github.com/metatensor/metatomic/releases/download")
+ FetchContent_Declare(metatomic-torch
+ URL ${URL_BASE}/metatomic-torch-v${METATOMIC_TORCH_VERSION}/metatomic-torch-cxx-${METATOMIC_TORCH_VERSION}.tar.gz
+ URL_HASH SHA256=${METATOMIC_TORCH_SHA256}
+ )
+
+ message(STATUS "Fetching metatomic-torch v${METATOMIC_TORCH_VERSION} from github")
+ FetchContent_MakeAvailable(metatomic-torch)
+ else()
+ # make sure to fail the configuration if cmake can not find metatomic-torch
+ find_package(metatomic_torch REQUIRED ${METATOMIC_TORCH_VERSION})
+ endif()
+endif()
+
#
# waLBerla
#
diff --git a/build_script.sh b/build_script.sh
new file mode 100644
index 00000000000..7fa035d7017
--- /dev/null
+++ b/build_script.sh
@@ -0,0 +1,32 @@
+export PATH=/usr/local/cuda-12.8/bin:$PATH
+export LD_LIBRARY_PATH=/usr/local/cuda-12.8/lib64:$LD_LIBRARY_PATH
+export CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-12.8
+
+ESPRESSO_DIR=/tikhome/weeber/es
+BUILD_DIR=/tikhome/weeber/es/build_mt
+VENV_DIR=/work/weeber/es_mt-env
+
+rm -rf ${BUILD_DIR}
+mkdir -p ${BUILD_DIR}
+cd ${BUILD_DIR}
+
+source ${VENV_DIR}/bin/activate
+
+export TORCH_PREFIX=$(python -c "import torch; print(torch.utils.cmake_prefix_path)")
+export MTS_PREFIX=$(python -c "import metatensor; print(metatensor.utils.cmake_prefix_path)")
+export MTS_TORCH_PREFIX=$(python -c "import metatensor.torch; print(metatensor.torch.utils.cmake_prefix_path)")
+export MTA_TORCH_PREFIX=$(python -c "import metatomic.torch; print(metatomic.torch.utils.cmake_prefix_path)")
+export CMAKE_PREFIX_PATH="$TORCH_PREFIX;$MTS_PREFIX;$MTS_TORCH_PREFIX;$MTA_TORCH_PREFIX"
+
+cd ${BUILD_DIR}
+cmake ../ \
+ -D CMAKE_BUILD_TYPE=Debug \
+ -D ESPRESSO_BUILD_WITH_CUDA=OFF \
+ -D ESPRESSO_BUILD_WITH_CCACHE=OFF \
+ -D ESPRESSO_BUILD_WITH_WALBERLA=OFF \
+ -D ESPRESSO_BUILD_WITH_WALBERLA_AVX=OFF \
+ -D ESPRESSO_BUILD_WITH_GSL=OFF \
+ -D ESPRESSO_BUILD_WITH_METATENSOR=ON \
+ -D CMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH"
+
+make -j$(nproc)
diff --git a/cmake/espresso_cmake_config.cmakein b/cmake/espresso_cmake_config.cmakein
index 1d2acab8acb..b4ae031313b 100644
--- a/cmake/espresso_cmake_config.cmakein
+++ b/cmake/espresso_cmake_config.cmakein
@@ -13,6 +13,8 @@
#cmakedefine ESPRESSO_BUILD_WITH_STOKESIAN_DYNAMICS
+#cmakedefine ESPRESSO_BUILD_WITH_METATENSOR
+
#cmakedefine ESPRESSO_BUILD_WITH_WALBERLA
#cmakedefine ESPRESSO_BUILD_WITH_VALGRIND
diff --git a/lennard-jones.pt b/lennard-jones.pt
new file mode 100644
index 00000000000..760ebca085b
Binary files /dev/null and b/lennard-jones.pt differ
diff --git a/src/config/features.def b/src/config/features.def
index 5969e87d231..681e4d91a42 100644
--- a/src/config/features.def
+++ b/src/config/features.def
@@ -112,6 +112,7 @@ HDF5 external
SCAFACOS external
GSL external
STOKESIAN_DYNAMICS external
+METATENSOR external
WALBERLA external
VALGRIND external
CALIPER external
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 4a98e3150e3..0a400604397 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -74,6 +74,12 @@ if(ESPRESSO_BUILD_WITH_CUDA)
"${CABANA_CUDA_CLANG_TIDY}")
endif()
endif()
+if(ESPRESSO_BUILD_WITH_METATENSOR)
+ target_link_libraries(espresso_core PUBLIC "${TORCH_LIBRARIES}")
+ target_link_libraries(espresso_core PUBLIC metatensor::shared)
+ target_link_libraries(espresso_core PUBLIC metatomic_torch)
+ target_link_libraries(espresso_core PUBLIC metatensor_torch)
+endif()
install(TARGETS espresso_core
LIBRARY DESTINATION ${ESPRESSO_INSTALL_PYTHON}/espressomd)
@@ -115,6 +121,7 @@ add_subdirectory(immersed_boundary)
add_subdirectory(integrators)
add_subdirectory(io)
add_subdirectory(lb)
+add_subdirectory(ml_metatensor)
add_subdirectory(magnetostatics)
add_subdirectory(nonbonded_interactions)
add_subdirectory(object-in-fluid)
diff --git a/src/core/ml_metatensor/CMakeLists.txt b/src/core/ml_metatensor/CMakeLists.txt
new file mode 100644
index 00000000000..1759a21e246
--- /dev/null
+++ b/src/core/ml_metatensor/CMakeLists.txt
@@ -0,0 +1,23 @@
+#
+# Copyright (C) 2018-2022 The ESPResSo project
+#
+# This file is part of ESPResSo.
+#
+# ESPResSo is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# ESPResSo is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+if(ESPRESSO_BUILD_WITH_METATENSOR)
+ target_sources(espresso_core PRIVATE model.cpp stub.cpp)
+ # target_sources(espresso_core PRIVATE load_model.cpp)
+endif()
diff --git a/src/core/ml_metatensor/add_neighbor_list.hpp b/src/core/ml_metatensor/add_neighbor_list.hpp
new file mode 100644
index 00000000000..2790ec90901
--- /dev/null
+++ b/src/core/ml_metatensor/add_neighbor_list.hpp
@@ -0,0 +1,71 @@
+#include "metatensor/torch/atomistic/system.hpp"
+#include "utils/Vector.hpp"
+#include <variant>
+
+struct PairInfo {
+ int part_id_1;
+ int part_id_2;
+ Utils::Vector3d distance;
+};
+
+using Sample = std::array;
+using Distances = std::variant>,
+ std::vector>>;
+
+template
+metatensor_torch::TorchTensorBlock
+neighbor_list_from_pairs(const metatensor_torch::System &system,
+ const PairIterable &pairs) {
+ auto dtype = system->positions().scalar_type();
+ auto device = system->positions().device();
+ std::vector samples;
+ Distances distances;
+
+ if (dtype == torch::kFloat64) {
+ distances = {std::vector>()};
+ } else if (dtype == torch::kFloat32) {
+ distances = {std::vector>()};
+ } else {
+ throw std::runtime_error("Unsupported floating point data type");
+ }
+
+ for (auto const &pair : pairs) {
+ samples.emplace_back(pair.particle_id_1, pair.particle_id_2, 0, 0, 0);
+ std::visit([&pair](auto &vec) { vec.push_back(pair.distance); }, distances);
+ }
+
+ auto n_pairs = static_cast(samples.size());
+
+ auto samples_tensor = torch::from_blob(
+ samples.data(), {n_pairs, 5},
+ torch::TensorOptions().dtype(torch::kInt32).device(torch::kCPU));
+
+ auto samples_ptr = torch::make_intrusive(
+ std::vector{"first_atom", "second_atom", "cell_shift_a",
+ "cell_shift_b", "cell_shift_c"},
+ samples);
+
+ auto distances_vectors = torch::from_blob(
+ std::visit([](auto &vec) { return vec.data(); }, distances),
+ {n_pairs, 3, 1}, torch::TensorOptions().dtype(dtype).device(torch::kCPU));
+
+ auto neighbors = torch::make_intrusive(
+ distances_vectors.to(dtype).to(device), samples_ptr->to(device),
+ std::vector{
+ metatensor_torch::LabelsHolder::create({"xyz"}, {{0}, {1}, {2}})
+ ->to(device),
+ },
+ metatensor_torch::LabelsHolder::create({"distance"}, {{0}})->to(device));
+
+ return neighbors;
+}
+
+void add_neighbor_list_to_system(
+ metatensor_torch::System &system,
+ const metatensor_torch::TorchTensorBlock &neighbors,
+ const metatensor_torch::NeighborListOptions &options,
+ bool check_consistency) {
+ metatensor_torch::register_autograd_neighbors(system, neighbors,
+ check_consistency);
+ system->add_neighbor_list(options, neighbors);
+}
diff --git a/src/core/ml_metatensor/compute.hpp b/src/core/ml_metatensor/compute.hpp
new file mode 100644
index 00000000000..2735dc9c9e3
--- /dev/null
+++ b/src/core/ml_metatensor/compute.hpp
@@ -0,0 +1,83 @@
+#include "metatensor/torch/atomistic/system.hpp"
+#include
+#include
+
+metatensor_torch::TensorMapHolder
+run_model(metatensor_torch::System &system, int64_t n_particles,
+ const metatensor_torch::ModelEvaluationOptions evaluation_options,
+ torch::Dtype dtype, torch::Device device, bool check_consistency) {
+
+
+ // only run the calculation for atoms actually in the current domain
+ auto options = torch::TensorOptions().dtype(torch::kInt32);
+ auto selected_atoms_values = torch::zeros({n_particles, 2}, options);
+
+ for (int i = 0; i < n_particles; i++) {
+ selected_atoms_values[i][0] = 0;
+ selected_atoms_values[i][1] = i;
+ }
+ auto selected_atoms = torch::make_intrusive(
+ std::vector{"system", "atom"}, selected_atoms_values);
+ evaluation_options->set_selected_atoms(selected_atoms->to(device));
+
+ torch::IValue result_ivalue;
+ model.forward({std::vector{system},
+ evaluation_options, check_consistency});
+
+ auto result = result_ivalue.toGenericDict();
+ auto energy =
+ result.at("energy").toCustomClass();
+ auto energy_tensor =
+ metatensor_torch::TensorMapHolder::block_by_id(energy, 0);
+}
+
+double get_energy(metatensor_torch::TensorMapHolder &energy,
+ bool energy_is_per_atom) {
+ auto energy_block = metatensor_torch::TensorMapHolder::block_by_id(energy, 0);
+ auto energy_tensor = energy_block->values();
+ auto energy_detached =
+ energy_tensor.detach().to(torch::kCPU).to(torch::kFloat64);
+ auto energy_samples = energy_block->samples();
+
+ // store the energy returned by the model
+ torch::Tensor global_energy;
+ if (energy_is_per_atom) {
+ assert(energy_samples->size() == 2);
+ assert(energy_samples->names()[0] == "system");
+ assert(energy_samples->names()[1] == "atom");
+
+ auto samples_values = energy_samples->values().to(torch::kCPU);
+ auto samples = samples_values.accessor();
+
+ // int n_atoms = selected_atoms_values.sizes();
+ // assert(samples_values.sizes() == selected_atoms_values.sizes());
+
+ auto energies = energy_detached.accessor();
+ global_energy = energy_detached.sum(0);
+ assert(energy_detached.sizes() == std::vector({1}));
+ } else {
+ assert(energy_samples->size() == 1);
+ assert(energy_samples->names()[0] == "system");
+
+ assert(energy_detached.sizes() == std::vector({1, 1}));
+ global_energy = energy_detached.reshape({1});
+ }
+
+ return global_energy.item();
+}
+
+torch::Tensor get_forces(metatensor::TensorMap &energy,
+ metatensor_torch::System &system) {
+ // reset gradients to zero before calling backward
+ system->positions().mutable_grad() = torch::Tensor();
+
+ auto energy_block = metatensor_torch::TensorMapHolder::block_by_id(energy, 0);
+ auto energy_tensor = energy_block->values();
+
+ // compute forces/virial with backward propagation
+ energy_tensor.backward(-torch::ones_like(energy_tensor));
+ auto forces_tensor = system->positions().grad();
+ assert(forces_tensor.is_cpu() &&
+ forces_tensor.scalar_type() == torch::kFloat64);
+ return forces_tensor;
+}
diff --git a/src/core/ml_metatensor/model.cpp b/src/core/ml_metatensor/model.cpp
new file mode 100644
index 00000000000..53e159d2438
--- /dev/null
+++ b/src/core/ml_metatensor/model.cpp
@@ -0,0 +1,32 @@
+#include "config/config.hpp"
+
+#ifdef ESPRESSO_METATENSOR
+#include "model.hpp"
+#include
+
+MetatensorModel::MetatensorModel(const std::string &path) {
+ this->model = std::make_unique(
+ metatomic_torch::load_atomistic_model(
+ path)); // TODO: Capture c10::Error and add custom exception?
+
+ auto capabilities_ivalue = this->model->run_method("capabilities");
+ this->capabilities =
+ capabilities_ivalue
+ .toCustomClass();
+
+ if (!this->capabilities->outputs().contains("energy")) {
+ throw std::runtime_error("Metatensor model at " + path +
+ " does not have energy as output!");
+ }
+}
+
+std::string MetatensorModel::print_metadata() {
+ if (!this->model) {
+ throw std::runtime_error("No model loaded!");
+ }
+ auto metadata_ivalue = this->model->run_method("metadata");
+ auto metadata =
+ metadata_ivalue.toCustomClass();
+ return metadata->print();
+}
+#endif
diff --git a/src/core/ml_metatensor/model.hpp b/src/core/ml_metatensor/model.hpp
new file mode 100644
index 00000000000..6884f60b12e
--- /dev/null
+++ b/src/core/ml_metatensor/model.hpp
@@ -0,0 +1,18 @@
+#include "config/config.hpp"
+
+#ifdef ESPRESSO_METATENSOR
+#include
+#include
+#include
+
+#include
+
+struct MetatensorModel {
+ MetatensorModel(const std::string &path);
+ std::string print_metadata();
+
+ std::unique_ptr model;
+ metatomic_torch::ModelCapabilities capabilities;
+ metatomic_torch::ModelEvaluationOptions evaluation_options;
+};
+#endif
diff --git a/src/core/ml_metatensor/sketches_espresso_bindings/add_neighbor_list.hpp b/src/core/ml_metatensor/sketches_espresso_bindings/add_neighbor_list.hpp
new file mode 100644
index 00000000000..ceb18cf1eea
--- /dev/null
+++ b/src/core/ml_metatensor/sketches_espresso_bindings/add_neighbor_list.hpp
@@ -0,0 +1,71 @@
+struct PairInfo {
+ int part_id_1,
+ int part_id_2,
+ Utils::Vector3d distance;
+}
+
+using Sample = std::array;
+using Distances =
+ std::variant>, std::vector>>;
+
+
+template
+TorchTensorBlock neighbor_list_from_pairs(const metatensor_torch::System& system, const PairIterable& pairs) {
+ auto dtype = system->positions().scalar_type();
+ auto device = system->positions().device();
+ std::vector samples;
+ Distances distances;
+ if (dtype == torch::kFloat64) {
+ distances = {std::vector>()};
+ }
+ else if (dtype == torch::kFloat32) {
+ distances = {std::vector>()};
+ }
+ else {
+        throw std::runtime_error("Unsupported floating point data type");
+ }
+
+ for (auto const& pair: pairs) {
+ auto sample = Sample{
+ pair.particle_id_1, pair.particle_id_2, 0, 0, 0};
+ samples.push_back(sample);
+ (*distances).push_back(pair.distance);
+ }
+
+
+ int64_t n_pairs = samples.size();
+ auto samples_tensor = torch::from_blob(
+ reinterpret_cast(samples.data()),
+ {n_pairs, 5},
+ torch::TensorOptions().dtype(torch::kInt32).device(torch::kCPU)
+ );
+
+ auto samples = torch::make_intrusive(
+ std::vector{"first_atom", "second_atom", "cell_shift_a", "cell_shift_b", "cell_shift_c"},
+ samples_values
+ );
+
+ distances_vectors = torch::from_blob(
+ (*distances).data(),
+ {n_pairs, 3, 1},
+ torch::TensorOptions().dtype(dtype).device(torch::kCPU)
+ );
+ return neighbors = torch::make_intrusive(
+ distances_vectors.to(dtype).to(device),
+ samples->to(device),
+ std::vector{
+ metatensor_torch::LabelsHolder::create({"xyz"}, {{0}, {1}, {2}})->to(device),
+ },
+ metatensor_torch::LabelsHolder::create({"distance"}, {{0}})->to(device)
+ );
+
+}
+
+void add_neighbor_list_to_system(MetatensorTorch::system& system,
+ const TorchTensorBlock& neighbors,
+ const NeighborListOptions& options) {
+ metatensor_torch::register_autograd_neighbors(system, neighbors, options_.check_consistency);
+ system->add_neighbor_list(options, neighbors);
+}
+
+
diff --git a/src/core/ml_metatensor/sketches_espresso_bindings/compute.hpp b/src/core/ml_metatensor/sketches_espresso_bindings/compute.hpp
new file mode 100644
index 00000000000..bea9df4e5be
--- /dev/null
+++ b/src/core/ml_metatensor/sketches_espresso_bindings/compute.hpp
@@ -0,0 +1,80 @@
+
+torch_metatensor::TensorMapHolder run_model(metatensor_toch::System& system,
+ const metatensor_torch::ModelEvaluationOptions evaluation_options,
+ torch::dtype dtypem
+ torch::Device device) {
+
+
+ // only run the calculation for atoms actually in the current domain
+ auto options = torch::TensorOptions().dtype(torch::kInt32);
+ this->selected_atoms_values = torch::zeros({n_particles, 2}, options);
+ for (int i=0; i(
+ std::vector{"system", "atom"}, mts_data->selected_atoms_values
+ );
+ evaluation_options->set_selected_atoms(selected_atoms->to(device));
+
+ torch::IValue result_ivalue;
+ model->forward({
+ std::vector{system},
+ evaluation_options,
+ check_consistency
+ });
+
+ auto result = result_ivalue.toGenericDict();
+ return result.at("energy").toCustomClass();
+}
+
+double get_energy(torch_metatensor::TensorMapHolder& energy, bool energy_is_per_atom) {
+ auto energy_block = metatensor_torch::TensorMapHolder::block_by_id(energy, 0);
+ auto energy_tensor = energy_block->values();
+ auto energy_detached = energy_tensor.detach().to(torch::kCPU).to(torch::kFloat64);
+ auto energy_samples = energy_block->samples();
+
+ // store the energy returned by the model
+ torch::Tensor global_energy;
+ if (energy_is_per_atom) {
+ assert(energy_samples->size() == 2);
+ assert(energy_samples->names()[0] == "system");
+ assert(energy_samples->names()[1] == "atom");
+
+ auto samples_values = energy_samples->values().to(torch::kCPU);
+ auto samples = samples_values.accessor();
+
+// int n_atoms = selected_atoms_values.sizes();
+// assert(samples_values.sizes() == selected_atoms_values.sizes());
+
+ auto energies = energy_detached.accessor();
+ global_energy = energy_detached.sum(0);
+ assert(energy_detached.sizes() == std::vector({1}));
+ } else {
+ assert(energy_samples->size() == 1);
+ assert(energy_samples->names()[0] == "system");
+
+ assert(energy_detached.sizes() == std::vector({1, 1}));
+ global_energy = energy_detached.reshape({1});
+ }
+
+ return global_energy.item();
+}
+
+
+torch::Tensor get_forces(torch_metatensor::TensorMap& energy, torch_metatensor::System& system) {
+ // reset gradients to zero before calling backward
+ system->positions.mutable_grad() = torch::Tensor();
+
+ auto energy_block = metatensor_torch::TensorMapHolder::block_by_id(energy, 0);
+ auto energy_tensor = energy_block->values();
+
+ // compute forces/virial with backward propagation
+ energy_tensor.backward(-torch::ones_like(energy_tensor));
+ auto forces_tensor = sytem->positions.grad();
+ assert(forces_tensor.is_cpu() && forces_tensor.scalar_type() == torch::kFloat64);
+ return forces_tensor;
+}
+
+
+
diff --git a/src/core/ml_metatensor/sketches_espresso_bindings/stub.cpp b/src/core/ml_metatensor/sketches_espresso_bindings/stub.cpp
new file mode 100644
index 00000000000..806d0ec8a31
--- /dev/null
+++ b/src/core/ml_metatensor/sketches_espresso_bindings/stub.cpp
@@ -0,0 +1,15 @@
+#include "config/config.hpp"
+
+#ifdef METATENSOR
+#undef CUDA
+#include
+#include
+#include
+
+#if TORCH_VERSION_MAJOR >= 2
+ #include
+#endif
+
+#include
+#include
+#endif
diff --git a/src/core/ml_metatensor/sketches_espresso_bindings/stub.hpp b/src/core/ml_metatensor/sketches_espresso_bindings/stub.hpp
new file mode 100644
index 00000000000..e6828ec9c52
--- /dev/null
+++ b/src/core/ml_metatensor/sketches_espresso_bindings/stub.hpp
@@ -0,0 +1,9 @@
+#include "config/config.hpp"
+
+#ifdef METATENSOR
+#undef CUDA
+
+#include
+#include
+#include
+#endif
diff --git a/src/core/ml_metatensor/sketches_espresso_bindings/system.hpp b/src/core/ml_metatensor/sketches_espresso_bindings/system.hpp
new file mode 100644
index 00000000000..d9a5dac3594
--- /dev/null
+++ b/src/core/ml_metatensor/sketches_espresso_bindings/system.hpp
@@ -0,0 +1,40 @@
+using ParticleTypeMap = std::unordered_map;
+
+metatensor_torch::System
+ : system_from_lmp(const TypeMapping &type_map,
+ const std::vector &engine_positions,
+ const std::vector &engine_particle_types,
+ const Vector3d &box_size, bool do_virial,
+ torch::ScalarType dtype, torch::Device device) {
+ auto tensor_options =
+ torch::TensorOptions().dtype(torch::kFloat64).device(torch::kCPU);
+ if (engine_positions % 3 != 0)
+ throw std::runtime_error(
+        "Position array must have a multiple of 3 elements");
+ const int n_particles = engine_positions.size() / 3;
+ if (engine_particle_types.size() != n_particles)
+ throw std::runtime_error(
+        "Length of position and particle type arrays inconsistent");
+
+ auto positions = torch::from_blob(
+ engien_positions.data(), {n_particles, 3},
+ // requires_grad=true since we always need gradients w.r.t. positions
+ tensor_options.requires_grad(true));
+ std::vector particle_types_ml;
+ std::ranges::transform(
+ particle_types_engine, std::back_inserter(particle_types_ml),
+ [&type_map](int engine_type) { return type_map.at(engine_type); });
+
+ auto particle_types_ml_tensor =
+ Torch::Tensor(particle_types_ml, tensor_options.requires_grad(true));
+
+ auto cell = torch::zeros({3, 3}, tensor_options);
+ for (int i : {0, 1, 2})
+ cell[i][i] = box_size[i];
+
+ positions.to(dtype).to(device);
+ cell = cell.to(dtype).to(device);
+
+ return system = torch::make_intrusive(
+ particle_types_ml_tensor.to(device), positions, cell);
+}
diff --git a/src/core/ml_metatensor/stub.cpp b/src/core/ml_metatensor/stub.cpp
new file mode 100644
index 00000000000..cae8b586a44
--- /dev/null
+++ b/src/core/ml_metatensor/stub.cpp
@@ -0,0 +1,23 @@
+#include "config/config.hpp"
+
+#ifdef ESPRESSO_METATENSOR
+#undef CUDA
+#include
+#include
+#include
+
+#if TORCH_VERSION_MAJOR >= 2
+#include
+#endif
+
+#include
+#include
+
+#include "model.hpp"
+#include "stub.hpp"
+
+std::string load_metadata(const std::string &path) {
+ MetatensorModel model(path);
+ return model.print_metadata();
+}
+#endif
diff --git a/src/core/ml_metatensor/stub.hpp b/src/core/ml_metatensor/stub.hpp
new file mode 100644
index 00000000000..8508babdc92
--- /dev/null
+++ b/src/core/ml_metatensor/stub.hpp
@@ -0,0 +1,12 @@
+#include "config/config.hpp"
+
+#ifdef ESPRESSO_METATENSOR
+#undef CUDA
+
+#include
+#include
+#include
+
+std::string load_metadata(const std::string &path);
+
+#endif
diff --git a/src/core/ml_metatensor/system.cpp b/src/core/ml_metatensor/system.cpp
new file mode 100644
index 00000000000..9782e8a825f
--- /dev/null
+++ b/src/core/ml_metatensor/system.cpp
@@ -0,0 +1 @@
+#include "system.hpp"
diff --git a/src/core/ml_metatensor/system.hpp b/src/core/ml_metatensor/system.hpp
new file mode 100644
index 00000000000..0c3dfa5452d
--- /dev/null
+++ b/src/core/ml_metatensor/system.hpp
@@ -0,0 +1,48 @@
+#include "ATen/core/TensorBody.h"
+#include "metatensor/torch/atomistic/system.hpp"
+#include "utils/Vector.hpp"
+#include
+
+using ParticleTypeMap = std::unordered_map;
+
+metatensor_torch::System ::system_from_lmp(
+ const ParticleTypeMap &type_map, std::vector &engine_positions,
+ const std::vector
+ &engine_particle_types, // TODO: This should be std::vector?
+ const Utils::Vector3d &box_size, bool do_virial, torch::ScalarType dtype,
+ torch::Device device) {
+ auto tensor_options =
+ torch::TensorOptions().dtype(torch::kFloat64).device(torch::kCPU);
+ if (engine_positions.size() % 3 != 0)
+ throw std::runtime_error(
+ "Position array must have a multiple of 3 elements");
+ const auto n_particles = static_cast(engine_positions.size()) / 3;
+ if (engine_particle_types.size() != n_particles)
+ throw std::runtime_error(
+ "Length of position and particle type arrays inconsistent");
+
+ auto positions = torch::from_blob(
+ engine_positions.data(), {n_particles, 3},
+ // requires_grad=true since we always need gradients w.r.t. positions
+ tensor_options.requires_grad(true));
+ std::vector particle_types_ml;
+ std::ranges::transform(
+ engine_particle_types, std::back_inserter(particle_types_ml),
+ [&type_map](int engine_type) { return type_map.at(engine_type); });
+
+ auto particle_types_ml_tensor =
+ torch::from_blob(particle_types_ml.data(),
+ {static_cast(particle_types_ml.size())},
+ tensor_options.requires_grad(true));
+
+ auto cell = torch::zeros({3, 3}, tensor_options);
+ for (int i : {0, 1, 2})
+ cell[i][i] = box_size[i];
+
+ positions.to(dtype).to(device);
+ cell = cell.to(dtype).to(device);
+
+ auto system = torch::make_intrusive(
+ particle_types_ml_tensor.to(device), positions, cell);
+ return system;
+}
diff --git a/src/python/espressomd/system.py b/src/python/espressomd/system.py
index af84f506512..2eb3825c928 100644
--- a/src/python/espressomd/system.py
+++ b/src/python/espressomd/system.py
@@ -331,6 +331,11 @@ def volume(self):
return float(np.prod(self.box_l))
+ def get_metadata(self, path):
+ assert_features("METATENSOR")
+ metadata = self.call_method("get_metadata", path=path)
+ return metadata
+
def distance(self, p1, p2):
"""Return the scalar distance between particles, between a particle
and a point or between two points, respecting periodic boundaries.
diff --git a/src/script_interface/system/System.cpp b/src/script_interface/system/System.cpp
index 238903b8c95..0640f26b6e2 100644
--- a/src/script_interface/system/System.cpp
+++ b/src/script_interface/system/System.cpp
@@ -38,6 +38,8 @@
#include "core/system/System.hpp"
#include "core/system/System.impl.hpp"
+#include "core/ml_metatensor/stub.hpp"
+
#include "script_interface/ObjectState.hpp"
#include "script_interface/accumulators/AutoUpdateAccumulators.hpp"
#include "script_interface/analysis/Analysis.hpp"
@@ -440,6 +442,12 @@ Variant System::do_call_method(std::string const &name,
}
return {};
}
+#ifdef ESPRESSO_METATENSOR
+ if (name == "get_metadata") {
+ auto const path = get_value(parameters, "path");
+ return load_metadata(path);
+ }
+#endif
if (name == "number_of_particles") {
auto const type = get_value(parameters, "type");
return ::number_of_particles_with_type(type);
diff --git a/test_metatensor.py b/test_metatensor.py
new file mode 100644
index 00000000000..6f38972d522
--- /dev/null
+++ b/test_metatensor.py
@@ -0,0 +1,5 @@
+import espressomd
+
+system = espressomd.System(box_l=3 * [1])
+
+print(system.get_metadata("lennard-jones.pt"))