From 26e8298d68fe7eb304815b682e693ba4e4bd6530 Mon Sep 17 00:00:00 2001 From: Sebastian Ehlert Date: Tue, 24 Feb 2026 16:06:58 +0100 Subject: [PATCH 1/4] Add example for running Skala directly in Fortran --- .../fortran/ftorch_integration/.gitignore | 3 + .../fortran/ftorch_integration/CMakeLists.txt | 37 ++ .../fortran/ftorch_integration/app/main.f90 | 105 ++++++ .../cmake/skala-dep-versions.cmake | 5 + .../ftorch_integration/cmake/skala-flap.cmake | 23 ++ .../cmake/skala-ftorch.cmake | 18 + .../ftorch_integration/environment-cpu.yml | 14 + .../ftorch_integration/src/skala_ftorch.cxx | 277 ++++++++++++++ .../ftorch_integration/src/skala_ftorch.f90 | 353 ++++++++++++++++++ 9 files changed, 835 insertions(+) create mode 100644 examples/fortran/ftorch_integration/.gitignore create mode 100644 examples/fortran/ftorch_integration/CMakeLists.txt create mode 100644 examples/fortran/ftorch_integration/app/main.f90 create mode 100644 examples/fortran/ftorch_integration/cmake/skala-dep-versions.cmake create mode 100644 examples/fortran/ftorch_integration/cmake/skala-flap.cmake create mode 100644 examples/fortran/ftorch_integration/cmake/skala-ftorch.cmake create mode 100644 examples/fortran/ftorch_integration/environment-cpu.yml create mode 100644 examples/fortran/ftorch_integration/src/skala_ftorch.cxx create mode 100644 examples/fortran/ftorch_integration/src/skala_ftorch.f90 diff --git a/examples/fortran/ftorch_integration/.gitignore b/examples/fortran/ftorch_integration/.gitignore new file mode 100644 index 0000000..4be1abf --- /dev/null +++ b/examples/fortran/ftorch_integration/.gitignore @@ -0,0 +1,3 @@ +_build/ +*.tar.gz +*.pt diff --git a/examples/fortran/ftorch_integration/CMakeLists.txt b/examples/fortran/ftorch_integration/CMakeLists.txt new file mode 100644 index 0000000..4b524de --- /dev/null +++ b/examples/fortran/ftorch_integration/CMakeLists.txt @@ -0,0 +1,37 @@ +cmake_minimum_required(VERSION 3.20 FATAL_ERROR) + +project("Skala" LANGUAGES Fortran C CXX) + 
+include(GNUInstallDirs) +include(FetchContent) +set(FETCHCONTENT_UPDATES_DISCONNECTED ON CACHE BOOL "Disable FC Updates") + +list(PREPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) + +include(skala-dep-versions) +include(skala-ftorch) +include(skala-flap) +find_package(Torch REQUIRED) + +add_executable( + ${PROJECT_NAME} + app/main.f90 + src/skala_ftorch.f90 + src/skala_ftorch.cxx +) +target_link_libraries( + ${PROJECT_NAME} + FTorch::ftorch + FLAP::FLAP + ${TORCH_LIBRARIES} +) + +install( + TARGETS + ${PROJECT_NAME} + DESTINATION + ${CMAKE_INSTALL_BINDIR} +) + +list(REMOVE_AT CMAKE_MODULE_PATH 0) + diff --git a/examples/fortran/ftorch_integration/app/main.f90 b/examples/fortran/ftorch_integration/app/main.f90 new file mode 100644 index 0000000..337ad3f --- /dev/null +++ b/examples/fortran/ftorch_integration/app/main.f90 @@ -0,0 +1,105 @@ +program main + use ftorch, only : torch_tensor, torch_tensor_print, operator(*) + use skala_ftorch, only : skala_model, skala_model_load, skala_feature, skala_tensor_load, & + & skala_tensor_sum, skala_tensor_item_double, skala_dict, skala_dict_new + use flap, only : command_line_interface + implicit none + + type(skala_model) :: model + type(skala_dict) :: input, vxc + type(torch_tensor) :: exc, exc_sum + type(torch_tensor) :: density, grad, kin, grid_coords, grid_weights, coarse_0_atomic_coords + type(torch_tensor) :: dexc_ddensity, dexc_dgrad, dexc_dkin, dexc_dgrid_coords, & + & dexc_dgrid_weights, dexc_dcoarse_0_atomic_coords, vxc_norm + + type(command_line_interface) :: cli + + character(len=:), allocatable :: path, feature_dir + integer :: error + + cli_input: block + character(len=512) :: dummy + call cli%init(description="Test Skala model inference from Fortran") + call cli%add(position=1, required=.true., act="store", error=error, positional=.true.) + if (error /= 0) exit cli_input + call cli%add(position=2, required=.true., act="store", error=error, positional=.true.) 
+ if (error /= 0) exit cli_input + call cli%parse(error=error) + if (error /= 0) exit cli_input + call cli%get(position=1, val=dummy, error=error) + if (error /= 0) exit cli_input + path = trim(dummy) + call cli%get(position=2, val=dummy, error=error) + if (error /= 0) exit cli_input + feature_dir = trim(dummy) + end block cli_input + if (error /= 0) then + print '(a)', cli%error_message + stop 1 + end if + + ! Load the model + print '(a)', "[1] Loading model from "//path + call skala_model_load(model, path) + + print '(a)', "[2] Loading features from "//feature_dir + get_features: block + integer :: ift + do ift = 1, size(model%features) + select case(model%features(ift)) + case(skala_feature%density) + print '(a)', " -> Loading density" + call skala_tensor_load(density, feature_dir//"/density.pt") + case(skala_feature%grad) + print '(a)', " -> Loading grad" + call skala_tensor_load(grad, feature_dir//"/grad.pt") + case(skala_feature%kin) + print '(a)', " -> Loading kin" + call skala_tensor_load(kin, feature_dir//"/kin.pt") + case(skala_feature%grid_coords) + print '(a)', " -> Loading grid_coords" + call skala_tensor_load(grid_coords, feature_dir//"/grid_coords.pt") + case(skala_feature%grid_weights) + print '(a)', " -> Loading grid_weights" + call skala_tensor_load(grid_weights, feature_dir//"/grid_weights.pt") + case(skala_feature%coarse_0_atomic_coords) + print '(a)', " -> Loading coarse_0_atomic_coords" + call skala_tensor_load(coarse_0_atomic_coords, feature_dir//"/coarse_0_atomic_coords.pt") + end select + end do + end block get_features + + ! 
Prepare the input dictionary for the model + print '(a)', "[3] Preparing input dictionary" + call skala_dict_new(input) + if (model%needs_feature(skala_feature%density)) & + call input%insert(skala_feature%density, density) + if (model%needs_feature(skala_feature%grad)) & + call input%insert(skala_feature%grad, grad) + if (model%needs_feature(skala_feature%kin)) & + call input%insert(skala_feature%kin, kin) + if (model%needs_feature(skala_feature%grid_coords)) & + call input%insert(skala_feature%grid_coords, grid_coords) + if (model%needs_feature(skala_feature%grid_weights)) & + call input%insert(skala_feature%grid_weights, grid_weights) + if (model%needs_feature(skala_feature%coarse_0_atomic_coords)) & + call input%insert(skala_feature%coarse_0_atomic_coords, coarse_0_atomic_coords) + + ! Request exc and vxc from the model + print '(a)', "[4] Running model inference" + call model%get_exc_and_vxc(input, exc, vxc) + + print '(a)', "[5] Extracting vxc components" + if (model%needs_feature(skala_feature%density)) & + call vxc%at(skala_feature%density, dexc_ddensity) + if (model%needs_feature(skala_feature%grad)) & + call vxc%at(skala_feature%grad, dexc_dgrad) + if (model%needs_feature(skala_feature%kin)) & + call vxc%at(skala_feature%kin, dexc_dkin) + if (model%needs_feature(skala_feature%grid_coords)) & + call vxc%at(skala_feature%grid_coords, dexc_dgrid_coords) + if (model%needs_feature(skala_feature%grid_weights)) & + call vxc%at(skala_feature%grid_weights, dexc_dgrid_weights) + if (model%needs_feature(skala_feature%coarse_0_atomic_coords)) & + call vxc%at(skala_feature%coarse_0_atomic_coords, dexc_dcoarse_0_atomic_coords) +end program main \ No newline at end of file diff --git a/examples/fortran/ftorch_integration/cmake/skala-dep-versions.cmake b/examples/fortran/ftorch_integration/cmake/skala-dep-versions.cmake new file mode 100644 index 0000000..0fc6628 --- /dev/null +++ b/examples/fortran/ftorch_integration/cmake/skala-dep-versions.cmake @@ -0,0 +1,5 @@ 
+set(Skala_FTorch_URL "https://github.com/Cambridge-ICCS/FTorch/archive/refs/tags/v1.0.0.tar.gz") +set(Skala_FTorch_SHA256 "c4b6741e582623b7ecaecd59d02f779e8a6f6017f8068c85da8a034f468df375") + +set(Skala_FLAP_URL "https://github.com/szaghi/FLAP/releases/download/v1.2.5/FLAP.tar.gz") +set(Skala_FLAP_SHA256 "f2a388898a1ee49a8559822280647eb112d884484cb6169710011daab6dbb501") \ No newline at end of file diff --git a/examples/fortran/ftorch_integration/cmake/skala-flap.cmake b/examples/fortran/ftorch_integration/cmake/skala-flap.cmake new file mode 100644 index 0000000..9a8324a --- /dev/null +++ b/examples/fortran/ftorch_integration/cmake/skala-flap.cmake @@ -0,0 +1,23 @@ +if(NOT DEFINED Skala_FLAP_URL) + include(skala-dep-versions) +endif() +find_package(FLAP QUIET CONFIG) +if(NOT FLAP_FOUND) + include(FetchContent) + + message(STATUS "Could not find FLAP... Building FLAP from source") + message(STATUS "FLAP URL: ${Skala_FLAP_URL}") + + FetchContent_Declare( + flap + URL ${Skala_FLAP_URL} + URL_HASH SHA256=${Skala_FLAP_SHA256} + DOWNLOAD_EXTRACT_TIMESTAMP ON + ) + FetchContent_MakeAvailable(flap) +endif() + +if(NOT TARGET FLAP::FLAP) + add_library(FLAP::FLAP INTERFACE IMPORTED) + target_link_libraries(FLAP::FLAP INTERFACE FLAP) +endif() \ No newline at end of file diff --git a/examples/fortran/ftorch_integration/cmake/skala-ftorch.cmake b/examples/fortran/ftorch_integration/cmake/skala-ftorch.cmake new file mode 100644 index 0000000..3631988 --- /dev/null +++ b/examples/fortran/ftorch_integration/cmake/skala-ftorch.cmake @@ -0,0 +1,18 @@ +if(NOT DEFINED Skala_FTorch_URL) + include(skala-dep-versions) +endif() +find_package(FTorch QUIET) +if(NOT FTorch_FOUND) + include(FetchContent) + + message(STATUS "Could not find FTorch... 
Building FTorch from source") + message(STATUS "FTorch URL: ${Skala_FTorch_URL}") + + FetchContent_Declare( + ftorch + URL ${Skala_FTorch_URL} + URL_HASH SHA256=${Skala_FTorch_SHA256} + DOWNLOAD_EXTRACT_TIMESTAMP ON + ) + FetchContent_MakeAvailable(ftorch) +endif() \ No newline at end of file diff --git a/examples/fortran/ftorch_integration/environment-cpu.yml b/examples/fortran/ftorch_integration/environment-cpu.yml new file mode 100644 index 0000000..5443002 --- /dev/null +++ b/examples/fortran/ftorch_integration/environment-cpu.yml @@ -0,0 +1,14 @@ +name: ftorch-dev +channels: + - conda-forge +dependencies: + # build requirements + - fortran-compiler + - c-compiler + - cxx-compiler + - cmake >=3.20,<4 + - ninja + # host/runtime requirements + - jonquil + - pytorch >=2.0 cpu_* + diff --git a/examples/fortran/ftorch_integration/src/skala_ftorch.cxx b/examples/fortran/ftorch_integration/src/skala_ftorch.cxx new file mode 100644 index 0000000..6b46a17 --- /dev/null +++ b/examples/fortran/ftorch_integration/src/skala_ftorch.cxx @@ -0,0 +1,277 @@ +#include +#include + +typedef void* torch_jit_script_module_t; +typedef void* torch_tensor_t; +typedef int torch_device_t; + +typedef void* skala_dict_t; +typedef void* skala_list_t; + +typedef c10::Dict> SkalaDict; +typedef std::vector SkalaList; +typedef enum SkalaFeature { + Feature_Density = 1, + Feature_Grad = 2, + Feature_Kin = 3, + Feature_GridCoords = 4, + Feature_GridWeights = 5, + Feature_Coarse0AtomicCoords = 6 +} SkalaFeature; + +static inline +void +ctorch_error(const std::string &message, + const std::function &cleanup = nullptr) { + std::cerr << "[ERROR]: " << message << std::endl; + if (cleanup) { + cleanup(); // Perform cleanup actions + } + exit(EXIT_FAILURE); +} + +extern "C" +torch_tensor_t +skala_tensor_load(const char* filename) +{ + std::ifstream input(std::string(filename), std::ios::binary); + if (!input.is_open()) + { + throw std::runtime_error("Failed to open feature file: " + 
std::string(filename)); + } + std::vector bytes( + (std::istreambuf_iterator(input)), + (std::istreambuf_iterator())); + + input.close(); + auto tensor = torch::jit::pickle_load(bytes).toTensor().to(torch::Device(torch::kCPU)); + return new torch::Tensor(std::move(tensor)); +} + +extern "C" +torch_tensor_t +skala_tensor_sum(const torch_tensor_t tensor) +{ + auto t = reinterpret_cast(tensor); + auto sum = t->sum(); + return new torch::Tensor(std::move(sum)); +} + +extern "C" +double +skala_tensor_item_double(const torch_tensor_t tensor) +{ + auto t = reinterpret_cast(tensor); + return t->item(); +} + +extern "C" +torch_jit_script_module_t +skala_model_load(const char *filename, + const bool requires_grad, + SkalaFeature* features) { + torch::AutoGradMode enable_grad(requires_grad); + torch::jit::ExtraFilesMap extra_files{{"features", ""}, {"protocol_version", ""}}; + torch::jit::script::Module *module = nullptr; + try { + module = new torch::jit::script::Module; + *module = + torch::jit::load(filename, torch::Device(torch::kCPU), extra_files); + } catch (const std::exception &e) { + ctorch_error(e.what(), [&]() { delete module; }); + } + + if (std::stoi(extra_files.at("protocol_version")) != 2) { + std::string message = "Unsupported protocol version " + extra_files.at("protocol_version"); + ctorch_error(message, [&]() { delete module; }); + } + + auto feature_str = extra_files.at("features"); + // formatted as ["feature1", "feature2", ...] 
parse without using a full json library + std::unordered_map feature_keys; + size_t pos = 0; + while ((pos = feature_str.find('"', pos)) != std::string::npos) { + size_t end_pos = feature_str.find('"', pos + 1); + if (end_pos == std::string::npos) break; + auto feature_key = feature_str.substr(pos + 1, end_pos - pos - 1); + if (feature_key == "density") { + feature_keys.insert({feature_key, Feature_Density}); + } else if (feature_key == "grad") { + feature_keys.insert({feature_key, Feature_Grad}); + } else if (feature_key == "kin") { + feature_keys.insert({feature_key, Feature_Kin}); + } else if (feature_key == "grid_coords") { + feature_keys.insert({feature_key, Feature_GridCoords}); + } else if (feature_key == "grid_weights") { + feature_keys.insert({feature_key, Feature_GridWeights}); + } else if (feature_key == "coarse_0_atomic_coords") { + feature_keys.insert({feature_key, Feature_Coarse0AtomicCoords}); + } + pos = end_pos + 1; + } + + if (features != nullptr) { + size_t i = 0; + for (const auto &[key, value] : feature_keys) { + features[i] = value; + ++i; + } + } + + return module; +} + +static inline +at::Tensor +skala_model_forward(torch::jit::script::Module module, const c10::Dict &features) +{ + std::vector args; + std::unordered_map kwargs; + kwargs["mol"] = features; + return module.get_method("get_exc_density")(args, kwargs).toTensor(); +} + +extern "C" +void +skala_model_get_exc(torch_jit_script_module_t module, skala_dict_t input, torch_tensor_t* output) +{ + auto model = static_cast(module); + auto dict = static_cast(input); + + c10::Dict features; + for (const auto& entry : *dict) { + auto tensor = torch::stack(entry.value()); + features.insert(entry.key(), tensor); + } + + auto exc_on_grid = skala_model_forward(*model, features); + *output = new at::Tensor(std::move(exc_on_grid)); +} + +extern "C" +void +skala_model_get_exc_and_vxc(torch_jit_script_module_t module, skala_dict_t input, torch_tensor_t* exc_output, skala_dict_t* grad_output) +{ + 
auto model = static_cast(module); + auto dict = static_cast(input); + + std::vector input_tensors; + std::vector tensor_keys; + c10::Dict features_with_grad; + for (const auto& entry : *dict) { + std::string key = entry.key(); + const auto& values = entry.value(); + std::vector tensors; + for (const auto &value : values) { + auto tensor_with_grad = value.clone().requires_grad_(true); + tensors.push_back(tensor_with_grad); + input_tensors.push_back(tensor_with_grad); + tensor_keys.push_back(key); + } + features_with_grad.insert(key, torch::concat(tensors)); + } + + auto exc_on_grid = skala_model_forward(*model, features_with_grad); + auto exc = (exc_on_grid * features_with_grad.at("grid_weights")).sum(); + + auto grad_tensors = torch::autograd::grad( + {exc}, // outputs + input_tensors, // inputs + /*grad_outputs=*/{}, // grad_outputs (defaults to ones) + /*retain_graph=*/false, // retain_graph, necessary for higher-order grads + /*create_graph=*/false, // create_graph, necessary for higher-order grads + /*allow_unused=*/true // allow_unused + ); + + std::unordered_map> grad_map; + for (size_t i = 0; i < tensor_keys.size(); ++i) { + grad_map[tensor_keys[i]].push_back(grad_tensors[i]); + } + c10::Dict> gradients; + for (auto& [key, value] : grad_map) { + gradients.insert(key, std::move(value)); + } + + *exc_output = new at::Tensor(std::move(exc_on_grid)); + *grad_output = new SkalaDict(std::move(gradients)); +} + +extern "C" +skala_dict_t +skala_dict_new() +{ + SkalaDict* input = new SkalaDict(); + return static_cast(input); +} + +extern "C" +void +skala_dict_insert(skala_dict_t input, const char* key, const torch_tensor_t* values, size_t size) +{ + auto dict = static_cast(input); + std::vector tensors; + tensors.reserve(size); + for (size_t i = 0; i < size; ++i) { + auto tensor = static_cast(values[i]); + tensors.push_back(*tensor); + } + dict->insert(std::string(key), tensors); +} + +extern "C" +skala_list_t +skala_dict_at(skala_dict_t input, const char* key) +{ + 
auto dict = static_cast(input); + if (!dict->contains(key)) { + std::string message = "Key '" + std::string(key) + "' not found in SkalaDict"; + ctorch_error(message, []() {}); + } + auto tensors = (*dict).at(key); + if (tensors.empty()) { + std::string message = "No tensors found for key '" + std::string(key) + "'"; + ctorch_error(message, []() {}); + } + + SkalaList* list = new SkalaList(std::move(tensors)); + return list; +} + +extern "C" +size_t +skala_list_size(skala_list_t input) +{ + auto list = static_cast(input); + return list->size(); +} + +extern "C" +torch_tensor_t +skala_list_at(skala_list_t input, size_t index) +{ + auto list = static_cast(input); + if (index >= list->size()) { + std::string message = "Index " + std::to_string(index) + " out of bounds for SkalaList of size " + std::to_string(list->size()); + ctorch_error(message, []() {}); + } + auto tensor = (*list)[index]; + return new at::Tensor(std::move(tensor)); +} + +extern "C" +void +skala_dict_delete(skala_dict_t input) +{ + if (input == nullptr) return; + auto dict = static_cast(input); + delete dict; +} + +extern "C" +void +skala_list_delete(skala_list_t input) +{ + if (input == nullptr) return; + auto list = static_cast(input); + delete list; +} \ No newline at end of file diff --git a/examples/fortran/ftorch_integration/src/skala_ftorch.f90 b/examples/fortran/ftorch_integration/src/skala_ftorch.f90 new file mode 100644 index 0000000..99dcbf2 --- /dev/null +++ b/examples/fortran/ftorch_integration/src/skala_ftorch.f90 @@ -0,0 +1,353 @@ +module skala_ftorch + use iso_c_binding, only : c_ptr, c_char, c_int, c_size_t, c_bool, c_null_ptr, c_null_char, c_double + use ftorch, only : torch_model, torch_model_delete, torch_tensor + implicit none + private + + public :: skala_model, skala_model_load, skala_feature, skala_tensor_load, & + & skala_tensor_sum, skala_tensor_item_double, skala_dict, skala_dict_new + + type :: skala_feature_enum + integer :: density = 1 + integer :: grad = 2 + integer :: 
kin = 3 + integer :: grid_coords = 4 + integer :: grid_weights = 5 + integer :: coarse_0_atomic_coords = 6 + integer :: max_feature = 6 + end type skala_feature_enum + type(skala_feature_enum), parameter :: skala_feature = skala_feature_enum() + + type :: skala_dict + type(c_ptr) :: p = c_null_ptr + contains + generic :: at => at_one, at_vec + procedure, private :: at_one => skala_dict_at_one + procedure, private :: at_vec => skala_dict_at_vec + generic :: insert => insert_vec, insert_one + procedure, private :: insert_vec => skala_dict_insert_vec + procedure, private :: insert_one => skala_dict_insert_one + final :: skala_dict_delete + end type skala_dict + + type, extends(torch_model) :: skala_model + integer(c_int), allocatable :: features(:) + contains + procedure :: get_exc => skala_model_get_exc + procedure :: get_exc_and_vxc => skala_model_get_exc_and_vxc + procedure :: needs_feature => skala_model_needs_feature + final :: skala_model_delete + end type skala_model + +contains + + subroutine skala_model_load(model, path, requires_grad) + type(skala_model), intent(out) :: model + character(len=*), intent(in) :: path + logical, intent(in), optional :: requires_grad + + interface + function load_skala_model_c(path, requires_grad, features) result(model) & + & bind(c, name="skala_model_load") + import :: c_ptr, c_char, c_int, c_bool + character(kind=c_char), intent(in) :: path(*) + logical(c_bool), value :: requires_grad + integer(c_int), intent(out) :: features(*) + type(c_ptr) :: model + end function load_skala_model_c + end interface + + logical(c_bool) :: requires_grad_c + integer(c_int) :: features(skala_feature%max_feature) + + requires_grad_c = .false. 
+ if (present(requires_grad)) requires_grad_c = requires_grad + + features(:) = 0 + model%p = load_skala_model_c(to_c_str(path), requires_grad_c, features) + model%features = pack(features, features > 0) + end subroutine skala_model_load + + subroutine skala_tensor_load(tensor, path) + type(torch_tensor), intent(out) :: tensor + character(len=*), intent(in) :: path + + interface + function skala_tensor_load_c(path) result(tensor) bind(c, name="skala_tensor_load") + import :: c_ptr, c_char + character(kind=c_char), intent(in) :: path(*) + type(c_ptr) :: tensor + end function skala_tensor_load_c + end interface + + tensor%p = skala_tensor_load_c(to_c_str(path)) + end subroutine skala_tensor_load + + subroutine skala_tensor_sum(tensor, sum) + type(torch_tensor), intent(in) :: tensor + type(torch_tensor), intent(out) :: sum + + interface + function skala_tensor_sum_c(tensor) result(sum) bind(c, name="skala_tensor_sum") + import :: c_ptr, c_double + type(c_ptr), value :: tensor + type(c_ptr) :: sum + end function skala_tensor_sum_c + end interface + + sum%p = skala_tensor_sum_c(tensor%p) + end subroutine skala_tensor_sum + + function skala_tensor_item_double(tensor) result(value) + type(torch_tensor), intent(in) :: tensor + real(c_double) :: value + + interface + function skala_tensor_item_double_c(tensor) result(value) bind(c, name="skala_tensor_item_double") + import :: c_ptr, c_double + type(c_ptr), value :: tensor + real(c_double) :: value + end function skala_tensor_item_double_c + end interface + + value = skala_tensor_item_double_c(tensor%p) + end function skala_tensor_item_double + + subroutine skala_model_get_exc(model, input, exc) + class(skala_model), intent(inout) :: model + type(skala_dict), intent(in) :: input + type(torch_tensor), intent(out) :: exc + + interface + subroutine skala_model_get_exc_c(model_c, input_c, output_c) bind(c, name="skala_model_get_exc") + import :: c_ptr + type(c_ptr), value :: model_c + type(c_ptr), value :: input_c + type(c_ptr), 
intent(out) :: output_c + end subroutine skala_model_get_exc_c + end interface + + call skala_model_get_exc_c(model%torch_model%p, input%p, exc%p) + end subroutine skala_model_get_exc + + subroutine skala_model_get_exc_and_vxc(model, input, exc, vxc) + class(skala_model), intent(inout) :: model + type(skala_dict), intent(in) :: input + type(torch_tensor), intent(out) :: exc + type(skala_dict), intent(out) :: vxc + + interface + subroutine skala_model_get_exc_and_vxc_c(model_c, input_c, exc_c, vxc_c) bind(c, name="skala_model_get_exc_and_vxc") + import :: c_ptr + type(c_ptr), value :: model_c + type(c_ptr), value :: input_c + type(c_ptr), intent(out) :: exc_c + type(c_ptr), intent(out) :: vxc_c + end subroutine skala_model_get_exc_and_vxc_c + end interface + + call skala_model_get_exc_and_vxc_c(model%torch_model%p, input%p, exc%p, vxc%p) + end subroutine skala_model_get_exc_and_vxc + + function skala_model_needs_feature(model, feature) result(needs) + class(skala_model), intent(in) :: model + integer, intent(in) :: feature + logical :: needs + + needs = any(model%features == feature) + end function skala_model_needs_feature + + subroutine skala_model_delete(model) + type(skala_model), intent(inout) :: model + + call torch_model_delete(model%torch_model) + end subroutine skala_model_delete + + subroutine skala_dict_new(input) + type(skala_dict), intent(out) :: input + + interface + function skala_dict_new_c() result(input) bind(c, name="skala_dict_new") + import :: c_ptr + type(c_ptr) :: input + end function skala_dict_new_c + end interface + + input%p = skala_dict_new_c() + end subroutine skala_dict_new + + subroutine skala_feature_key(feature, key) + integer, intent(in) :: feature + character(kind=c_char, len=:), allocatable, intent(out) :: key + select case(feature) + case default + error stop "Unknown feature" + case (skala_feature%density) + key = "density" + case (skala_feature%grad) + key = "grad" + case (skala_feature%kin) + key = "kin" + case 
(skala_feature%grid_coords) + key = "grid_coords" + case (skala_feature%grid_weights) + key = "grid_weights" + case (skala_feature%coarse_0_atomic_coords) + key = "coarse_0_atomic_coords" + end select + end subroutine skala_feature_key + + pure function to_c_str(str) result(c_str) + character(len=*), intent(in) :: str + character(kind=c_char) :: c_str(len(str)+1) + + c_str = transfer(str // c_null_char, [character(kind=c_char)::], len(str)+1) + end function to_c_str + + subroutine skala_dict_insert_one(dict, feature, tensor) + class(skala_dict), intent(inout) :: dict + integer, intent(in) :: feature + type(torch_tensor), intent(in) :: tensor + + interface + subroutine skala_dict_insert_c(dict, feature, tensors, ntensors) bind(c, name="skala_dict_insert") + import :: c_ptr, c_size_t, c_char + type(c_ptr), value :: dict + character(kind=c_char), intent(in) :: feature(*) + type(c_ptr), intent(in) :: tensors(*) + integer(c_size_t), value :: ntensors + end subroutine skala_dict_insert_c + end interface + + character(kind=c_char, len=:), allocatable :: key + type(c_ptr) :: tensor_ptr(1) + + tensor_ptr(1) = tensor%p + + call skala_feature_key(feature, key) + call skala_dict_insert_c(dict%p, to_c_str(key), tensor_ptr, 1_c_size_t) + end subroutine skala_dict_insert_one + + subroutine skala_dict_insert_vec(dict, feature, tensors) + class(skala_dict), intent(inout) :: dict + integer, intent(in) :: feature + type(torch_tensor), intent(in) :: tensors(:) + + interface + subroutine skala_dict_insert_c(dict, feature, tensors, ntensors) bind(c, name="skala_dict_insert") + import :: c_ptr, c_size_t, c_char + type(c_ptr), value :: dict + character(kind=c_char), intent(in) :: feature(*) + type(c_ptr), intent(in) :: tensors(*) + integer(c_size_t), value :: ntensors + end subroutine skala_dict_insert_c + end interface + + character(kind=c_char, len=:), allocatable :: key + type(c_ptr), allocatable :: tensor_ptrs(:) + integer :: iptr + + allocate(tensor_ptrs(size(tensors))) + do iptr = 
1, size(tensors) + tensor_ptrs(iptr) = tensors(iptr)%p + end do + + call skala_feature_key(feature, key) + call skala_dict_insert_c(dict%p, to_c_str(key), tensor_ptrs, size(tensor_ptrs, kind=c_size_t)) + end subroutine skala_dict_insert_vec + + subroutine skala_dict_at_one(dict, feature, tensor) + class(skala_dict), intent(in) :: dict + integer, intent(in) :: feature + type(torch_tensor), intent(out) :: tensor + + interface + function skala_dict_at_c(dict, key) result(list) bind(c, name="skala_dict_at") + import :: c_ptr, c_char + type(c_ptr), value :: dict + character(kind=c_char), intent(in) :: key(*) + type(c_ptr) :: list + end function skala_dict_at_c + + function skala_list_at_c(list, index) result(tensor) bind(c, name="skala_list_at") + import :: c_ptr, c_size_t + type(c_ptr), value :: list + integer(c_size_t), value :: index + type(c_ptr) :: tensor + end function skala_list_at_c + + subroutine skala_list_delete_c(list) bind(c, name="skala_list_delete") + import :: c_ptr + type(c_ptr), value :: list + end subroutine skala_list_delete_c + end interface + + character(kind=c_char, len=:), allocatable :: key + type(c_ptr) :: list + + call skala_feature_key(feature, key) + list = skala_dict_at_c(dict%p, to_c_str(key)) + tensor%p = skala_list_at_c(list, 0_c_size_t) + call skala_list_delete_c(list) + end subroutine skala_dict_at_one + + subroutine skala_dict_at_vec(dict, feature, tensors, ntensors) + class(skala_dict), intent(in) :: dict + integer, intent(in) :: feature + type(torch_tensor), intent(out) :: tensors(:) + integer, intent(out) :: ntensors + + interface + function skala_dict_at_c(dict, key) result(list) bind(c, name="skala_dict_at") + import :: c_ptr, c_char + type(c_ptr), value :: dict + character(kind=c_char), intent(in) :: key(*) + type(c_ptr) :: list + end function skala_dict_at_c + + function skala_list_size_c(list) result(n) bind(c, name="skala_list_size") + import :: c_ptr, c_size_t + type(c_ptr), value :: list + integer(c_size_t) :: n + end 
function skala_list_size_c + + function skala_list_at_c(list, index) result(tensor) bind(c, name="skala_list_at") + import :: c_ptr, c_size_t + type(c_ptr), value :: list + integer(c_size_t), value :: index + type(c_ptr) :: tensor + end function skala_list_at_c + + subroutine skala_list_delete_c(list) bind(c, name="skala_list_delete") + import :: c_ptr + type(c_ptr), value :: list + end subroutine skala_list_delete_c + end interface + + character(kind=c_char, len=:), allocatable :: key + type(c_ptr) :: list + integer :: i + + call skala_feature_key(feature, key) + list = skala_dict_at_c(dict%p, to_c_str(key)) + ntensors = int(skala_list_size_c(list)) + do i = 1, min(ntensors, size(tensors)) + tensors(i)%p = skala_list_at_c(list, int(i - 1, c_size_t)) + end do + call skala_list_delete_c(list) + end subroutine skala_dict_at_vec + + subroutine skala_dict_delete(dict) + type(skala_dict), intent(inout) :: dict + + interface + subroutine skala_dict_delete_c(dict) bind(c, name="skala_dict_delete") + import :: c_ptr + type(c_ptr), value :: dict + end subroutine skala_dict_delete_c + end interface + + call skala_dict_delete_c(dict%p) + end subroutine skala_dict_delete + +end module skala_ftorch \ No newline at end of file From e1a10cea18b27fe232d56833ef20833ee3e6fd09 Mon Sep 17 00:00:00 2001 From: Sebastian Ehlert Date: Wed, 25 Feb 2026 08:26:07 +0100 Subject: [PATCH 2/4] Add output sections for the main program --- README.md | 7 +- docs/ftorch.rst | 295 ++++++++++++++++++ docs/index.rst | 1 + .../fortran/ftorch_integration/CMakeLists.txt | 2 - .../fortran/ftorch_integration/app/main.f90 | 175 +++++++++-- .../cmake/skala-dep-versions.cmake | 5 +- .../ftorch_integration/cmake/skala-flap.cmake | 23 -- .../ftorch_integration/src/skala_ftorch.cxx | 54 ++++ .../ftorch_integration/src/skala_ftorch.f90 | 156 ++++++++- 9 files changed, 662 insertions(+), 56 deletions(-) create mode 100644 docs/ftorch.rst delete mode 100644 examples/fortran/ftorch_integration/cmake/skala-flap.cmake 
diff --git a/README.md b/README.md index 1631b8c..709ed9e 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,12 @@ Learn more about Skala in our [ArXiv paper](https://arxiv.org/abs/2506.14665). This repository contains two main components: 1. The Python package `skala`, which is also distributed [on PyPI](https://pypi.org/project/skala/) and contains a PyTorch implementation of the Skala model, its hookups to quantum chemistry packages [PySCF](https://pyscf.org/), [GPU4PySCF](https://pyscf.org/user/gpu.html) and [ASE](https://ase-lib.org/). -2. An example of using Skala in C++ CPU applications through LibTorch, see [`examples/cpp/cpp_integration`](examples/cpp/cpp_integration). +2. Example of using Skala applications through LibTorch and GauXC, the following examples are available + - [Skala in C++ with libtorch](examples/cpp/cpp_integration) + - [Skala in Fortran with FTorch](https://microsoft.github.io/skala/ftorch) + - [Skala in C++ with GauXC](https://microsoft.github.io/skala/gauxc/cpp-library) + - [Skala in C with GauXC](https://microsoft.github.io/skala/gauxc/c-library) + - [Skala in Fortran with GauXC](https://microsoft.github.io/skala/gauxc/fortran-library) ### Skala in Azure AI Foundry diff --git a/docs/ftorch.rst b/docs/ftorch.rst new file mode 100644 index 0000000..d4c2d00 --- /dev/null +++ b/docs/ftorch.rst @@ -0,0 +1,295 @@ +Using Skala in Fortran with FTorch +================================== + +This page provides an example of how to use Skala in Fortran with the FTorch library. +The example demonstrates how to load a Skala model, prepare input features, compute the exchange-correlation energy and potential, and access the results. + + +Setting up development environment +---------------------------------- + +For this example, we will be using mamba to manage our environment and dependencies. + +.. 
literalinclude:: ../examples/fortran/ftorch_integration/environment-cpu.yml + :language: yaml + :caption: environment.yml + +Create the environment and install dependencies: + +.. code-block:: bash + + mamba env create -n skala-dev -f environment.yml + mamba activate skala-dev + + +Build system setup +------------------ + +For building the Fortran application, we will use CMake. +We will use the following directory structure and files: + +.. code-block:: text + + skala/ + ├── CMakeLists.txt + ├── environment.yml + ├── app/ + │ └── main.f90 + ├── cmake/ + │ ├── skala-dep-versions.cmake + │ └── skala-ftorch.cmake + └── src/ + ├── skala_ftorch.cxx + └── skala_ftorch.f90 + +For the main ``CMakeLists.txt`` file, we will set up the project and include the necessary CMake modules for Skala and FTorch: + +.. literalinclude:: ../examples/fortran/ftorch_integration/CMakeLists.txt + :language: cmake + :caption: CMakeLists.txt + +To ensure that we have the correct versions of our dependencies, we will include a CMake module that specifies the versions of Skala and FTorch: + +.. literalinclude:: ../examples/fortran/ftorch_integration/cmake/skala-dep-versions.cmake + :language: cmake + :caption: cmake/skala-dep-versions.cmake + +Next, we will include the CMake modules for Skala and FTorch, which will handle finding the libraries and setting up the necessary include directories and link targets: + +.. literalinclude:: ../examples/fortran/ftorch_integration/cmake/skala-ftorch.cmake + :language: cmake + :caption: cmake/skala-ftorch.cmake + + +FTorch addons +------------- + +We create a number of extensions to the FTorch library in the ``src/`` directory, which provide the necessary Fortran bindings to interact with Skala. +However, we will not go into the details of these files here, as they are primarily focused on the Fortran-C++ interoperability. + +.. 
dropdown:: Fortran bindings for Skala + + The files ``skala_ftorch.cxx`` and ``skala_ftorch.f90`` contain the C++ and Fortran code, respectively, that define the bindings between Skala and FTorch. + These files include functions for loading Skala models, preparing input features, and computing exchange-correlation energies and potentials. + + .. literalinclude:: ../examples/fortran/ftorch_integration/src/skala_ftorch.cxx + :language: c++ + :caption: src/skala_ftorch.cxx + + .. literalinclude:: ../examples/fortran/ftorch_integration/src/skala_ftorch.f90 + :language: fortran + :caption: src/skala_ftorch.f90 + + +Fortran application +------------------- + +Finally, we have the Fortran application itself, which demonstrates how to use the Skala bindings to compute exchange-correlation energies and potentials. +We start the main program with the necessary module imports and variable declarations: + +.. literalinclude:: ../examples/fortran/ftorch_integration/app/main.f90 + :language: fortran + :caption: app/main.f90 + :lines: 1-17 + +First, we obtain the command line arguments for the model path and feature directory, and check that they are provided: + +.. literalinclude:: ../examples/fortran/ftorch_integration/app/main.f90 + :language: fortran + :caption: app/main.f90 + :lines: 19-29 + +We define a small contained helper procedure to read the command line arguments: + +.. literalinclude:: ../examples/fortran/ftorch_integration/app/main.f90 + :language: fortran + :caption: app/main.f90 + :lines: 211-232 + +The main types provided by the Skala bindings are the ``skala_model`` type, which extends the ``torch_model`` provided by FTorch and has a custom ``skala_model_load`` procedure for loading the Skala model and its meta data. + +.. 
literalinclude:: ../examples/fortran/ftorch_integration/app/main.f90 + :language: fortran + :caption: app/main.f90 + :lines: 31-33 + +The input to the Skala model is passed via a dictionary of tensors, which we prepare by loading the necessary features from disk and converting them to the appropriate format. + +.. literalinclude:: ../examples/fortran/ftorch_integration/app/main.f90 + :language: fortran + :caption: app/main.f90 + :lines: 35-60 + +.. note:: + + To export the features from Python, you can use the provided ``prepare_inputs.py`` script. + + .. literalinclude:: ../examples/cpp/cpp_integration/prepare_inputs.py + :language: python + :caption: prepare_inputs.py + +.. tip:: + + Here we load the features directly from torch ``.pt`` files, in your application you may want to create them directly from Fortran arrays as supported in FTorch. + +To place the features in the correct format for Skala, we add them to the input dictionary with the appropriate keys. + +.. literalinclude:: ../examples/fortran/ftorch_integration/app/main.f90 + :language: fortran + :caption: app/main.f90 + :lines: 62-76 + +.. note:: + + The input dictionary accepts both single tensors and lists of tensors. + If your program operates on multiple grids, you can pass those together to the input dictionary and Skala will handle them correctly. + + .. code-block:: fortran + + type(skala_dict) :: input + type(torch_tensor) :: grid_coords(3), grid_weights(3) + ! Load the grid features into the arrays + call input%insert(skala_feature%grid_coords, grid_coords) + call input%insert(skala_feature%grid_weights, grid_weights) + +With this we can now compute the exchange-correlation energy and potential by calling the Skala model with the prepared inputs. + +.. 
literalinclude:: ../examples/fortran/ftorch_integration/app/main.f90 + :language: fortran + :caption: app/main.f90 + :lines: 78-80 + +To get the exchange-correlation energy, we need to weight the exc values by the grid weights and sum over the grid points, which we can do using the provided tensor operations. + +.. literalinclude:: ../examples/fortran/ftorch_integration/app/main.f90 + :language: fortran + :caption: app/main.f90 + :lines: 82-89 + +.. note:: + + The used FTorch version 1.0.0 does not yet support the ``torch_tensor_sum`` operation, therefore we provide an implementation ourselves. + Future versions of FTorch might cover more features which are provided by the FTorch extensions in the ``src/`` directory, so be sure to check the documentation for the latest updates. + +Now we can build our application using CMake: + +.. code-block:: bash + + cmake -B build -S . -GNinja + cmake --build build + +To evaluate Skala, we download the model checkpoint from HuggingFace using the ``hf`` CLI from the ``huggingface_hub`` package: + +.. code-block:: shell + + hf download microsoft/skala skala-1.0.fun --local-dir . + +And run the application, passing the path to the Skala model and the feature directory as command line arguments. + +.. code-block:: bash + + ./build/Skala skala-1.0.fun features + +The output for the H2 molecule with the def2-QZVP basis set should look like this: + +.. code-block:: text + + [1] Loading model from skala-1.0.fun + [2] Loading features from H2-def2qzvp + -> Loading coarse_0_atomic_coords + -> Loading grad + -> Loading grid_coords + -> Loading kin + -> Loading grid_weights + -> Loading density + [3] Preparing input dictionary + [4] Running model inference + [5] Computing XC energy = sum(exc * grid_weights) + -> E_xc = -6.23580775902096E-01 + +The ``get_exc_vxc`` procedure computes the exchange-correlation energy and potential, which we can then access from the returned dictionary. 
+The potential terms are stored under the same keys as the input features and can be extracted as tensors. + +.. literalinclude:: ../examples/fortran/ftorch_integration/app/main.f90 + :language: fortran + :caption: app/main.f90 + :lines: 91-103 + +We can use those tensors for further processing in our application, for example to compute the norm of the potential + +.. literalinclude:: ../examples/fortran/ftorch_integration/app/main.f90 + :language: fortran + :caption: app/main.f90 + :lines: 105-139 + +Or by converting them to Fortran arrays and using the built-in array operations. + +.. literalinclude:: ../examples/fortran/ftorch_integration/app/main.f90 + :language: fortran + :caption: app/main.f90 + :lines: 141-209 + +We rebuild the application to include the latest changes: + +.. code-block:: bash + + cmake --build build + +Finally, we can run the application again to see the updated output with the computed potential values. + +.. code-block:: bash + + ./build/Skala skala-1.0.fun features + +In the output we can see the computed exchange-correlation energy as well as the mean values of the potential components, and the raw tensor data for each component. + +.. 
code-block:: text + + [1] Loading model from skala-1.0.fun + [2] Loading features from H2-def2qzvp + -> Loading coarse_0_atomic_coords + -> Loading grad + -> Loading grid_coords + -> Loading kin + -> Loading grid_weights + -> Loading density + [3] Preparing input dictionary + [4] Running model inference + [5] Computing XC energy = sum(exc * grid_weights) + -> E_xc = -6.23580775902096E-01 + [6] Extracting vxc components + [7] Gradient means (dexc/dx) + -> mean(dexc/d_density) = -5.22209689934698E-03 + -> mean(dexc/d_grad) = -5.63822012469897E-12 + -> mean(dexc/d_kin) = -5.74746008322732E-04 + -> mean(dexc/d_grid_coords) = -2.86873728034485E-14 + -> mean(dexc/d_grid_weights) = -3.47603697347615E-02 + -> mean(dexc/d_coarse_0_atomic_coords) = 2.81365752618854E-10 + [8] Accessing tensor data as Fortran arrays + -> exc: shape = (19616) + [ -1.40859407629398E-11 -5.02191290297704E-14 -4.82271929094491E-19 ...] + -> dexc/d_density: shape = (19616, 2) + [[ -1.41238001293684E-02 -7.07236361510544E-03 -1.22373184625014E-03 ...] + [ -1.41238001293684E-02 -7.07236361510544E-03 -1.22373184625014E-03 ...]] + -> dexc/d_grad: shape = (19616, 3, 2) + [[[ 2.54671362468241E-15 3.18318623003652E-19 2.04989634182288E-27 ...] + [ 2.54671362468241E-15 3.18318623003652E-19 2.04989634182288E-27 ...]] + [[ ... ]]] + -> dexc/d_kin: shape = (19616, 2) + [[ -2.80759310169539E-07 -2.05221067626510E-09 -6.12585093687525E-14 ...] + [ -2.80759310169539E-07 -2.05221067626510E-09 -6.12585093687525E-14 ...]] + -> dexc/d_grid_coords: shape = (3, 19616) + [[ 0.00000000000000E+00 0.00000000000000E+00 0.00000000000000E+00 ...] + [ 0.00000000000000E+00 0.00000000000000E+00 0.00000000000000E+00 ...]] + -> dexc/d_grid_weights: shape = (19616) + [ -1.40859407629398E-11 -5.02191290297704E-14 -4.82271929094491E-19 ...] 
+ -> dexc/d_coarse_0_atomic_coords: shape = (3, 2) + [[ -4.21657770765388E-10 1.95481841380307E-10 -4.14780685154337E-03] + [ ...]] + +Summary +------- + +In this example, we have demonstrated how to use Skala in Fortran with the FTorch library. +We have shown how to set up the development environment, build the application using CMake, and run the application to compute exchange-correlation energies and potentials using a Skala model. +This example serves as a starting point for integrating Skala into your Fortran applications using FTorch, and can be extended to include more complex features and functionalities as needed. \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index c926eee..244f7dc 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -26,6 +26,7 @@ Please stay tuned for updates and new releases. pyscf/singlepoint pyscf/scf_settings ase + ftorch foundry gauxc/index diff --git a/examples/fortran/ftorch_integration/CMakeLists.txt b/examples/fortran/ftorch_integration/CMakeLists.txt index 4b524de..512a901 100644 --- a/examples/fortran/ftorch_integration/CMakeLists.txt +++ b/examples/fortran/ftorch_integration/CMakeLists.txt @@ -10,7 +10,6 @@ list(PREPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) include(skala-dep-versions) include(skala-ftorch) -include(skala-flap) find_package(Torch REQUIRED) add_executable( @@ -22,7 +21,6 @@ add_executable( target_link_libraries( ${PROJECT_NAME} FTorch::ftorch - FLAP::FLAP ${TORCH_LIBRARIES} ) diff --git a/examples/fortran/ftorch_integration/app/main.f90 b/examples/fortran/ftorch_integration/app/main.f90 index 337ad3f..6a2fb64 100644 --- a/examples/fortran/ftorch_integration/app/main.f90 +++ b/examples/fortran/ftorch_integration/app/main.f90 @@ -1,40 +1,30 @@ program main - use ftorch, only : torch_tensor, torch_tensor_print, operator(*) + use ftorch, only : torch_tensor use skala_ftorch, only : skala_model, skala_model_load, skala_feature, skala_tensor_load, & - & skala_tensor_sum, 
skala_tensor_item_double, skala_dict, skala_dict_new - use flap, only : command_line_interface + & skala_tensor_sum, skala_tensor_mean, skala_tensor_mul, skala_tensor_item_double, & + & skala_tensor_to_array, & + & skala_dict, skala_dict_new + use iso_c_binding, only : c_double implicit none type(skala_model) :: model type(skala_dict) :: input, vxc - type(torch_tensor) :: exc, exc_sum + type(torch_tensor) :: exc type(torch_tensor) :: density, grad, kin, grid_coords, grid_weights, coarse_0_atomic_coords type(torch_tensor) :: dexc_ddensity, dexc_dgrad, dexc_dkin, dexc_dgrid_coords, & & dexc_dgrid_weights, dexc_dcoarse_0_atomic_coords, vxc_norm - type(command_line_interface) :: cli - character(len=:), allocatable :: path, feature_dir - integer :: error cli_input: block - character(len=512) :: dummy - call cli%init(description="Test Skala model inference from Fortran") - call cli%add(position=1, required=.true., act="store", error=error, positional=.true.) - if (error /= 0) exit cli_input - call cli%add(position=2, required=.true., act="store", error=error, positional=.true.) - if (error /= 0) exit cli_input - call cli%parse(error=error) - if (error /= 0) exit cli_input - call cli%get(position=1, val=dummy, error=error) - if (error /= 0) exit cli_input - path = trim(dummy) - call cli%get(position=2, val=dummy, error=error) - if (error /= 0) exit cli_input - feature_dir = trim(dummy) + call get_argument(1, path) + if (.not. allocated(path)) exit cli_input + call get_argument(2, feature_dir) + if (.not. allocated(feature_dir)) exit cli_input end block cli_input - if (error /= 0) then - print '(a)', cli%error_message + if (.not. allocated(path) .or. .not.allocated(feature_dir)) then + call get_argument(0, path) + print '(a)', "Usage: ", path, " " stop 1 end if @@ -89,7 +79,16 @@ program main print '(a)', "[4] Running model inference" call model%get_exc_and_vxc(input, exc, vxc) - print '(a)', "[5] Extracting vxc components" + ! 
Print the exchange-correlation energy + print '(a)', "[5] Computing XC energy = sum(exc * grid_weights)" + exc_weighted: block + type(torch_tensor) :: weighted, weighted_sum + call skala_tensor_mul(exc, grid_weights, weighted) + call skala_tensor_sum(weighted, weighted_sum) + print '(a, es22.14)', " -> E_xc = ", skala_tensor_item_double(weighted_sum) + end block exc_weighted + + print '(a)', "[6] Extracting vxc components" if (model%needs_feature(skala_feature%density)) & call vxc%at(skala_feature%density, dexc_ddensity) if (model%needs_feature(skala_feature%grad)) & @@ -102,4 +101,132 @@ program main call vxc%at(skala_feature%grid_weights, dexc_dgrid_weights) if (model%needs_feature(skala_feature%coarse_0_atomic_coords)) & call vxc%at(skala_feature%coarse_0_atomic_coords, dexc_dcoarse_0_atomic_coords) + + ! Print mean of each gradient component + print '(a)', "[7] Gradient means (dexc/dx)" + print_gradients: block + type(torch_tensor) :: grad_mean + if (model%needs_feature(skala_feature%density)) then + call skala_tensor_mean(dexc_ddensity, grad_mean) + print '(a, es22.14)', " -> mean(dexc/d_density) = ", & + skala_tensor_item_double(grad_mean) + end if + if (model%needs_feature(skala_feature%grad)) then + call skala_tensor_mean(dexc_dgrad, grad_mean) + print '(a, es22.14)', " -> mean(dexc/d_grad) = ", & + skala_tensor_item_double(grad_mean) + end if + if (model%needs_feature(skala_feature%kin)) then + call skala_tensor_mean(dexc_dkin, grad_mean) + print '(a, es22.14)', " -> mean(dexc/d_kin) = ", & + skala_tensor_item_double(grad_mean) + end if + if (model%needs_feature(skala_feature%grid_coords)) then + call skala_tensor_mean(dexc_dgrid_coords, grad_mean) + print '(a, es22.14)', " -> mean(dexc/d_grid_coords) = ", & + skala_tensor_item_double(grad_mean) + end if + if (model%needs_feature(skala_feature%grid_weights)) then + call skala_tensor_mean(dexc_dgrid_weights, grad_mean) + print '(a, es22.14)', " -> mean(dexc/d_grid_weights) = ", & + 
skala_tensor_item_double(grad_mean) + end if + if (model%needs_feature(skala_feature%coarse_0_atomic_coords)) then + call skala_tensor_mean(dexc_dcoarse_0_atomic_coords, grad_mean) + print '(a, es22.14)', " -> mean(dexc/d_coarse_0_atomic_coords) = ", & + skala_tensor_item_double(grad_mean) + end if + end block print_gradients + + ! Demonstrate direct Fortran array access to tensor data + print '(a)', "[8] Accessing tensor data as Fortran arrays" + array_access: block + real(c_double), pointer :: arr1d(:), arr2d(:,:), arr3d(:,:,:) + integer :: i + + ! exc is 1-D (npts) + call skala_tensor_to_array(exc, arr1d) + print '(a, i0, a)', " -> exc: shape = (", size(arr1d), ")" + print '(a, 3es22.14, a)', " [", arr1d(:3), " ...]" + + ! density is 2-D (nspin, npts) + if (model%needs_feature(skala_feature%density)) then + call skala_tensor_to_array(dexc_ddensity, arr2d) + print '(a, i0, a, i0, a)', " -> dexc/d_density: shape = (", & + size(arr2d, 1), ", ", size(arr2d, 2), ")" + print '(a, 3es22.14, a)', & + " [[", arr2d(:3, 1), " ...]", & + " [", arr2d(:3, 2), " ...]]" + end if + + ! grad is 3-D (nspin, 3, npts) + if (model%needs_feature(skala_feature%grad)) then + call skala_tensor_to_array(dexc_dgrad, arr3d) + print '(a, i0, a, i0, a, i0, a)', " -> dexc/d_grad: shape = (", & + size(arr3d, 1), ", ", size(arr3d, 2), ", ", size(arr3d, 3), ")" + print '(a, 3es22.14, a)', & + " [[[", arr3d(:3, 1, 1), " ...]", & + " [", arr3d(:3, 2, 1), " ...]]", & + " [[ ... ]]]" + end if + + ! kin is 2-D (nspin, npts) + if (model%needs_feature(skala_feature%kin)) then + call skala_tensor_to_array(dexc_dkin, arr2d) + print '(a, i0, a, i0, a)', " -> dexc/d_kin: shape = (", & + size(arr2d, 1), ", ", size(arr2d, 2), ")" + print '(a, 3es22.14, a)', & + " [[", arr2d(:3, 1), " ...]", & + " [", arr2d(:3, 2), " ...]]" + end if + + ! 
grid_coords is 2-D (npts, 3) + if (model%needs_feature(skala_feature%grid_coords)) then + call skala_tensor_to_array(dexc_dgrid_coords, arr2d) + print '(a, i0, a, i0, a)', " -> dexc/d_grid_coords: shape = (", & + size(arr2d, 1), ", ", size(arr2d, 2), ")" + print '(a, 3es22.14, a)', & + " [[", arr2d(:3, 1), " ...]", & + " [", arr2d(:3, 2), " ...]]" + end if + + ! grid_weights is 1-D (npts) + if (model%needs_feature(skala_feature%grid_weights)) then + call skala_tensor_to_array(dexc_dgrid_weights, arr1d) + print '(a, i0, a)', " -> dexc/d_grid_weights: shape = (", size(arr1d), ")" + print '(a, 3es22.14, a)', & + " [", arr1d(:3), " ...]" + end if + + ! coarse_0_atomic_coords is 2-D (natoms, 3) + if (model%needs_feature(skala_feature%coarse_0_atomic_coords)) then + call skala_tensor_to_array(dexc_dcoarse_0_atomic_coords, arr2d) + print '(a, i0, a, i0, a)', " -> dexc/d_coarse_0_atomic_coords: shape = (", & + size(arr2d, 1), ", ", size(arr2d, 2), ")" + print '(a, 3es22.14, a)', & + " [[", arr2d(:3, 1), "]", " [ ...]]" + end if + end block array_access + +contains + subroutine get_argument(idx, arg) + integer, intent(in) :: idx + character(len=:), allocatable, intent(out) :: arg + + integer :: length, stat + + call get_command_argument(idx, length=length, status=stat) + if (stat /= 0) return + + allocate(character(len=length) :: arg, stat=stat) + if (stat /= 0) return + + if (length > 0) then + call get_command_argument(idx, arg, status=stat) + if (stat /= 0) then + deallocate(arg) + return + end if + end if + end subroutine get_argument end program main \ No newline at end of file diff --git a/examples/fortran/ftorch_integration/cmake/skala-dep-versions.cmake b/examples/fortran/ftorch_integration/cmake/skala-dep-versions.cmake index 0fc6628..7d1d80c 100644 --- a/examples/fortran/ftorch_integration/cmake/skala-dep-versions.cmake +++ b/examples/fortran/ftorch_integration/cmake/skala-dep-versions.cmake @@ -1,5 +1,2 @@ set(Skala_FTorch_URL 
"https://github.com/Cambridge-ICCS/FTorch/archive/refs/tags/v1.0.0.tar.gz") -set(Skala_FTorch_SHA256 "c4b6741e582623b7ecaecd59d02f779e8a6f6017f8068c85da8a034f468df375") - -set(Skala_FLAP_URL "https://github.com/szaghi/FLAP/releases/download/v1.2.5/FLAP.tar.gz") -set(Skala_FLAP_SHA256 "f2a388898a1ee49a8559822280647eb112d884484cb6169710011daab6dbb501") \ No newline at end of file +set(Skala_FTorch_SHA256 "c4b6741e582623b7ecaecd59d02f779e8a6f6017f8068c85da8a034f468df375") \ No newline at end of file diff --git a/examples/fortran/ftorch_integration/cmake/skala-flap.cmake b/examples/fortran/ftorch_integration/cmake/skala-flap.cmake deleted file mode 100644 index 9a8324a..0000000 --- a/examples/fortran/ftorch_integration/cmake/skala-flap.cmake +++ /dev/null @@ -1,23 +0,0 @@ -if(NOT DEFINED Skala_FLAP_URL) - include(skala-dep-versions) -endif() -find_package(FLAP QUIET CONFIG) -if(NOT FLAP_FOUND) - include(FetchContent) - - message(STATUS "Could not find FLAP... Building FLAP from source") - message(STATUS "FLAP URL: ${Skala_FLAP_URL}") - - FetchContent_Declare( - flap - URL ${Skala_FLAP_URL} - URL_HASH SHA256=${Skala_FLAP_SHA256} - DOWNLOAD_EXTRACT_TIMESTAMP ON - ) - FetchContent_MakeAvailable(flap) -endif() - -if(NOT TARGET FLAP::FLAP) - add_library(FLAP::FLAP INTERFACE IMPORTED) - target_link_libraries(FLAP::FLAP INTERFACE FLAP) -endif() \ No newline at end of file diff --git a/examples/fortran/ftorch_integration/src/skala_ftorch.cxx b/examples/fortran/ftorch_integration/src/skala_ftorch.cxx index 6b46a17..00e97e3 100644 --- a/examples/fortran/ftorch_integration/src/skala_ftorch.cxx +++ b/examples/fortran/ftorch_integration/src/skala_ftorch.cxx @@ -258,6 +258,60 @@ skala_list_at(skala_list_t input, size_t index) return new at::Tensor(std::move(tensor)); } +extern "C" +torch_tensor_t +skala_tensor_mul(const torch_tensor_t a, const torch_tensor_t b) +{ + auto ta = reinterpret_cast(a); + auto tb = reinterpret_cast(b); + auto result = (*ta) * (*tb); + return new 
torch::Tensor(std::move(result)); +} + +extern "C" +torch_tensor_t +skala_tensor_mean(const torch_tensor_t tensor) +{ + auto t = reinterpret_cast(tensor); + auto m = t->mean(); + return new torch::Tensor(std::move(m)); +} + +extern "C" +void* +skala_tensor_data_ptr(const torch_tensor_t tensor) +{ + auto t = reinterpret_cast(tensor); + auto contiguous = t->contiguous().to(torch::kFloat64); + // Replace the tensor in-place so the pointer stays valid + *t = contiguous; + return t->data_ptr(); +} + +extern "C" +int64_t +skala_tensor_ndim(const torch_tensor_t tensor) +{ + auto t = reinterpret_cast(tensor); + return t->ndimension(); +} + +extern "C" +int64_t +skala_tensor_size(const torch_tensor_t tensor, int64_t dim) +{ + auto t = reinterpret_cast(tensor); + return t->size(dim); +} + +extern "C" +int64_t +skala_tensor_numel(const torch_tensor_t tensor) +{ + auto t = reinterpret_cast(tensor); + return t->numel(); +} + extern "C" void skala_dict_delete(skala_dict_t input) diff --git a/examples/fortran/ftorch_integration/src/skala_ftorch.f90 b/examples/fortran/ftorch_integration/src/skala_ftorch.f90 index 99dcbf2..66f5776 100644 --- a/examples/fortran/ftorch_integration/src/skala_ftorch.f90 +++ b/examples/fortran/ftorch_integration/src/skala_ftorch.f90 @@ -1,11 +1,21 @@ module skala_ftorch - use iso_c_binding, only : c_ptr, c_char, c_int, c_size_t, c_bool, c_null_ptr, c_null_char, c_double + use iso_c_binding, only : c_ptr, c_char, c_int, c_int64_t, c_size_t, c_bool, & + & c_null_ptr, c_null_char, c_double, c_f_pointer use ftorch, only : torch_model, torch_model_delete, torch_tensor implicit none private public :: skala_model, skala_model_load, skala_feature, skala_tensor_load, & - & skala_tensor_sum, skala_tensor_item_double, skala_dict, skala_dict_new + & skala_tensor_sum, skala_tensor_mean, skala_tensor_mul, skala_tensor_item_double, & + & skala_tensor_to_array, & + & skala_tensor_numel, skala_tensor_ndim, skala_tensor_size, & + & skala_dict, skala_dict_new + + 
interface skala_tensor_to_array + module procedure skala_tensor_to_array_1d + module procedure skala_tensor_to_array_2d + module procedure skala_tensor_to_array_3d + end interface skala_tensor_to_array type :: skala_feature_enum integer :: density = 1 @@ -113,6 +123,148 @@ end function skala_tensor_item_double_c value = skala_tensor_item_double_c(tensor%p) end function skala_tensor_item_double + subroutine skala_tensor_mul(a, b, result) + type(torch_tensor), intent(in) :: a, b + type(torch_tensor), intent(out) :: result + + interface + function skala_tensor_mul_c(a, b) result(r) bind(c, name="skala_tensor_mul") + import :: c_ptr + type(c_ptr), value :: a, b + type(c_ptr) :: r + end function skala_tensor_mul_c + end interface + + result%p = skala_tensor_mul_c(a%p, b%p) + end subroutine skala_tensor_mul + + subroutine skala_tensor_mean(tensor, mean) + type(torch_tensor), intent(in) :: tensor + type(torch_tensor), intent(out) :: mean + + interface + function skala_tensor_mean_c(tensor) result(mean) bind(c, name="skala_tensor_mean") + import :: c_ptr + type(c_ptr), value :: tensor + type(c_ptr) :: mean + end function skala_tensor_mean_c + end interface + + mean%p = skala_tensor_mean_c(tensor%p) + end subroutine skala_tensor_mean + + function skala_tensor_numel(tensor) result(n) + type(torch_tensor), intent(in) :: tensor + integer(c_int64_t) :: n + + interface + function skala_tensor_numel_c(tensor) result(n) bind(c, name="skala_tensor_numel") + import :: c_ptr, c_int64_t + type(c_ptr), value :: tensor + integer(c_int64_t) :: n + end function skala_tensor_numel_c + end interface + + n = skala_tensor_numel_c(tensor%p) + end function skala_tensor_numel + + function skala_tensor_ndim(tensor) result(n) + type(torch_tensor), intent(in) :: tensor + integer(c_int64_t) :: n + + interface + function skala_tensor_ndim_c(tensor) result(n) bind(c, name="skala_tensor_ndim") + import :: c_ptr, c_int64_t + type(c_ptr), value :: tensor + integer(c_int64_t) :: n + end function 
skala_tensor_ndim_c + end interface + + n = skala_tensor_ndim_c(tensor%p) + end function skala_tensor_ndim + + function skala_tensor_size(tensor, dim) result(n) + type(torch_tensor), intent(in) :: tensor + integer, intent(in) :: dim + integer(c_int64_t) :: n + + interface + function skala_tensor_size_c(tensor, dim) result(n) bind(c, name="skala_tensor_size") + import :: c_ptr, c_int64_t + type(c_ptr), value :: tensor + integer(c_int64_t), value :: dim + integer(c_int64_t) :: n + end function skala_tensor_size_c + end interface + + n = skala_tensor_size_c(tensor%p, int(dim, c_int64_t)) + end function skala_tensor_size + + subroutine skala_tensor_to_array_1d(tensor, array) + type(torch_tensor), intent(in) :: tensor + real(c_double), pointer, intent(out) :: array(:) + + interface + function skala_tensor_data_ptr_c(tensor) result(ptr) bind(c, name="skala_tensor_data_ptr") + import :: c_ptr + type(c_ptr), value :: tensor + type(c_ptr) :: ptr + end function skala_tensor_data_ptr_c + end interface + + type(c_ptr) :: data_ptr + integer(c_int64_t) :: n + + n = skala_tensor_numel(tensor) + data_ptr = skala_tensor_data_ptr_c(tensor%p) + call c_f_pointer(data_ptr, array, [n]) + end subroutine skala_tensor_to_array_1d + + subroutine skala_tensor_to_array_2d(tensor, array) + type(torch_tensor), intent(in) :: tensor + real(c_double), pointer, intent(out) :: array(:,:) + + interface + function skala_tensor_data_ptr_c(tensor) result(ptr) bind(c, name="skala_tensor_data_ptr") + import :: c_ptr + type(c_ptr), value :: tensor + type(c_ptr) :: ptr + end function skala_tensor_data_ptr_c + end interface + + type(c_ptr) :: data_ptr + integer(c_int64_t) :: nrow, ncol + + ! 
C is row-major, Fortran is column-major: swap dimensions + nrow = skala_tensor_size(tensor, 1) + ncol = skala_tensor_size(tensor, 0) + data_ptr = skala_tensor_data_ptr_c(tensor%p) + call c_f_pointer(data_ptr, array, [nrow, ncol]) + end subroutine skala_tensor_to_array_2d + + subroutine skala_tensor_to_array_3d(tensor, array) + type(torch_tensor), intent(in) :: tensor + real(c_double), pointer, intent(out) :: array(:,:,:) + + interface + function skala_tensor_data_ptr_c(tensor) result(ptr) bind(c, name="skala_tensor_data_ptr") + import :: c_ptr + type(c_ptr), value :: tensor + type(c_ptr) :: ptr + end function skala_tensor_data_ptr_c + end interface + + type(c_ptr) :: data_ptr + integer(c_int64_t) :: n0, n1, n2 + + ! C is row-major, Fortran is column-major: reverse dimension order + n0 = skala_tensor_size(tensor, 2) + n1 = skala_tensor_size(tensor, 1) + n2 = skala_tensor_size(tensor, 0) + data_ptr = skala_tensor_data_ptr_c(tensor%p) + call c_f_pointer(data_ptr, array, [n0, n1, n2]) + end subroutine skala_tensor_to_array_3d + subroutine skala_model_get_exc(model, input, exc) class(skala_model), intent(inout) :: model type(skala_dict), intent(in) :: input From 0a15bcad9500e47badc84deab4dd6cf2f7314220 Mon Sep 17 00:00:00 2001 From: Sebastian Ehlert Date: Wed, 25 Feb 2026 10:51:04 +0100 Subject: [PATCH 3/4] Minor fixes --- examples/fortran/ftorch_integration/app/main.f90 | 12 ++++++------ .../fortran/ftorch_integration/environment-cpu.yml | 3 +-- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/examples/fortran/ftorch_integration/app/main.f90 b/examples/fortran/ftorch_integration/app/main.f90 index 6a2fb64..60e935e 100644 --- a/examples/fortran/ftorch_integration/app/main.f90 +++ b/examples/fortran/ftorch_integration/app/main.f90 @@ -1,10 +1,10 @@ program main + use iso_c_binding, only : c_double use ftorch, only : torch_tensor use skala_ftorch, only : skala_model, skala_model_load, skala_feature, skala_tensor_load, & & skala_tensor_sum, skala_tensor_mean, 
skala_tensor_mul, skala_tensor_item_double, & - & skala_tensor_to_array, & - & skala_dict, skala_dict_new - use iso_c_binding, only : c_double + & skala_tensor_to_array, skala_dict, skala_dict_new + implicit none type(skala_model) :: model @@ -24,7 +24,7 @@ program main end block cli_input if (.not. allocated(path) .or. .not.allocated(feature_dir)) then call get_argument(0, path) - print '(a)', "Usage: ", path, " " + print '(a)', "Usage: "//path//" " stop 1 end if @@ -142,12 +142,12 @@ program main print '(a)', "[8] Accessing tensor data as Fortran arrays" array_access: block real(c_double), pointer :: arr1d(:), arr2d(:,:), arr3d(:,:,:) - integer :: i ! exc is 1-D (npts) call skala_tensor_to_array(exc, arr1d) print '(a, i0, a)', " -> exc: shape = (", size(arr1d), ")" - print '(a, 3es22.14, a)', " [", arr1d(:3), " ...]" + print '(a, 3es22.14, a)', & + " [", arr1d(:3), " ...]" ! density is 2-D (nspin, npts) if (model%needs_feature(skala_feature%density)) then diff --git a/examples/fortran/ftorch_integration/environment-cpu.yml b/examples/fortran/ftorch_integration/environment-cpu.yml index 5443002..e44633f 100644 --- a/examples/fortran/ftorch_integration/environment-cpu.yml +++ b/examples/fortran/ftorch_integration/environment-cpu.yml @@ -8,7 +8,6 @@ dependencies: - cxx-compiler - cmake >=3.20,<4 - ninja - # host/runtime requirements - - jonquil + # host/runtime requirements - pytorch >=2.0 cpu_* From 32c25eb3c7f96e7e1907dcf12de45b1011cdded7 Mon Sep 17 00:00:00 2001 From: Sebastian Ehlert Date: Wed, 25 Feb 2026 15:09:49 +0100 Subject: [PATCH 4/4] Update workflow and adjust documentation --- .github/workflows/examples.yml | 117 ++++++++++++++++++ docs/ftorch.rst | 17 ++- examples/cpp/cpp_integration/environment.yml | 3 +- .../{environment-cpu.yml => environment.yml} | 1 + 4 files changed, 135 insertions(+), 3 deletions(-) rename examples/fortran/ftorch_integration/{environment-cpu.yml => environment.yml} (91%) diff --git a/.github/workflows/examples.yml 
b/.github/workflows/examples.yml index 5a539be..75eddaf 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -10,7 +10,59 @@ permissions: contents: read jobs: + skala-checkpoint: + runs-on: ubuntu-latest + steps: + - name: Setup Python + uses: actions/setup-python@v6 + with: + python-version: "3.x" + + - name: Install huggingface_hub CLI + run: pip install huggingface_hub + + - name: Download checkpoint + run: >- + hf download microsoft/skala skala-1.0.fun --local-dir . + + - name: Upload checkpoint + uses: actions/upload-artifact@v4 + with: + name: skala-checkpoint + path: skala-1.0.fun + + pt-features: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Setup micromamba + uses: mamba-org/setup-micromamba@v1 + with: + environment-file: environment-cpu.yml + environment-name: skala-dev + cache-environment: true + cache-downloads: true + + - name: Install Skala + run: pip install -e . + shell: micromamba-shell {0} + + - name: Generate features + run: >- + python examples/cpp/cpp_integration/prepare_inputs.py + --output-dir features + shell: micromamba-shell {0} + + - name: Upload features + uses: actions/upload-artifact@v4 + with: + name: pt-features + path: features + gauxc: + needs: + - skala-checkpoint runs-on: ubuntu-latest strategy: fail-fast: false @@ -91,3 +143,68 @@ jobs: ./gauxc/tests/ref_data/onedft_he_def2qzvp_tpss_uks.hdf5 --model TPSS shell: micromamba-shell {0} + + - name: Download checkpoint + uses: actions/download-artifact@v5 + with: + name: skala-checkpoint + + - name: Run example + run: >- + Skala + ./gauxc/tests/ref_data/onedft_he_def2qzvp_tpss_uks.hdf5 + --model ./skala-1.0.fun + shell: micromamba-shell {0} + + ftorch: + runs-on: ubuntu-latest + needs: + - pt-features + - skala-checkpoint + + steps: + - uses: actions/checkout@v4 + + - name: Setup micromamba + uses: mamba-org/setup-micromamba@v1 + with: + environment-file: examples/fortran/ftorch_integration/environment.yml + environment-name: 
ftorch-dev + cache-environment: true + cache-downloads: true + + - name: Configure project + run: >- + cmake + -B build_example + -S examples/fortran/ftorch_integration + -G Ninja + -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX + shell: micromamba-shell {0} + + - name: Build project + run: cmake --build build_example + shell: micromamba-shell {0} + + - name: Install project + run: cmake --install build_example + shell: micromamba-shell {0} + + - name: Download features + uses: actions/download-artifact@v5 + with: + name: pt-features + path: features + + - name: Download checkpoint + uses: actions/download-artifact@v5 + with: + name: skala-checkpoint + path: . + + - name: Run example + run: >- + Skala + ./skala-1.0.fun + ./features + shell: micromamba-shell {0} \ No newline at end of file diff --git a/docs/ftorch.rst b/docs/ftorch.rst index d4c2d00..936f26c 100644 --- a/docs/ftorch.rst +++ b/docs/ftorch.rst @@ -10,7 +10,7 @@ Setting up development environment For this example, we will be using mamba to manage our environment and dependencies. -.. literalinclude:: ../examples/fortran/ftorch_integration/environment-cpu.yml +.. literalinclude:: ../examples/fortran/ftorch_integration/environment.yml :language: yaml :caption: environment.yml @@ -184,6 +184,21 @@ To evaluate Skala, we download the model checkpoint from HuggingFace using the ` hf download microsoft/skala skala-1.0.fun --local-dir . +.. note:: + + To create the features directory run the ``prepare_inputs.py`` script from the ``examples/cpp/cpp_integration/`` directory. + This will generate the necessary input features for the H2 molecule with the def2-QZVP basis set. + + .. code-block:: bash + + python prepare_inputs.py --output_dir features --molecule H2 --basis def2-QZVP + + The script needs the ``skala`` package installed in your Python environment, which can be done via pip: + + .. 
code-block:: bash + + pip install skala + And run the application, passing the path to the Skala model and the feature directory as command line arguments. .. code-block:: bash diff --git a/examples/cpp/cpp_integration/environment.yml b/examples/cpp/cpp_integration/environment.yml index 1e184f5..e99313e 100644 --- a/examples/cpp/cpp_integration/environment.yml +++ b/examples/cpp/cpp_integration/environment.yml @@ -8,5 +8,4 @@ dependencies: - nlohmann_json - libtorch - pytorch # Only required to produce example features. -- pip: - - microsoft-skala +- skala diff --git a/examples/fortran/ftorch_integration/environment-cpu.yml b/examples/fortran/ftorch_integration/environment.yml similarity index 91% rename from examples/fortran/ftorch_integration/environment-cpu.yml rename to examples/fortran/ftorch_integration/environment.yml index e44633f..572bc7f 100644 --- a/examples/fortran/ftorch_integration/environment-cpu.yml +++ b/examples/fortran/ftorch_integration/environment.yml @@ -9,5 +9,6 @@ dependencies: - cmake >=3.20,<4 - ninja # host/runtime requirements + - huggingface_hub - pytorch >=2.0 cpu_*