diff --git a/CMakeLists.txt b/CMakeLists.txt index b9e2095..09e8b95 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,6 +3,16 @@ set(CMAKE_CXX_STANDARD 17) project(nuTens) +# Changes default install path to be a subdirectory of the build dir. +# Can set build dir at configure time with -DCMAKE_INSTALL_PREFIX=/install/path +if(CMAKE_INSTALL_PREFIX STREQUAL "" OR CMAKE_INSTALL_PREFIX STREQUAL + "/usr/local") + set(CMAKE_INSTALL_PREFIX "${CMAKE_BINARY_DIR}") +elseif(NOT DEFINED CMAKE_INSTALL_PREFIX) + set(CMAKE_INSTALL_PREFIX "${CMAKE_BINARY_DIR}") +endif() + + # Need to add some special compile flags to check the code test coverage OPTION(NT_TEST_COVERAGE "produce code coverage reports when running tests" OFF) IF(NT_TEST_COVERAGE) @@ -46,6 +56,19 @@ ELSE() message("Won't benchmark") ENDIF() +# If user wants to enable python interface we need to include pybind +OPTION(NT_ENABLE_PYTHON "enable python interface" OFF) +IF(NT_ENABLE_PYTHON) + message("Enabling python") + CPMAddPackage( + GITHUB_REPOSITORY "pybind/pybind11" + VERSION 2.13.5 + ) + +ELSE() + message("Won't enable python interface") +ENDIF() + ## check build times ## have this optional as it's not supported on all CMake platforms @@ -66,6 +89,13 @@ IF(NT_ENABLE_BENCHMARKING) add_subdirectory(benchmarks) ENDIF() +IF(NT_ENABLE_PYTHON) + set_property( TARGET tensor PROPERTY POSITION_INDEPENDENT_CODE ON ) + set_property( TARGET propagator PROPERTY POSITION_INDEPENDENT_CODE ON ) + set_property( TARGET spdlog PROPERTY POSITION_INDEPENDENT_CODE ON ) + set_property( TARGET logging PROPERTY POSITION_INDEPENDENT_CODE ON ) + add_subdirectory(python) +ENDIF() # Print out a handy message to more easily see the config options message( STATUS "The following variables have been used to configure the build: " ) diff --git a/README.md b/README.md index 5365f02..89c7afd 100644 --- a/README.md +++ b/README.md @@ -77,9 +77,9 @@ nuTens uses [Googles benchmark library](https://github.com/google/benchmark) to - [x] Add suite of benchmarking tests - [x] Integrate benchmarks into CI ( maybe use [hyperfine](https://github.com/sharkdp/hyperfine) and [bencher](https://bencher.dev/) for this? 
) - [ ] Add proper unit tests -- [ ] Expand CI to include more platforms +- [x] Expand CI to include more platforms - [ ] Add support for modules (see [PyTorch doc](https://pytorch.org/cppdocs/api/classtorch_1_1nn_1_1_module.html)) - [ ] Propagation in variable matter density - [ ] Add support for Tensorflow backend -- [ ] Add python interface +- [x] Add python interface diff --git a/benchmarks/benchmarks.cpp b/benchmarks/benchmarks.cpp index 2f729d2..03a8220 100644 --- a/benchmarks/benchmarks.cpp +++ b/benchmarks/benchmarks.cpp @@ -97,7 +97,7 @@ static void BM_constMatterOscillations(benchmark::State &state) // set up the propagator Propagator matterProp(3, 100.0); - std::unique_ptr matterSolver = std::make_unique(3, 2.6); + std::shared_ptr matterSolver = std::make_shared(3, 2.6); matterProp.setPMNS(PMNS); matterProp.setMasses(masses); matterProp.setMatterSolver(matterSolver); diff --git a/nuTens/CMakeLists.txt b/nuTens/CMakeLists.txt index 30f8314..286cb74 100644 --- a/nuTens/CMakeLists.txt +++ b/nuTens/CMakeLists.txt @@ -5,6 +5,7 @@ add_library(logging logging.hpp) target_link_libraries(logging spdlog::spdlog) set_target_properties(logging PROPERTIES LINKER_LANGUAGE CXX) +target_include_directories(logging PUBLIC "${SPDLOG_INCLUDE_DIRS}") set( NT_LOG_LEVEL "INFO" CACHE STRING "the level of detail to log to the console" ) diff --git a/nuTens/propagator/const-density-solver.cpp b/nuTens/propagator/const-density-solver.cpp index fa040a3..bed0ecd 100644 --- a/nuTens/propagator/const-density-solver.cpp +++ b/nuTens/propagator/const-density-solver.cpp @@ -10,8 +10,8 @@ void ConstDensityMatterSolver::calculateEigenvalues(const Tensor &energies, Tens for (int j = 0; j < nGenerations; j++) { hamiltonian.setValue({"...", i, j}, - Tensor::div(diagMassMatrix.getValue({i, j}), energies.getValue({"...", 0})) - - electronOuter.getValue({i, j})); + Tensor::div(diagMassMatrix.getValues({i, j}), energies.getValues({"...", 0})) - + electronOuter.getValues({i, j})); } } diff --git a/nuTens/propagator/const-density-solver.hpp b/nuTens/propagator/const-density-solver.hpp index 5aa5b34..e6fb5d8 100644 --- a/nuTens/propagator/const-density-solver.hpp +++ b/nuTens/propagator/const-density-solver.hpp @@ -51,8 +51,9 @@ class ConstDensityMatterSolver : public BaseMatterSolver // construct the outer product of the electron neutrino row of the PMNS // matrix used to construct the hamiltonian - electronOuter = Tensor::scale(Tensor::outer(PMNS.getValue({0, 0, "..."}), PMNS.getValue({0, 0, "..."}).conj()), - Constants::Groot2 * density); + electronOuter = + Tensor::scale(Tensor::outer(PMNS.getValues({0, 0, "..."}), PMNS.getValues({0, 0, "..."}).conj()), + Constants::Groot2 * density); }; /// @brief Set new mass eigenvalues for this solver @@ -64,7 +65,7 @@ class ConstDensityMatterSolver : public BaseMatterSolver masses = newMasses; - Tensor m = masses.getValue({0, "..."}); + Tensor m = masses.getValues({0, "..."}); Tensor diag = Tensor::scale(Tensor::mul(m, m), 0.5); // construct the diagonal mass^2 matrix used in the hamiltonian diff --git a/nuTens/propagator/propagator.cpp b/nuTens/propagator/propagator.cpp index e82e661..4339361 100644 --- a/nuTens/propagator/propagator.cpp +++ b/nuTens/propagator/propagator.cpp @@ -44,7 +44,7 @@ Tensor Propagator::_calculateProbs(const Tensor &energies, const Tensor &massesS { for (int j = 0; j < _nGenerations; j++) { - weightMatrix.setValue({"...", i, j}, weightVector.getValue({"...", j})); + weightMatrix.setValue({"...", i, j}, weightVector.getValues({"...", j})); } } 
weightMatrix.requiresGrad(true); diff --git a/nuTens/propagator/propagator.hpp b/nuTens/propagator/propagator.hpp index b917aa2..a840699 100644 --- a/nuTens/propagator/propagator.hpp +++ b/nuTens/propagator/propagator.hpp @@ -40,7 +40,7 @@ class Propagator /// @brief Set a matter solver to use to deal with matter effects /// @param newSolver A derivative of BaseMatterSolver - inline void setMatterSolver(std::unique_ptr<BaseMatterSolver> &newSolver) + inline void setMatterSolver(std::shared_ptr<BaseMatterSolver> &newSolver) { NT_PROFILE(); _matterSolver = std::move(newSolver); @@ -113,5 +113,5 @@ class Propagator int _nGenerations; float _baseline; - std::unique_ptr<BaseMatterSolver> _matterSolver; + std::shared_ptr<BaseMatterSolver> _matterSolver; }; \ No newline at end of file diff --git a/nuTens/tensors/CMakeLists.txt b/nuTens/tensors/CMakeLists.txt index 1045c50..b86ccf8 100644 --- a/nuTens/tensors/CMakeLists.txt +++ b/nuTens/tensors/CMakeLists.txt @@ -9,6 +9,8 @@ IF(NT_USE_PCH) target_link_libraries(tensor PUBLIC nuTens-pch) ELSE() + target_link_libraries(tensor PUBLIC logging) + IF(TORCH_FOUND) target_link_libraries(tensor PUBLIC "${TORCH_LIBRARIES}") diff --git a/nuTens/tensors/dtypes.hpp b/nuTens/tensors/dtypes.hpp index 46d2508..c589c15 100644 --- a/nuTens/tensors/dtypes.hpp +++ b/nuTens/tensors/dtypes.hpp @@ -1,5 +1,9 @@ #pragma once +#if USE_PYTORCH +#include <torch/torch.h> +#endif + /*! * @file dtypes.hpp * @brief Defines various datatypes used in the project @@ -16,13 +20,34 @@ enum scalarType kDouble, kComplexFloat, kComplexDouble, + kUninitScalar, }; /// Devices that a Tensor can live on enum deviceType { kCPU, - kGPU + kGPU, + kUninitDevice, }; +#if USE_PYTORCH +/// map between the data types used in nuTens and those used by pytorch +const static std::map<scalarType, c10::ScalarType> scalarTypeMap = {{kFloat, torch::kFloat}, + {kDouble, torch::kDouble}, + {kComplexFloat, torch::kComplexFloat}, + {kComplexDouble, torch::kComplexDouble}}; + +/// inverse map between the data types used in nuTens and those used by pytorch +const static std::map<c10::ScalarType, scalarType> invScalarTypeMap = {{torch::kFloat, kFloat}, + {torch::kDouble, kDouble}, + {torch::kComplexFloat, kComplexFloat}, + {torch::kComplexDouble, kComplexDouble}}; + +// map between the device types used in nuTens and those used by pytorch +const static std::map<deviceType, c10::DeviceType> deviceTypeMap = {{kCPU, torch::kCPU}, {kGPU, torch::kCUDA}}; + +// inverse map between the device types used in nuTens and those used by pytorch +const static std::map<c10::DeviceType, deviceType> invDeviceTypeMap = {{torch::kCPU, kCPU}, {torch::kCUDA, kGPU}}; +#endif } // namespace NTdtypes \ No newline at end of file diff --git a/nuTens/tensors/tensor.hpp b/nuTens/tensors/tensor.hpp index 2b0eaac..438e8c7 100644 --- a/nuTens/tensors/tensor.hpp +++ b/nuTens/tensors/tensor.hpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -42,14 +43,23 @@ class Tensor */ public: + /// Holds the possible "index" types; this allows us to pass integers OR strings as index values, which allows us to + /// do some basic slicing of tensors similar to python using indexType = std::variant<int, std::string>; + /// Container that holds all allowed types that can be returned by a tensor + using variantType = std::variant<int, float, double, std::complex<float>, std::complex<double>>; + /// @name Constructors /// Use these methods to construct tensors /// @{ /// @brief Default constructor with no initialisation - Tensor(){}; + Tensor() + { + _dType = NTdtypes::kUninitScalar; + _device = NTdtypes::kUninitDevice; + }; /// @brief Construct a 1-d array with specified values /// @arg values The values to include in the tensor @@ -233,13 +243,37 @@ class Tensor /// @brief Get elementwise
phases of a complex tensor [[nodiscard]] Tensor angle() const; - /// @brief Get the result of summing this tensor over some dimension + /// @brief Get the cumulative sum over some dimension /// @param dim The dimension to sum over [[nodiscard]] Tensor cumsum(int dim) const; /// @brief Get the result of summing this tensor over all dimensions [[nodiscard]] Tensor sum() const; + /// @brief Get the result of summing this tensor over all dimensions + /// @param dims The dimensions to sum over + [[nodiscard]] Tensor sum(const std::vector &dims) const; + + /// @brief Get the cumulative sum over some dimension + /// @param dim The dimension to sum over + static inline Tensor cumsum(const Tensor &t, int dim) + { + return t.cumsum(dim); + } + + /// @brief Get the result of summing this tensor over all dimensions + static inline Tensor sum(const Tensor &t) + { + return t.sum(); + } + + /// @brief Get the result of summing this tensor over all dimensions + /// @param dims The dimensions to sum over + static inline Tensor sum(const Tensor &t, const std::vector &dims) + { + return t.sum(dims); + } + /// @name Gradients /// @{ @@ -280,18 +314,24 @@ class Tensor /// @arg indices The indices of the value to set /// @arg value The value to set it to void setValue(const Tensor &indices, const Tensor &value); - void setValue(const std::vector> &indices, const Tensor &value); + void setValue(const std::vector &indices, const Tensor &value); void setValue(const std::vector &indices, float value); void setValue(const std::vector &indices, std::complex value); /// @brief Get the value at a certain entry in the tensor /// @param indices The index of the entry to get - [[nodiscard]] Tensor getValue(const std::vector> &indices) const; + [[nodiscard]] Tensor getValues(const std::vector &indices) const; + + /// @brief Get the value at a certain entry in the tensor as an std::variant + /// @details This mainly exists so we can get the values of a tensor in python as pybind11 DOES NOT like templated + /// functions If using the c++ interface it is probably easier, faster and safer to use the templated getValue() + /// function. 
+ [[nodiscard]] variantType getVariantValue(const std::vector<int> &indices) const; /// @brief Get the number of dimensions in the tensor [[nodiscard]] size_t getNdim() const; - /// @brief Get the batch dimension size of the tensor + /// @brief Get the size of the batch dimension of the tensor [[nodiscard]] int getBatchDim() const; /// @brief Get the shape of the tensor @@ -302,6 +342,8 @@ private: bool _hasBatchDim = false; + NTdtypes::scalarType _dType; + NTdtypes::deviceType _device; // ################################################### // ########## Tensor library specific stuff ########## @@ -312,38 +354,85 @@ class Tensor public: /// @brief Get the value at a particular index of the tensor /// @arg indices The indices of the value to set - template <typename T> inline T getValue(const std::vector<int> &indices) + template <typename T> inline T getValue(const std::vector<int> &indices) const { NT_PROFILE(); - std::vector<at::indexing::TensorIndex> indicesVec; - indicesVec.reserve(indices.size()); - for (const int &i : indices) - { - indicesVec.push_back(at::indexing::TensorIndex(i)); - } - - return _tensor.index(indicesVec).item<T>(); + return _tensor.index(convertIndices(indices)).item<T>(); } /// Get the value of a size 0 tensor (scalar) - template <typename T> inline T getValue() + template <typename T> inline T getValue() const { - NT_PROFILE(); return _tensor.item<T>(); } + // return the underlying tensor with type determined by the backend library [[nodiscard]] inline const torch::Tensor &getTensor() const { - NT_PROFILE(); return _tensor; } + private: + /// Set the underlying tensor, setting the relevant information like _dType and _device + inline void setTensor(const torch::Tensor &tensor) + { + NT_PROFILE(); + + _tensor = tensor; + _dType = NTdtypes::invScalarTypeMap.at(tensor.scalar_type()); + _device = NTdtypes::invDeviceTypeMap.at(tensor.device().type()); + } + + /// Utility function to convert from a vector of ints to a vector of ATen tensor indices, which is needed for + /// accessing values of a tensor. + [[nodiscard]] static inline std::vector<at::indexing::TensorIndex> convertIndices(const std::vector<int> &indices) + { + NT_PROFILE(); + + std::vector<at::indexing::TensorIndex> indicesVec; + indicesVec.reserve(indices.size()); + for (const int &i : indices) + { + indicesVec.push_back(at::indexing::TensorIndex(i)); + } + + return indicesVec; + } + + /// Utility function to convert from a vector of generic index types (ints or strings) to a vector of ATen tensor + /// indices, which is needed for accessing values of a tensor.
+ [[nodiscard]] static inline std::vector<at::indexing::TensorIndex> convertIndices( + const std::vector<Tensor::indexType> &indices) + { + NT_PROFILE(); + + std::vector<at::indexing::TensorIndex> indicesVec; + for (const Tensor::indexType &i : indices) + { + if (const int *index = std::get_if<int>(&i)) + { + indicesVec.push_back(at::indexing::TensorIndex(*index)); + } + else if (const std::string *index = std::get_if<std::string>(&i)) + { + indicesVec.push_back(at::indexing::TensorIndex((*index).c_str())); + } + else + { + assert(false && "ERROR: Unsupported index type"); + throw; + } + } + + return indicesVec; + } + private: torch::Tensor _tensor; #endif -}; \ No newline at end of file +}; diff --git a/nuTens/tensors/torch-tensor.cpp b/nuTens/tensors/torch-tensor.cpp index 3324317..a0c8c90 100644 --- a/nuTens/tensors/torch-tensor.cpp +++ b/nuTens/tensors/torch-tensor.cpp @@ -1,17 +1,6 @@ #include -// map between the data types used in nuTens and those used by pytorch -const static std::map<NTdtypes::scalarType, c10::ScalarType> scalarTypeMap = { - {NTdtypes::kFloat, torch::kFloat}, - {NTdtypes::kDouble, torch::kDouble}, - {NTdtypes::kComplexFloat, torch::kComplexFloat}, - {NTdtypes::kComplexDouble, torch::kComplexDouble}}; - -// map between the device types used in nuTens and those used by pytorch -const static std::map<NTdtypes::deviceType, c10::DeviceType> deviceTypeMap = {{NTdtypes::kCPU, torch::kCPU}, - {NTdtypes::kGPU, torch::kCUDA}}; - std::string Tensor::getTensorLibrary() { return "PyTorch"; } @@ -22,9 +11,11 @@ Tensor::Tensor(std::vector values, NTdtypes::scalarType type, NTdtypes::d NT_PROFILE(); _tensor = torch::tensor(values, torch::TensorOptions() - .dtype(scalarTypeMap.at(type)) - .device(deviceTypeMap.at(device)) + .dtype(NTdtypes::scalarTypeMap.at(type)) + .device(NTdtypes::deviceTypeMap.at(device)) .requires_grad(requiresGrad)); + _dType = type; + _device = device; } Tensor Tensor::eye(int n, NTdtypes::scalarType type, NTdtypes::deviceType device, bool requiresGrad) @@ -32,10 +23,12 @@ Tensor Tensor::eye(int n, NTdtypes::scalarType type, NTdtypes::deviceType device NT_PROFILE(); Tensor ret; - ret._tensor = torch::eye(n, torch::TensorOptions() - .dtype(scalarTypeMap.at(type)) - .device(deviceTypeMap.at(device)) - .requires_grad(requiresGrad)); + ret.setTensor(torch::eye(n, torch::TensorOptions() + .dtype(NTdtypes::scalarTypeMap.at(type)) + .device(NTdtypes::deviceTypeMap.at(device)) + .requires_grad(requiresGrad))); + ret._dType = type; + ret._device = device; return ret; } @@ -45,10 +38,13 @@ Tensor Tensor::rand(const std::vector &shape, NTdtypes::scalarType typ NT_PROFILE(); Tensor ret; - ret._tensor = torch::rand(c10::IntArrayRef(shape), torch::TensorOptions() - .dtype(scalarTypeMap.at(type)) - .device(deviceTypeMap.at(device)) - .requires_grad(requiresGrad)); + ret.setTensor(torch::rand(c10::IntArrayRef(shape), torch::TensorOptions() + .dtype(NTdtypes::scalarTypeMap.at(type)) + .device(NTdtypes::deviceTypeMap.at(device)) + .requires_grad(requiresGrad))); + + ret._dType = type; + ret._device = device; return ret; } @@ -58,7 +54,9 @@ Tensor Tensor::diag(const Tensor &diag) { NT_PROFILE(); Tensor ret; - ret._tensor = torch::diag(diag._tensor); + ret.setTensor(torch::diag(diag._tensor)); + ret._dType = diag._dType; + ret._device = diag._device; return ret; } @@ -68,10 +66,12 @@ Tensor Tensor::ones(const std::vector &shape, NTdtypes::scalarType typ NT_PROFILE(); Tensor ret; - ret._tensor = torch::ones(c10::IntArrayRef(shape), torch::TensorOptions() - .dtype(scalarTypeMap.at(type)) - .device(deviceTypeMap.at(device)) - .requires_grad(requiresGrad)); + ret.setTensor(torch::ones(c10::IntArrayRef(shape), torch::TensorOptions() +
.dtype(NTdtypes::scalarTypeMap.at(type)) + .device(NTdtypes::deviceTypeMap.at(device)) + .requires_grad(requiresGrad))); + ret._dType = type; + ret._device = device; return ret; } @@ -81,7 +81,9 @@ Tensor Tensor::zeros(const std::vector &shape, NTdtypes::scalarType ty NT_PROFILE(); Tensor ret; - ret._tensor = torch::zeros(c10::IntArrayRef(shape), scalarTypeMap.at(type)); + ret.setTensor(torch::zeros(c10::IntArrayRef(shape), NTdtypes::scalarTypeMap.at(type))); + ret._dType = type; + ret._device = device; return ret; } @@ -89,7 +91,8 @@ Tensor &Tensor::dType(NTdtypes::scalarType type) { NT_PROFILE(); - _tensor = _tensor.to(scalarTypeMap.at(type)); + _tensor = _tensor.to(NTdtypes::scalarTypeMap.at(type)); + _dType = type; return *this; } @@ -97,7 +100,8 @@ Tensor &Tensor::device(NTdtypes::deviceType device) { NT_PROFILE(); - _tensor = _tensor.to(deviceTypeMap.at(device)); + _tensor = _tensor.to(NTdtypes::deviceTypeMap.at(device)); + _device = device; return *this; } @@ -122,33 +126,43 @@ Tensor &Tensor::addBatchDim() return *this; } -Tensor Tensor::getValue(const std::vector &indices) const +Tensor Tensor::getValues(const std::vector &indices) const { NT_PROFILE(); - std::vector indicesVec; - for (const Tensor::indexType &i : indices) - { - if (const int *index = std::get_if(&i)) - { - indicesVec.push_back(at::indexing::TensorIndex(*index)); - } - else if (const std::string *index = std::get_if(&i)) - { - indicesVec.push_back(at::indexing::TensorIndex((*index).c_str())); - } - else - { - assert(false && "ERROR: Unsupported index type"); - throw; - } - } - Tensor ret; - ret._tensor = _tensor.index(indicesVec); + ret.setTensor(_tensor.index(convertIndices(indices))); return ret; } +Tensor::variantType Tensor::getVariantValue(const std::vector &indices) const +{ + NT_PROFILE(); + + switch (_dType) + { + case NTdtypes::kInt: + return _tensor.index(convertIndices(indices)).item(); + + case NTdtypes::kFloat: + return _tensor.index(convertIndices(indices)).item(); + + case NTdtypes::kDouble: + return _tensor.index(convertIndices(indices)).item(); + + case NTdtypes::kComplexFloat: + return (std::complex)_tensor.index(convertIndices(indices)).item>(); + + case NTdtypes::kComplexDouble: + return (std::complex)_tensor.index(convertIndices(indices)).item>(); + + default: + NT_ERROR("Invalid dtype has been set for this tensor: {}", _dType); + NT_ERROR("{}:{}", __FILE__, __LINE__); + throw; + } +} + void Tensor::setValue(const Tensor &indices, const Tensor &value) { NT_PROFILE(); @@ -160,53 +174,21 @@ void Tensor::setValue(const std::vector &indices, const Tenso { NT_PROFILE(); - std::vector indicesVec; - for (const Tensor::indexType &i : indices) - { - if (const int *index = std::get_if(&i)) - { - indicesVec.push_back(at::indexing::TensorIndex(*index)); - } - else if (const std::string *index = std::get_if(&i)) - { - indicesVec.push_back(at::indexing::TensorIndex((*index).c_str())); - } - else - { - assert(false && "ERROR: Unsupported index type"); - throw; - } - } - - _tensor.index_put_(indicesVec, value._tensor); + _tensor.index_put_(convertIndices(indices), value._tensor); } void Tensor::setValue(const std::vector &indices, float value) { NT_PROFILE(); - std::vector indicesVec; - indicesVec.reserve(indices.size()); - for (const int &i : indices) - { - indicesVec.push_back(at::indexing::TensorIndex(i)); - } - - _tensor.index_put_(indicesVec, value); + _tensor.index_put_(convertIndices(indices), value); } void Tensor::setValue(const std::vector &indices, std::complex value) { NT_PROFILE(); - std::vector 
indicesVec; - indicesVec.reserve(indices.size()); - for (const int &i : indices) - { - indicesVec.push_back(at::indexing::TensorIndex(i)); - } - - _tensor.index_put_(indicesVec, c10::complex(value.real(), value.imag())); + _tensor.index_put_(convertIndices(indices), c10::complex(value.real(), value.imag())); } size_t Tensor::getNdim() const @@ -240,7 +222,7 @@ Tensor Tensor::matmul(const Tensor &t1, const Tensor &t2) NT_PROFILE(); Tensor ret; - ret._tensor = torch::matmul(t1._tensor, t2._tensor); + ret.setTensor(torch::matmul(t1._tensor, t2._tensor)); return ret; } @@ -249,7 +231,7 @@ Tensor Tensor::outer(const Tensor &t1, const Tensor &t2) NT_PROFILE(); Tensor ret; - ret._tensor = torch::outer(t1._tensor, t2._tensor); + ret.setTensor(torch::outer(t1._tensor, t2._tensor)); return ret; } @@ -258,7 +240,7 @@ Tensor Tensor::mul(const Tensor &t1, const Tensor &t2) NT_PROFILE(); Tensor ret; - ret._tensor = torch::mul(t1._tensor, t2._tensor); + ret.setTensor(torch::mul(t1._tensor, t2._tensor)); return ret; } @@ -267,7 +249,7 @@ Tensor Tensor::div(const Tensor &t1, const Tensor &t2) NT_PROFILE(); Tensor ret; - ret._tensor = torch::div(t1._tensor, t2._tensor); + ret.setTensor(torch::div(t1._tensor, t2._tensor)); return ret; } @@ -276,7 +258,7 @@ Tensor Tensor::pow(const Tensor &t, float s) NT_PROFILE(); Tensor ret; - ret._tensor = torch::pow(t._tensor, s); + ret.setTensor(torch::pow(t._tensor, s)); return ret; } @@ -285,7 +267,7 @@ Tensor Tensor::pow(const Tensor &t, std::complex s) NT_PROFILE(); Tensor ret; - ret._tensor = torch::pow(t._tensor, c10::complex(s.real(), s.imag())); + ret.setTensor(torch::pow(t._tensor, c10::complex(s.real(), s.imag()))); return ret; } @@ -294,7 +276,7 @@ Tensor Tensor::exp(const Tensor &t) NT_PROFILE(); Tensor ret; - ret._tensor = torch::exp(t._tensor); + ret.setTensor(torch::exp(t._tensor)); return ret; } @@ -303,7 +285,7 @@ Tensor Tensor::transpose(const Tensor &t, int dim1, int dim2) NT_PROFILE(); Tensor ret; - ret._tensor = torch::transpose(t._tensor, dim1, dim2); + ret.setTensor(torch::transpose(t._tensor, dim1, dim2)); return ret; } @@ -312,7 +294,7 @@ Tensor Tensor::scale(const Tensor &t, float s) NT_PROFILE(); Tensor ret; - ret._tensor = torch::multiply(t._tensor, s); + ret.setTensor(torch::multiply(t._tensor, s)); return ret; } @@ -321,7 +303,7 @@ Tensor Tensor::scale(const Tensor &t, std::complex s) NT_PROFILE(); Tensor ret; - ret._tensor = torch::multiply(t._tensor, c10::complex(s.real(), s.imag())); + ret.setTensor(torch::multiply(t._tensor, c10::complex(s.real(), s.imag()))); return ret; } @@ -402,7 +384,7 @@ Tensor Tensor::real() const NT_PROFILE(); Tensor ret; - ret._tensor = at::real(_tensor); + ret.setTensor(at::real(_tensor)); return ret; } @@ -411,7 +393,7 @@ Tensor Tensor::imag() const NT_PROFILE(); Tensor ret; - ret._tensor = at::imag(_tensor); + ret.setTensor(at::imag(_tensor)); return ret; } @@ -420,7 +402,7 @@ Tensor Tensor::conj() const NT_PROFILE(); Tensor ret; - ret._tensor = torch::conj(_tensor); + ret.setTensor(torch::conj(_tensor)); // torch::conj() returns a view of the original tensor // I *think* that means that the tensor returned here will be pointing to the // same memory as the original one might need to be careful with this @@ -432,7 +414,7 @@ Tensor Tensor::abs() const NT_PROFILE(); Tensor ret; - ret._tensor = torch::abs(_tensor); + ret.setTensor(torch::abs(_tensor)); return ret; } @@ -441,7 +423,7 @@ Tensor Tensor::angle() const NT_PROFILE(); Tensor ret; - ret._tensor = torch::angle(_tensor); + 
ret.setTensor(torch::angle(_tensor)); return ret; } @@ -464,7 +446,7 @@ Tensor Tensor::operator+(const Tensor &rhs) const NT_PROFILE(); Tensor ret; - ret._tensor = _tensor + rhs._tensor; + ret.setTensor(_tensor + rhs._tensor); return ret; } @@ -473,7 +455,7 @@ Tensor Tensor::operator-(const Tensor &rhs) const NT_PROFILE(); Tensor ret; - ret._tensor = _tensor - rhs._tensor; + ret.setTensor(_tensor - rhs._tensor); return ret; } @@ -482,7 +464,7 @@ Tensor Tensor::operator-() const NT_PROFILE(); Tensor ret; - ret._tensor = -_tensor; + ret.setTensor(-_tensor); return ret; } @@ -491,7 +473,7 @@ Tensor Tensor::cumsum(int dim) const NT_PROFILE(); Tensor ret; - ret._tensor = torch::cumsum(_tensor, dim); + ret.setTensor(torch::cumsum(_tensor, dim)); return ret; } @@ -500,7 +482,16 @@ Tensor Tensor::sum() const NT_PROFILE(); Tensor ret; - ret._tensor = _tensor.sum(); + ret.setTensor(_tensor.sum()); + return ret; +} + +Tensor Tensor::sum(const std::vector &dims) const +{ + NT_PROFILE(); + + Tensor ret; + ret.setTensor(torch::sum(_tensor, torch::OptionalArrayRef(dims))); return ret; } @@ -516,7 +507,7 @@ Tensor Tensor::grad() const NT_PROFILE(); Tensor ret; - ret._tensor = _tensor.grad(); + ret.setTensor(_tensor.grad()); return ret; } @@ -525,7 +516,7 @@ Tensor Tensor::sin(const Tensor &t) NT_PROFILE(); Tensor ret; - ret._tensor = torch::sin(t._tensor); + ret.setTensor(torch::sin(t._tensor)); return ret; } @@ -534,7 +525,7 @@ Tensor Tensor::cos(const Tensor &t) NT_PROFILE(); Tensor ret; - ret._tensor = torch::cos(t._tensor); + ret.setTensor(torch::cos(t._tensor)); return ret; } diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt new file mode 100644 index 0000000..3642ca2 --- /dev/null +++ b/python/CMakeLists.txt @@ -0,0 +1,9 @@ + +pybind11_add_module( + pyNuTens MODULE + pyNuTens.cpp +) + +target_link_libraries( pyNuTens PUBLIC tensor propagator ) + +install( TARGETS pyNuTens DESTINATION pyNuTens/) \ No newline at end of file diff --git a/python/pyNuTens-test.py b/python/pyNuTens-test.py new file mode 100644 index 0000000..c4cfdc9 --- /dev/null +++ b/python/pyNuTens-test.py @@ -0,0 +1,134 @@ +import torch +import pyNuTens as nt +from pyNuTens import tensor +from pyNuTens.tensor import Tensor +import matplotlib.pyplot as plt +import typing + +N_ENERGIES = 10000 + +def build_PMNS(theta12: Tensor, theta13: Tensor, theta23: Tensor, deltaCP: Tensor): + """ Construct a PMNS matrix in the usual parameterisation """ + # set up the three matrices to build the PMNS matrix + M1 = nt.tensor.zeros([1, 3, 3], nt.dtype.scalar_type.complex_float, nt.dtype.device_type.cpu, True) + M2 = nt.tensor.zeros([1, 3, 3], nt.dtype.scalar_type.complex_float, nt.dtype.device_type.cpu, True) + M3 = nt.tensor.zeros([1, 3, 3], nt.dtype.scalar_type.complex_float, nt.dtype.device_type.cpu, True) + + M1.set_value([0, 0, 0], 1.0) + M1.set_value([0, 1, 1], tensor.cos(theta23)) + M1.set_value([0, 1, 2], tensor.sin(theta23)) + M1.set_value([0, 2, 1], -tensor.sin(theta23)) + M1.set_value([0, 2, 2], tensor.cos(theta23)) + M1.requires_grad(True) + + M2.set_value([0, 1, 1], 1.0) + M2.set_value([0, 0, 0], tensor.cos(theta13)) + M2.set_value([0, 0, 2], tensor.mul(tensor.sin(theta13), tensor.exp(tensor.scale(deltaCP, -1.0J)))) + M2.set_value([0, 2, 0], -tensor.mul(tensor.sin(theta13), tensor.exp(tensor.scale(deltaCP, 1.0J)))) + M2.set_value([0, 2, 2], tensor.cos(theta13)) + M2.requires_grad(True) + + M3.set_value([0, 2, 2], 1.0) + M3.set_value([0, 0, 0], tensor.cos(theta12)) + M3.set_value([0, 0, 1], tensor.sin(theta12)) + M3.set_value([0, 
1, 0], -tensor.sin(theta12)) + M3.set_value([0, 1, 1], tensor.cos(theta12)) + M3.requires_grad(True) + + # Build PMNS + PMNS = tensor.matmul(M1, tensor.matmul(M2, M3)) + PMNS.requires_grad(True) + + return PMNS + + +## First we build up a tensor to contain the test energies +energies = nt.tensor.ones([N_ENERGIES, 1], nt.dtype.scalar_type.complex_float, nt.dtype.device_type.cpu, False) + +for i in range(N_ENERGIES): + energies.set_value([i,0], 1.0 + i*0.2) + +energies.requires_grad(True) + +## define tensors with oscillation parameters +theta23 = Tensor([0.82], nt.dtype.scalar_type.complex_float, nt.dtype.device_type.cpu, True) +theta13 = Tensor([0.15], nt.dtype.scalar_type.complex_float, nt.dtype.device_type.cpu, True) +theta12 = Tensor([0.58], nt.dtype.scalar_type.complex_float, nt.dtype.device_type.cpu, True) +deltaCP = Tensor([1.5], nt.dtype.scalar_type.complex_float, nt.dtype.device_type.cpu, True) + +## make the matrix +PMNS = build_PMNS(theta12, theta13, theta23, deltaCP) + +## set the mass tensor +masses = nt.tensor.zeros([1,3], nt.dtype.scalar_type.float, nt.dtype.device_type.cpu, True) + +masses.set_value([0,0], 0.0) +masses.set_value([0,1], 0.00868) +masses.set_value([0,2], 0.0501) + +## print info about the parameters +print("PMNS: ") +print(PMNS.to_string()) + +print("\nMasses: ") +print(masses.to_string()) +print() + +## set up the propagator object +propagator = nt.propagator.Propagator(3, 295000.0) +matter_solver = nt.propagator.ConstDensitySolver(3, 2.79) + +propagator.set_PMNS(PMNS) +propagator.set_masses(masses) +#propagator.set_matter_solver(matter_solver) + +## run! +probabilities = propagator.calculate_probabilities(energies) + +## print out some test values +prob_sum = tensor.scale(tensor.sum(probabilities, [0]), 1.0 / float(N_ENERGIES)) +print("energy integrated probabilities: ") +print(prob_sum.to_string()) + +## check that the autograd functionality works +mu_survival_prob = prob_sum.get_values([1,1]) +print("mu survival prob:") +print(mu_survival_prob.to_string()) + +mu_survival_prob.backward() +print("theta_13 grad: ") +print(theta13.grad().to_string()) + + +## make plots of the oscillation probabilities +energy_list = [] +e_survival_prob_list = [] +mu_survival_prob_list = [] +tau_survival_prob_list = [] + +mu_to_e_prob_list = [] + +for i in range(N_ENERGIES): + energy_list.append(energies.get_value([i, 0])) + e_survival_prob_list.append(probabilities.get_value([i, 0, 0])) + mu_survival_prob_list.append(probabilities.get_value([i, 1, 1])) + tau_survival_prob_list.append(probabilities.get_value([i, 2, 2])) + + mu_to_e_prob_list.append(probabilities.get_value([i, 1, 0])) + +plt.plot(energy_list, e_survival_prob_list, label = "electron") +plt.plot(energy_list, mu_survival_prob_list, label = "muon") +plt.plot(energy_list, tau_survival_prob_list, label = "tau") +plt.xlabel("Energy [MeV]") +plt.ylabel("Survival probability") +plt.legend() +plt.show() +plt.savefig("survival_probs.png") + +plt.clf() +plt.plot(energy_list, mu_to_e_prob_list, label = "numu -> nue") +plt.xlabel("Energy [MeV]") +plt.ylabel("Oscillation probability") +plt.legend() +plt.show() +plt.savefig("oscillation_probs.png") \ No newline at end of file diff --git a/python/pyNuTens.cpp b/python/pyNuTens.cpp new file mode 100644 index 0000000..6e96015 --- /dev/null +++ b/python/pyNuTens.cpp @@ -0,0 +1,157 @@ +// pybind11 stuff +#include +#include +#include +#include + +#include + +// nuTens stuff +#include +#include +#include +#include + +namespace py = pybind11; + +void initTensor(py::module & /*m*/); 
+void initPropagator(py::module & /*m*/); +void initDtypes(py::module & /*m*/); + +// initialise the top level module "pyNuTens" +// NOLINTNEXTLINE +PYBIND11_MODULE(pyNuTens, m) +{ + initTensor(m); + initPropagator(m); + initDtypes(m); +} + +void initTensor(py::module &m) +{ + auto m_tensor = m.def_submodule("tensor"); + + py::class_<Tensor>(m_tensor, "Tensor") + .def(py::init<>()) // <- default constructor + .def(py::init<std::vector<float>, NTdtypes::scalarType, NTdtypes::deviceType, bool>()) + + // property setters + .def("dtype", &Tensor::dType, py::return_value_policy::reference, "Set the data type of the tensor") + .def("device", &Tensor::device, py::return_value_policy::reference, "Set the device that the tensor lives on") + .def("requires_grad", &Tensor::requiresGrad, py::return_value_policy::reference, + "Set whether or not this tensor requires gradient to be calculated") + .def("has_batch_dim", &Tensor::hasBatchDim, py::return_value_policy::reference, + "Set whether or not the first dimension should be interpreted as a batch dim for this tensor") + + // utilities + .def("to_string", &Tensor::toString, "print out a summary of this tensor to a string") + .def("add_batch_dim", &Tensor::addBatchDim, py::return_value_policy::reference, + "Add a batch dimension to the start of this tensor if it doesn't have one already") + + // setters + .def("set_value", py::overload_cast<const Tensor &, const Tensor &>(&Tensor::setValue), + "Set a value at a specific index of this tensor") + .def("set_value", + py::overload_cast<const std::vector<Tensor::indexType> &, const Tensor &>(&Tensor::setValue), + "Set a value at a specific index of this tensor") + .def("set_value", py::overload_cast<const std::vector<int> &, float>(&Tensor::setValue), + "Set a value at a specific index of this tensor") + .def("set_value", py::overload_cast<const std::vector<int> &, std::complex<float>>(&Tensor::setValue), + "Set a value at a specific index of this tensor") + + // getters + .def("get_shape", &Tensor::getShape, "Get the shape of this tensor") + .def("get_values", &Tensor::getValues, "Get the subset of values in this tensor at a specified location") + .def("get_value", &Tensor::getVariantValue, "Get the data stored at a particular index of the tensor") + + // complex number stuff + .def("real", &Tensor::real, "Get real part of a complex tensor") + .def("imag", &Tensor::imag, "Get imaginary part of a complex tensor") + .def("conj", &Tensor::conj, "Get complex conjugate of a complex tensor") + .def("angle", &Tensor::angle, "Get element-wise phases of a complex tensor") + .def("abs", &Tensor::abs, "Get element-wise magnitudes of a complex tensor") + + // gradient stuff + .def("backward", &Tensor::backward, py::call_guard<py::gil_scoped_release>(), + "Do the backward propagation from this tensor") + .def("grad", &Tensor::grad, "Get the accumulated gradient stored in this tensor after calling backward()") + + // operator overloads + .def(-py::self); + + ; // end of Tensor non-static functions + + // Tensor creation functions + m_tensor.def("eye", &Tensor::eye, "Create a tensor initialised with an identity matrix"); + m_tensor.def("rand", &Tensor::rand, "Create a tensor initialised with random values"); + m_tensor.def("diag", &Tensor::diag, "Create a tensor with specified values along the diagonal"); + m_tensor.def("ones", &Tensor::ones, "Create a tensor initialised with ones"); + m_tensor.def("zeros", &Tensor::zeros, "Create a tensor initialised with zeros"); + + // maffs + m_tensor.def("matmul", &Tensor::matmul, "Matrix multiplication"); + m_tensor.def("outer", &Tensor::outer, "Tensor outer product"); + m_tensor.def("mul", &Tensor::mul, "Element-wise multiplication"); +
m_tensor.def("div", &Tensor::div, "Element-wise division"); + m_tensor.def("pow", py::overload_cast(&Tensor::pow), "Raise to scalar power"); + m_tensor.def("pow", py::overload_cast>(&Tensor::pow), "Raise to scalar power"); + m_tensor.def("exp", &Tensor::exp, "Take exponential"); + m_tensor.def("transpose", &Tensor::transpose, "Get the matrix transpose"); + m_tensor.def("scale", py::overload_cast(&Tensor::scale), "Scalar multiplication"); + m_tensor.def("scale", py::overload_cast>(&Tensor::scale), + "Scalar multiplication"); + m_tensor.def("sin", &Tensor::sin, "Element-wise trigonometric sine function"); + m_tensor.def("cos", &Tensor::cos, "Element-wise trigonometric cosine function"); + m_tensor.def("sum", py::overload_cast(&Tensor::sum), "Get the sum of all values in a tensor"); + m_tensor.def("sum", py::overload_cast &>(&Tensor::sum), + "Get the sum of all values in a tensor"); + m_tensor.def("cumsum", py::overload_cast(&Tensor::cumsum), + "Get the cumulative sum over some dimension"); + // m_tensor.def("eig", &Tensor::eig. "calculate eigenvalues") <- Will need to define some additional fn to return + // tuple of values +} + +void initPropagator(py::module &m) +{ + auto m_propagator = m.def_submodule("propagator"); + + py::class_(m_propagator, "Propagator") + .def(py::init()) + .def("calculate_probabilities", &Propagator::calculateProbs, + "Calculate the oscillation probabilities for neutrinos of specified energies") + .def("set_matter_solver", &Propagator::setMatterSolver, + "Set the matter effect solver that the propagator should use") + .def("set_masses", &Propagator::setMasses, "Set the neutrino mass state eigenvalues") + .def("set_PMNS", py::overload_cast(&Propagator::setPMNS), + "Set the PMNS matrix that the propagator should use") + .def("set_PMNS", py::overload_cast &, float>(&Propagator::setPMNS), + "Set the PMNS matrix that the propagator should use") + .def("set_PMNS", py::overload_cast &, std::complex>(&Propagator::setPMNS), + "Set the PMNS matrix that the propagator should use"); + + py::class_>(m_propagator, "BaseSolver"); + + py::class_, BaseMatterSolver>( + m_propagator, "ConstDensitySolver") + .def(py::init()); +} + +void initDtypes(py::module &m) +{ + auto m_dtypes = m.def_submodule("dtype"); + + py::enum_(m_dtypes, "scalar_type") + .value("int", NTdtypes::scalarType::kInt) + .value("float", NTdtypes::scalarType::kFloat) + .value("double", NTdtypes::scalarType::kDouble) + .value("complex_float", NTdtypes::scalarType::kComplexFloat) + .value("complex_double", NTdtypes::scalarType::kComplexDouble) + + ; + + py::enum_(m_dtypes, "device_type") + .value("cpu", NTdtypes::deviceType::kCPU) + .value("gpu", NTdtypes::deviceType::kGPU) + + ; +} \ No newline at end of file diff --git a/tests/tensor-basic.cpp b/tests/tensor-basic.cpp index 3b0c22c..1b0608f 100644 --- a/tests/tensor-basic.cpp +++ b/tests/tensor-basic.cpp @@ -31,7 +31,7 @@ int main() tensorFloat.setValue({2, 2}, 8.0); std::cout << "real: " << std::endl << tensorFloat.real() << std::endl; std::cout << "Middle value: " << tensorFloat.getValue({1, 1}) << std::endl; - std::cout << "tensorFloat({'...', 1}) = " << tensorFloat.getValue({1, "..."}) << std::endl; + std::cout << "tensorFloat({'...', 1}) = " << tensorFloat.getValues({1, "..."}) << std::endl; Tensor realSquared = Tensor::matmul(tensorFloat, tensorFloat); std::cout << "Squared: " << std::endl;