Merge pull request #41 from ewanwm/feature_improve_tensor_interface
Feature improve tensor interface
  • Loading branch information
ewanwm authored Aug 8, 2024
2 parents 6211f2a + 08e02b0 commit 41ae3ec
Showing 11 changed files with 172 additions and 152 deletions.
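
The common thread in the diffs below is how tensors are created: the chained member-function initialisers (calling ones/zeros on a default-constructed Tensor) are replaced by static factory constructors, alongside a few new constructors (a values constructor, eye, rand, diag) and batch-dimension helpers. A minimal before/after sketch of the pattern, using only names that appear in the diffs; it is an illustration, not code taken from the commit:

// old style: default-construct, then initialise in place
Tensor t_old;
t_old.zeros({1, 3, 3}, NTdtypes::kComplexFloat).requiresGrad(false);

// new style: static constructors return the tensor directly
Tensor t_new = Tensor::zeros({1, 3, 3}, NTdtypes::kComplexFloat).requiresGrad(false);

// other constructors added in nuTens/tensors/tensor.hpp
Tensor v   = Tensor({0.1F, 0.2F, 0.3F}, NTdtypes::kFloat); // 1-d tensor from explicit values
Tensor id  = Tensor::eye(3);                               // 3x3 identity
Tensor rnd = Tensor::rand({16, 1});                        // uniform random values in [0, 1]
Tensor dm  = Tensor::diag(v);                              // square matrix with v on its diagonal
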
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -22,7 +22,7 @@ enable_testing()
find_package(Torch REQUIRED)
find_package(Protobuf REQUIRED)
message("Torch cxx flags: ${TORCH_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "-Wabi-tag ${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")

include(cmake/CPM.cmake)

71 changes: 22 additions & 49 deletions benchmarks/benchmarks.cpp
@@ -7,12 +7,9 @@
Tensor buildPMNS(const Tensor &theta12, const Tensor &theta13, const Tensor &theta23, const Tensor &deltaCP)
{
// set up the three matrices to build the PMNS matrix
Tensor M1;
Tensor M2;
Tensor M3;
M1.zeros({1, 3, 3}, NTdtypes::kComplexFloat).requiresGrad(false);
M2.zeros({1, 3, 3}, NTdtypes::kComplexFloat).requiresGrad(false);
M3.zeros({1, 3, 3}, NTdtypes::kComplexFloat).requiresGrad(false);
Tensor M1 = Tensor::zeros({1, 3, 3}, NTdtypes::kComplexFloat).requiresGrad(false);
Tensor M2 = Tensor::zeros({1, 3, 3}, NTdtypes::kComplexFloat).requiresGrad(false);
Tensor M3 = Tensor::zeros({1, 3, 3}, NTdtypes::kComplexFloat).requiresGrad(false);

M1.setValue({0, 0, 0}, 1.0);
M1.setValue({0, 1, 1}, Tensor::cos(theta23));
@@ -42,16 +39,14 @@ Tensor buildPMNS(const Tensor &theta12, const Tensor &theta13, const Tensor &theta23, const Tensor &deltaCP)
return PMNS;
}

static void batchedOscProbs(const Propagator &prop, Tensor &energies, int batchSize, int nBatches)
static void batchedOscProbs(const Propagator &prop, int batchSize, int nBatches)
{
for (int _ = 0; _ < nBatches; _++)
{
// set random energy values
for (int i = 0; i < batchSize; i++)
{
// set to random energy between 0 and 10000.0 MeV
energies.setValue({i, 0}, ((float)std::rand() / (float)RAND_MAX) * 10000.0);
}

Tensor energies =
Tensor::scale(Tensor::rand({batchSize, 1}).dType(NTdtypes::kFloat).requiresGrad(false), 10000.0) +
Tensor({100.0});

// calculate the osc probabilities
// static_cast<void> to discard the return value that we're not supposed to discard :)
@@ -63,23 +58,12 @@ static void BM_vacuumOscillations(benchmark::State &state)
{

// set up the inputs
Tensor energies;
energies.zeros({state.range(0), 1}, NTdtypes::kFloat).requiresGrad(false);

Tensor masses;
masses.ones({1, 3}, NTdtypes::kFloat).requiresGrad(false);
masses.setValue({0, 0}, 0.1);
masses.setValue({0, 1}, 0.2);
masses.setValue({0, 2}, 0.3);

Tensor theta23;
Tensor theta13;
Tensor theta12;
Tensor deltaCP;
theta23.ones({1}, NTdtypes::kComplexFloat).requiresGrad(false).setValue({0}, 0.23);
theta13.ones({1}, NTdtypes::kComplexFloat).requiresGrad(false).setValue({0}, 0.13);
theta12.ones({1}, NTdtypes::kComplexFloat).requiresGrad(false).setValue({0}, 0.12);
deltaCP.ones({1}, NTdtypes::kComplexFloat).requiresGrad(false).setValue({0}, 0.5);
Tensor masses = Tensor({0.1, 0.2, 0.3}, NTdtypes::kFloat).requiresGrad(false).addBatchDim();

Tensor theta23 = Tensor({0.23}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
Tensor theta13 = Tensor({0.13}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
Tensor theta12 = Tensor({0.12}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
Tensor deltaCP = Tensor({0.5}).dType(NTdtypes::kComplexFloat).requiresGrad(false);

Tensor PMNS = buildPMNS(theta12, theta13, theta23, deltaCP);

@@ -94,31 +78,20 @@ static void BM_vacuumOscillations(benchmark::State &state)
for (auto _ : state)
{
// This code gets timed
batchedOscProbs(vacuumProp, energies, state.range(0), state.range(1));
batchedOscProbs(vacuumProp, state.range(0), state.range(1));
}
}

static void BM_constMatterOscillations(benchmark::State &state)
{

// set up the inputs
Tensor energies;
energies.zeros({state.range(0), 1}, NTdtypes::kFloat).requiresGrad(false);

Tensor masses;
masses.ones({1, 3}, NTdtypes::kFloat).requiresGrad(false);
masses.setValue({0, 0}, 0.1);
masses.setValue({0, 1}, 0.2);
masses.setValue({0, 2}, 0.3);

Tensor theta23;
Tensor theta13;
Tensor theta12;
Tensor deltaCP;
theta23.ones({1}, NTdtypes::kComplexFloat).requiresGrad(false).setValue({0}, 0.23);
theta13.ones({1}, NTdtypes::kComplexFloat).requiresGrad(false).setValue({0}, 0.13);
theta12.ones({1}, NTdtypes::kComplexFloat).requiresGrad(false).setValue({0}, 0.12);
deltaCP.ones({1}, NTdtypes::kComplexFloat).requiresGrad(false).setValue({0}, 0.5);
Tensor masses = Tensor({0.1, 0.2, 0.3}, NTdtypes::kFloat).requiresGrad(false).addBatchDim();

Tensor theta23 = Tensor({0.23}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
Tensor theta13 = Tensor({0.13}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
Tensor theta12 = Tensor({0.12}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
Tensor deltaCP = Tensor({0.5}).dType(NTdtypes::kComplexFloat).requiresGrad(false);

Tensor PMNS = buildPMNS(theta12, theta13, theta23, deltaCP);

@@ -135,7 +108,7 @@ static void BM_constMatterOscillations(benchmark::State &state)
for (auto _ : state)
{
// This code gets timed
batchedOscProbs(matterProp, energies, state.range(0), state.range(1));
batchedOscProbs(matterProp, state.range(0), state.range(1));
}
}

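Note that the benchmarks no longer carry a pre-allocated energies tensor: each batch now draws fresh random energies with Tensor::rand. A sketch of the resulting inner loop of batchedOscProbs (the Propagator set-up is elided, and the calculateProbs call is assumed to keep the static_cast<void> pattern mentioned in the comment above):

for (int batch = 0; batch < nBatches; batch++)
{
    // uniform values in [0, 1), scaled to [0, 10000) MeV and shifted by 100 MeV
    Tensor energies =
        Tensor::scale(Tensor::rand({batchSize, 1}).dType(NTdtypes::kFloat).requiresGrad(false), 10000.0) +
        Tensor({100.0});

    // discard the [[nodiscard]] return value; only the timing matters here
    static_cast<void>(prop.calculateProbs(energies));
}
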
8 changes: 2 additions & 6 deletions nuTens/propagator/const-density-solver.cpp
@@ -3,21 +3,17 @@
void ConstDensityMatterSolver::calculateEigenvalues(const Tensor &energies, Tensor &eigenvectors, Tensor &eigenvalues)
{
NT_PROFILE();
Tensor hamiltonian;
hamiltonian.zeros({energies.getBatchDim(), nGenerations, nGenerations}, NTdtypes::kComplexFloat);
Tensor hamiltonian = Tensor::zeros({energies.getBatchDim(), nGenerations, nGenerations}, NTdtypes::kComplexFloat);

for (int i = 0; i < nGenerations; i++)
{
for (int j = 0; j < nGenerations; j++)
{
hamiltonian.setValue({"...", i, j},
Tensor::div(diagMassMatrix.getValue({0, i, j}), energies.getValue({"...", 0})) -
Tensor::div(diagMassMatrix.getValue({i, j}), energies.getValue({"...", 0})) -
electronOuter.getValue({i, j}));
}
}

eigenvectors.zeros({1, nGenerations, nGenerations}, NTdtypes::kComplexFloat).requiresGrad(false);
eigenvalues.zeros({1, nGenerations, nGenerations}, NTdtypes::kComplexFloat).requiresGrad(false);

Tensor::eig(hamiltonian, eigenvectors, eigenvalues);
}
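
For reference, the loop above assembles, for each batch entry b,

    hamiltonian[b, i, j] = diagMassMatrix[i, j] / energies[b, 0] - electronOuter[i, j]

i.e. the vacuum m^2 / (2E) term (the factor 1/2 is already folded into diagMassMatrix by setMasses()) minus the electronOuter matter term. Two things change here: diagMassMatrix is now indexed without a leading batch index, matching the Tensor::diag construction in the header change below, and the eigenvector/eigenvalue outputs are no longer zero-initialised in the solver because the caller now allocates them (see propagator.cpp below).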
15 changes: 7 additions & 8 deletions nuTens/propagator/const-density-solver.hpp
@@ -36,7 +36,7 @@ class ConstDensityMatterSolver : public BaseMatterSolver
/// @arg density The electron density of the material to propagate in
ConstDensityMatterSolver(int nGenerations, float density) : nGenerations(nGenerations), density(density)
{
diagMassMatrix.zeros({1, nGenerations, nGenerations}, NTdtypes::kFloat);
diagMassMatrix = Tensor::zeros({1, nGenerations, nGenerations}, NTdtypes::kFloat);
};

/// @name Setters
@@ -59,17 +59,16 @@ class ConstDensityMatterSolver : public BaseMatterSolver
/// @param newMasses The new masses
inline void setMasses(const Tensor &newMasses) override
{
assert((newMasses.getNdim() == 2) && (newMasses.hasBatchDim()));
NT_PROFILE();

masses = newMasses;

Tensor m = masses.getValue({0, "..."});
Tensor diag = Tensor::scale(Tensor::mul(m, m), 0.5);

// construct the diagonal mass^2 matrix used in the hamiltonian
diagMassMatrix.requiresGrad(false);
for (int i = 0; i < nGenerations; i++)
{
auto m_i = masses.getValue<float>({0, i});
diagMassMatrix.setValue({0, i, i}, m_i * m_i / 2.0);
};
diagMassMatrix.requiresGrad(true);
diagMassMatrix = Tensor::diag(diag).requiresGrad(true);
}

/// @}
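Since setMasses() now asserts a 2-d input with a batch dimension, callers pass a 1-d mass tensor through addBatchDim() first, as the updated benchmarks do. A usage sketch; the density value is arbitrary and purely illustrative:

ConstDensityMatterSolver solver(3, 2.79F); // 3 generations, illustrative electron density

Tensor masses = Tensor({0.1F, 0.2F, 0.3F}, NTdtypes::kFloat).requiresGrad(false).addBatchDim();
solver.setMasses(masses); // internally builds diagMassMatrix = diag(0.5 * m * m)
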
12 changes: 7 additions & 5 deletions nuTens/propagator/propagator.cpp
@@ -10,8 +10,11 @@ Tensor Propagator::calculateProbs(const Tensor &energies) const
// matrix, otherwise just use the "raw" ones
if (_matterSolver != nullptr)
{
Tensor eigenVals;
Tensor eigenVecs;
Tensor eigenVals =
Tensor::zeros({1, _nGenerations, _nGenerations}, NTdtypes::kComplexFloat).requiresGrad(false);
Tensor eigenVecs =
Tensor::zeros({1, _nGenerations, _nGenerations}, NTdtypes::kComplexFloat).requiresGrad(false);

_matterSolver->calculateEigenvalues(energies, eigenVecs, eigenVals);
Tensor effectiveMassesSq = Tensor::mul(eigenVals, Tensor::scale(energies, 2.0));
Tensor effectivePMNS = Tensor::matmul(_pmnsMatrix, eigenVecs);
@@ -31,9 +34,8 @@ Tensor Propagator::_calculateProbs(const Tensor &energies, const Tensor &massesS
{
NT_PROFILE();

Tensor weightMatrix;
weightMatrix.ones({energies.getBatchDim(), _nGenerations, _nGenerations}, NTdtypes::kComplexFloat)
.requiresGrad(false);
Tensor weightMatrix = Tensor::ones({energies.getBatchDim(), _nGenerations, _nGenerations}, NTdtypes::kComplexFloat)
.requiresGrad(false);

Tensor weightVector = Tensor::exp(
Tensor::div(Tensor::scale(massesSq, std::complex<float>(-1.0J) * _baseline), Tensor::scale(energies, 2.0)));
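With the solver no longer allocating its outputs, calculateProbs() pre-allocates the eigenvalue and eigenvector tensors and the matter solver fills them in place. A sketch of that calling convention (member names written without their leading underscores for readability):

Tensor eigenVals = Tensor::zeros({1, nGenerations, nGenerations}, NTdtypes::kComplexFloat).requiresGrad(false);
Tensor eigenVecs = Tensor::zeros({1, nGenerations, nGenerations}, NTdtypes::kComplexFloat).requiresGrad(false);

// fills eigenVecs and eigenVals for every batch entry in energies
matterSolver->calculateEigenvalues(energies, eigenVecs, eigenVals);
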
2 changes: 1 addition & 1 deletion nuTens/tensors/CMakeLists.txt
@@ -5,7 +5,7 @@ IF(TORCH_FOUND)
target_compile_definitions(tensor PUBLIC USE_PYTORCH)
ENDIF()

IF(USE_PCH)
IF(NT_USE_PCH)
target_link_libraries(tensor PUBLIC nuTens-pch)

ELSE()
91 changes: 59 additions & 32 deletions nuTens/tensors/tensor.hpp
@@ -1,6 +1,7 @@
#pragma once

#include <any>
#include <cassert>
#include <complex>
#include <iostream>
#include <map>
@@ -25,51 +26,63 @@ class Tensor
* @brief Basic tensor class
*
* Tensor defines a basic interface for creating and manipulating tensors.
* To create tensors you should use the Initialisers. These can be used on
* To create tensors you should use the Constructors. These can be used on
* their own or chained together with the Setters to create the desired
* tensor.
*
* For example
* \code{.cpp}
* Tensor t;
* t.ones({3,3}).dType(NTdtypes::kFloat).device(NTdtypes::kGPU);
* Tensor t = Tensor::ones({3,3}).dType(NTdtypes::kFloat).device(NTdtypes::kGPU);
* \endcode
* will get you a 3x3 tensor of floats that lives on the GPU.
* This is equivalent to
* \code{.cpp}
* Tensor t;
* t.ones({3,3}, NTdtypes::kFloat, NTdtypes::kGPU);
* Tensor t = Tensor::ones({3,3}, NTdtypes::kFloat, NTdtypes::kGPU);
* \endcode
*/

public:
using indexType = std::variant<int, std::string>;

/// @name Initialisers
/// Use these methods to initialise the tensor
/// @name Constructors
/// Use these methods to construct tensors
/// @{

/// @brief Initialise this tensor with ones
/// @arg length The length of the initialised tensor
/// @arg type The data type of the initialised tensor
Tensor &ones(int length, NTdtypes::scalarType type, NTdtypes::deviceType device = NTdtypes::kCPU,
bool requiresGrad = true);
/// @brief Initialise this tensor with ones
/// @brief Default constructor with no initialisation
Tensor(){};

/// @brief Construct a 1-d array with specified values
/// @arg values The values to include in the tensor
Tensor(std::vector<float> values, NTdtypes::scalarType type = NTdtypes::kFloat,
NTdtypes::deviceType device = NTdtypes::kCPU, bool requiresGrad = true);

/// @brief Construct an identity tensor (has to be a 2d square tensor)
/// @arg n The size of one of the sides of the tensor
/// @arg type The data type of the tensor
static Tensor eye(int n, NTdtypes::scalarType type = NTdtypes::kFloat, NTdtypes::deviceType device = NTdtypes::kCPU,
bool requiresGrad = true);

/// @brief Construct a tensor with entries randomly initialised in the range [0, 1]
/// @arg shape The desired shape of the initialised tensor
/// @arg type The data type of the tensor
static Tensor rand(const std::vector<long int> &shape, NTdtypes::scalarType type = NTdtypes::kFloat,
NTdtypes::deviceType device = NTdtypes::kCPU, bool requiresGrad = true);

/// @brief Construct a tensor with diag values along the diagonal, and zeros elsewhere
/// @arg diag A 1-d tensor which represents the desired diagonal values
static Tensor diag(const Tensor &diag);

/// @brief Construct a tensor with ones
/// @arg shape The desired shape of the initialised tensor
/// @arg type The data type of the initialised tensor
Tensor &ones(const std::vector<long int> &shape, NTdtypes::scalarType type,
NTdtypes::deviceType device = NTdtypes::kCPU, bool requiresGrad = true);

/// @brief Initialise this tensor with zeros
/// @arg length The length of the initialised tensor
/// @arg type The data type of the initialised tensor
Tensor &zeros(int length, NTdtypes::scalarType type, NTdtypes::deviceType device = NTdtypes::kCPU,
bool requiresGrad = true);
/// @brief Initialise this tensor with zeros
/// @arg type The data type of the tensor
static Tensor ones(const std::vector<long int> &shape, NTdtypes::scalarType type = NTdtypes::kFloat,
NTdtypes::deviceType device = NTdtypes::kCPU, bool requiresGrad = true);

/// @brief Construct a tensor with zeros
/// @arg shape The desired shape of the initialised tensor
/// @arg type The data type of the initialised tensor
Tensor &zeros(const std::vector<long int> &shape, NTdtypes::scalarType type,
NTdtypes::deviceType device = NTdtypes::kCPU, bool requiresGrad = true);
/// @arg type The data type of the tensor
static Tensor zeros(const std::vector<long int> &shape, NTdtypes::scalarType type = NTdtypes::kFloat,
NTdtypes::deviceType device = NTdtypes::kCPU, bool requiresGrad = true);

/// @}

@@ -81,8 +94,17 @@ class Tensor
Tensor &device(NTdtypes::deviceType device);
/// @brief Set whether the tensor requires a gradient
Tensor &requiresGrad(bool reqGrad);
/// @brief Set whether or not the first dimension should be interpreted as a batch dimension
inline Tensor &hasBatchDim(bool hasBatchDim)
{
_hasBatchDim = hasBatchDim;
return *this;
};
/// @}

/// @brief If the tensor does not already have a batch dimension (as set by hasBatchDim()) this will add one
Tensor &addBatchDim();

/// @name Matrix Arithmetic
/// Generally there are static functions with the pattern <function>(Mat1,
/// Mat2) which will return a new matrix and inline equivalents with the
@@ -275,8 +297,19 @@ class Tensor
/// @brief Get the shape of the tensor
[[nodiscard]] std::vector<int> getShape() const;

/// Get the name of the backend library used to deal with tensors
static std::string getTensorLibrary();

private:
bool _hasBatchDim = false;

// ###################################################
// ########## Tensor library specific stuff ##########
// ###################################################

// Defining this here as it has to be in a header due to using template :(
#if USE_PYTORCH
public:
/// @brief Get the value at a particular index of the tensor
/// @arg indices The indices of the value to set
template <typename T> inline T getValue(const std::vector<int> &indices)
Expand All @@ -301,13 +334,7 @@ class Tensor

return _tensor.item<T>();
}
#endif

/// Get the name of the backend library used to deal with tensors
static std::string getTensorLibrary();

#if USE_PYTORCH
public:
[[nodiscard]] inline const torch::Tensor &getTensor() const
{

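Finally, a short sketch of the new batch-dimension helpers and the templated getValue() declared above; the values are illustrative only, and getValue<T>() is available with the PyTorch backend (USE_PYTORCH):

Tensor masses = Tensor({0.1F, 0.2F, 0.3F}, NTdtypes::kFloat); // shape {3}
masses.addBatchDim();                                         // now shape {1, 3}, flagged as batched

Tensor energies = Tensor::rand({128, 1}).hasBatchDim(true);   // mark an existing leading dim as the batch dim

float m0 = masses.getValue<float>({0, 0});                    // read back a single element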