From 57a6e7a3531d6e742708bf28a74c730f7aba8919 Mon Sep 17 00:00:00 2001
From: Ewan Miller
Date: Thu, 25 Jul 2024 22:29:35 -0400
Subject: [PATCH] do what the linter says (has already fixed a potential bug with the device and scalar type maps in torch-tensor.cpp)
---
 nuTens/logging.hpp                         | 12 ++--
 nuTens/nuTens-pch.hpp                      |  2 +-
 nuTens/propagator/const-density-solver.hpp |  2 +-
 nuTens/propagator/propagator.cpp           | 13 +++--
 nuTens/tensors/tensor.hpp                  |  2 +
 nuTens/tensors/torch-tensor.cpp            | 67 +++++++++++++---------
 tests/barger-propagator.hpp                | 66 ++++++++++++++++-----
 tests/tensor-basic.cpp                     | 26 ++++-----
 tests/test-utils.hpp                       |  2 +-
 tests/two-flavour-const-matter.cpp         | 10 ++--
 tests/two-flavour-vacuum.cpp               |  3 +-
 11 files changed, 132 insertions(+), 73 deletions(-)

diff --git a/nuTens/logging.hpp b/nuTens/logging.hpp
index 5830950..258735b 100644
--- a/nuTens/logging.hpp
+++ b/nuTens/logging.hpp
@@ -45,22 +45,22 @@
 // value at runtime see
 // https://github.com/gabime/spdlog/wiki/1.-QuickStart#:~:text=Notice%20that%20spdlog%3A%3Aset_level%20is%20also%20necessary%20to%20print%20out%20debug%20or%20trace%20messages.
 #if NT_LOG_LEVEL == NT_LOG_LEVEL_TRACE
-static spdlog::level::level_enum runtimeLogLevel = spdlog::level::trace;
+const static spdlog::level::level_enum runtimeLogLevel = spdlog::level::trace;
 #elif NT_LOG_LEVEL == NT_LOG_LEVEL_DEBUG
-static spdlog::level::level_enum runtimeLogLevel = spdlog::level::debug;
+const static spdlog::level::level_enum runtimeLogLevel = spdlog::level::debug;
 #elif NT_LOG_LEVEL == NT_LOG_LEVEL_INFO
-static spdlog::level::level_enum runtimeLogLevel = spdlog::level::info;
+const static spdlog::level::level_enum runtimeLogLevel = spdlog::level::info;
 #elif NT_LOG_LEVEL == NT_LOG_LEVEL_WARNING
-static spdlog::level::level_enum runtimeLogLevel = spdlog::level::warning;
+const static spdlog::level::level_enum runtimeLogLevel = spdlog::level::warning;
 #elif NT_LOG_LEVEL == NT_LOG_LEVEL_ERROR
-static spdlog::level::level_enum runtimeLogLevel = spdlog::level::error;
+const static spdlog::level::level_enum runtimeLogLevel = spdlog::level::error;
 #elif NT_LOG_LEVEL == NT_LOG_LEVEL_SILENT
-static spdlog::level::level_enum runtimeLogLevel = spdlog::level::off;
+const static spdlog::level::level_enum runtimeLogLevel = spdlog::level::off;
 #endif
diff --git a/nuTens/nuTens-pch.hpp b/nuTens/nuTens-pch.hpp
index 68b8478..d9fcf0b 100644
--- a/nuTens/nuTens-pch.hpp
+++ b/nuTens/nuTens-pch.hpp
@@ -1,6 +1,6 @@
 #pragma once

-#include
+#include
 #include
 #include
diff --git a/nuTens/propagator/const-density-solver.hpp b/nuTens/propagator/const-density-solver.hpp
index e84d529..c5c4a9c 100644
--- a/nuTens/propagator/const-density-solver.hpp
+++ b/nuTens/propagator/const-density-solver.hpp
@@ -62,7 +62,7 @@ class ConstDensityMatterSolver : public BaseMatterSolver
     diagMassMatrix.requiresGrad(false);
     for (int i = 0; i < nGenerations; i++)
     {
-        float m_i = masses.getValue({0, i});
+        auto m_i = masses.getValue({0, i});
         diagMassMatrix.setValue({0, i, i}, m_i * m_i / 2.0);
     };
     diagMassMatrix.requiresGrad(true);
diff --git a/nuTens/propagator/propagator.cpp b/nuTens/propagator/propagator.cpp
index b15122c..5b83cee 100644
--- a/nuTens/propagator/propagator.cpp
+++ b/nuTens/propagator/propagator.cpp
@@ -2,22 +2,27 @@

 Tensor Propagator::calculateProbs(const Tensor &energies) const
 {
+    Tensor ret;
+
     // if a matter solver was specified, use effective values for masses and PMNS
     // matrix, otherwise just use the "raw" ones
     if (_matterSolver != nullptr)
     {
-        Tensor eigenVals, eigenVecs;
+        Tensor eigenVals;
+        Tensor eigenVecs;
         _matterSolver->calculateEigenvalues(energies, eigenVecs, eigenVals);
         Tensor effectiveMassesSq = Tensor::mul(eigenVals, Tensor::scale(energies, 2.0));
         Tensor effectivePMNS = Tensor::matmul(_PMNSmatrix, eigenVecs);

-        return _calculateProbs(energies, effectiveMassesSq, effectivePMNS);
+        ret = _calculateProbs(energies, effectiveMassesSq, effectivePMNS);
     }

     else
     {
-        return _calculateProbs(energies, Tensor::mul(_masses, _masses), _PMNSmatrix);
+        ret = _calculateProbs(energies, Tensor::mul(_masses, _masses), _PMNSmatrix);
     }
+
+    return ret;
 }

 Tensor Propagator::_calculateProbs(const Tensor &energies, const Tensor &massesSq, const Tensor &PMNS) const
@@ -27,7 +32,7 @@ Tensor Propagator::_calculateProbs(const Tensor &energies, const Tensor &massesS
         .requiresGrad(false);

     Tensor weightVector =
-        Tensor::exp(Tensor::div(Tensor::scale(massesSq, -1.0j * _baseline), Tensor::scale(energies, 2.0)));
+        Tensor::exp(Tensor::div(Tensor::scale(massesSq, -1.0J * _baseline), Tensor::scale(energies, 2.0)));

     for (int i = 0; i < _nGenerations; i++)
     {
diff --git a/nuTens/tensors/tensor.hpp b/nuTens/tensors/tensor.hpp
index fe55c6a..b9395c5 100644
--- a/nuTens/tensors/tensor.hpp
+++ b/nuTens/tensors/tensor.hpp
@@ -37,6 +37,8 @@ class Tensor
     */

   public:
+    typedef std::variant<int, std::string> indexType;
+
     /// @name Initialisers
     /// Use these methods to initialise the tensor
     /// @{
diff --git a/nuTens/tensors/torch-tensor.cpp b/nuTens/tensors/torch-tensor.cpp
index d42eb9c..02e32ba 100644
--- a/nuTens/tensors/torch-tensor.cpp
+++ b/nuTens/tensors/torch-tensor.cpp
@@ -2,14 +2,15 @@
 #include

 // map between the data types used in nuTens and those used by pytorch
-std::map<NTdtypes::scalarType, c10::ScalarType> scalarTypeMap = {{NTdtypes::kFloat, torch::kFloat},
-                                                                 {NTdtypes::kDouble, torch::kDouble},
-                                                                 {NTdtypes::kComplexFloat, torch::kComplexFloat},
-                                                                 {NTdtypes::kComplexDouble, torch::kComplexDouble}};
+const static std::map<NTdtypes::scalarType, c10::ScalarType> scalarTypeMap = {
+    {NTdtypes::kFloat, torch::kFloat},
+    {NTdtypes::kDouble, torch::kDouble},
+    {NTdtypes::kComplexFloat, torch::kComplexFloat},
+    {NTdtypes::kComplexDouble, torch::kComplexDouble}};

 // map between the device types used in nuTens and those used by pytorch
-std::map<NTdtypes::deviceType, c10::DeviceType> deviceTypeMap = {{NTdtypes::kCPU, torch::kCPU},
-                                                                 {NTdtypes::kGPU, torch::kCUDA}};
+const static std::map<NTdtypes::deviceType, c10::DeviceType> deviceTypeMap = {{NTdtypes::kCPU, torch::kCPU},
+                                                                              {NTdtypes::kGPU, torch::kCUDA}};

 std::string Tensor::getTensorLibrary()
 {
@@ -18,9 +19,10 @@ std::string Tensor::getTensorLibrary()

 Tensor &Tensor::ones(int length, NTdtypes::scalarType type, NTdtypes::deviceType device, bool requiresGrad)
 {
-    _tensor = torch::ones(
-        length,
-        torch::TensorOptions().dtype(scalarTypeMap[type]).device(deviceTypeMap[device]).requires_grad(requiresGrad));
+    _tensor = torch::ones(length, torch::TensorOptions()
+                                      .dtype(scalarTypeMap.at(type))
+                                      .device(deviceTypeMap.at(device))
+                                      .requires_grad(requiresGrad));

     return *this;
 }
@@ -28,34 +30,35 @@ Tensor &Tensor::ones(int length, NTdtypes::scalarType type, NTdtypes::deviceType
 Tensor &Tensor::ones(const std::vector<long int> &shape, NTdtypes::scalarType type, NTdtypes::deviceType device,
                      bool requiresGrad)
 {
-    _tensor = torch::ones(
-        c10::IntArrayRef(shape),
-        torch::TensorOptions().dtype(scalarTypeMap[type]).device(deviceTypeMap[device]).requires_grad(requiresGrad));
+    _tensor = torch::ones(c10::IntArrayRef(shape), torch::TensorOptions()
+                                                       .dtype(scalarTypeMap.at(type))
+                                                       .device(deviceTypeMap.at(device))
+                                                       .requires_grad(requiresGrad));
     return *this;
 }

 Tensor &Tensor::zeros(int length, NTdtypes::scalarType type, NTdtypes::deviceType device,
                       bool requiresGrad)
 {
-    _tensor = torch::zeros(length, scalarTypeMap[type]);
+    _tensor = torch::zeros(length, scalarTypeMap.at(type));
     return *this;
 }

 Tensor &Tensor::zeros(const std::vector<long int> &shape, NTdtypes::scalarType type, NTdtypes::deviceType device,
                       bool requiresGrad)
 {
-    _tensor = torch::zeros(c10::IntArrayRef(shape), scalarTypeMap[type]);
+    _tensor = torch::zeros(c10::IntArrayRef(shape), scalarTypeMap.at(type));
     return *this;
 }

 Tensor &Tensor::dType(NTdtypes::scalarType type)
 {
-    _tensor = _tensor.to(scalarTypeMap[type]);
+    _tensor = _tensor.to(scalarTypeMap.at(type));
     return *this;
 }

 Tensor &Tensor::device(NTdtypes::deviceType device)
 {
-    _tensor = _tensor.to(deviceTypeMap[device]);
+    _tensor = _tensor.to(deviceTypeMap.at(device));
     return *this;
 }

@@ -65,15 +68,19 @@ Tensor &Tensor::requiresGrad(bool reqGrad)
     return *this;
 }

-Tensor Tensor::getValue(const std::vector<std::variant<int, std::string>> &indices) const
+Tensor Tensor::getValue(const std::vector<Tensor::indexType> &indices) const
 {
     std::vector<at::indexing::TensorIndex> indicesVec;
-    for (size_t i = 0; i < indices.size(); i++)
+    for (const Tensor::indexType &i : indices)
     {
-        if (const int *index = std::get_if<int>(&indices[i]))
+        if (const int *index = std::get_if<int>(&i))
+        {
             indicesVec.push_back(at::indexing::TensorIndex(*index));
-        else if (const std::string *index = std::get_if<std::string>(&indices[i]))
+        }
+        else if (const std::string *index = std::get_if<std::string>(&i))
+        {
             indicesVec.push_back(at::indexing::TensorIndex((*index).c_str()));
+        }
         else
         {
             assert(false && "ERROR: Unsupported index type");
@@ -91,15 +98,19 @@ void Tensor::setValue(const Tensor &indices, const Tensor &value)
 {
     _tensor.index_put_({indices._tensor}, value._tensor);
 }

-void Tensor::setValue(const std::vector<std::variant<int, std::string>> &indices, const Tensor &value)
+void Tensor::setValue(const std::vector<Tensor::indexType> &indices, const Tensor &value)
 {
     std::vector<at::indexing::TensorIndex> indicesVec;
-    for (size_t i = 0; i < indices.size(); i++)
+    for (const Tensor::indexType &i : indices)
     {
-        if (const int *index = std::get_if<int>(&indices[i]))
+        if (const int *index = std::get_if<int>(&i))
+        {
             indicesVec.push_back(at::indexing::TensorIndex(*index));
-        else if (const std::string *index = std::get_if<std::string>(&indices[i]))
+        }
+        else if (const std::string *index = std::get_if<std::string>(&i))
+        {
             indicesVec.push_back(at::indexing::TensorIndex((*index).c_str()));
+        }
         else
         {
             assert(false && "ERROR: Unsupported index type");
@@ -113,9 +124,9 @@ void Tensor::setValue(const std::vector<std::variant<int, std::string>> &indices
 void Tensor::setValue(const std::vector<int> &indices, float value)
 {
     std::vector<at::indexing::TensorIndex> indicesVec;
-    for (size_t i = 0; i < indices.size(); i++)
+    for (const int &i : indices)
     {
-        indicesVec.push_back(at::indexing::TensorIndex(indices[i]));
+        indicesVec.push_back(at::indexing::TensorIndex(i));
     }

     _tensor.index_put_(indicesVec, value);
@@ -124,9 +135,9 @@ void Tensor::setValue(const std::vector<int> &indices, float value)
 void Tensor::setValue(const std::vector<int> &indices, std::complex<float> value)
 {
     std::vector<at::indexing::TensorIndex> indicesVec;
-    for (size_t i = 0; i < indices.size(); i++)
+    for (const int &i : indices)
     {
-        indicesVec.push_back(at::indexing::TensorIndex(indices[i]));
+        indicesVec.push_back(at::indexing::TensorIndex(i));
     }

     _tensor.index_put_(indicesVec, c10::complex<float>(value.real(), value.imag()));
diff --git a/tests/barger-propagator.hpp b/tests/barger-propagator.hpp
index 9b63df9..993db9e 100644
--- a/tests/barger-propagator.hpp
+++ b/tests/barger-propagator.hpp
@@ -1,6 +1,6 @@
 #pragma once

-#include
+#include
 #include
 #include
@@ -35,13 +35,13 @@ class TwoFlavourBarger
     };

     // characteristic length in vacuum
-    inline float lv(float energy)
+    inline const float lv(float energy)
     {
         return 4.0 * M_PI * energy / (_m1 * _m1 - _m2 * _m2);
     }
     // characteristic length in matter
-    inline float lm()
+    inline const float lm()
     {
         return 2.0 * M_PI / (Constants::Groot2 * _density);
     }
@@ -49,20 +49,36 @@
     // calculate the modified rotation angle
     inline float calculateEffectiveAngle(float energy)
     {
+        float ret;
+
         if (_density > 0.0)
-            return std::atan2(std::sin(2.0 * _theta), (std::cos(2.0 * _theta) - lv(energy) / lm())) / 2.0;
+        {
+            ret = std::atan2(std::sin(2.0 * _theta), (std::cos(2.0 * _theta) - lv(energy) / lm())) / 2.0;
+        }
         else
-            return _theta;
+        {
+            ret = _theta;
+        }
+
+        return ret;
     }

     // calculate the modified delta M^2
     inline float calculateEffectiveDm2(float energy)
     {
+        float ret;
+
         if (_density > 0.0)
-            return (_m1 * _m1 - _m2 * _m2) * std::sqrt(1.0 - 2.0 * (lv(energy) / lm()) * std::cos(2.0 * _theta) +
-                                                       (lv(energy) / lm()) * (lv(energy) / lm()));
+        {
+            ret = (_m1 * _m1 - _m2 * _m2) * std::sqrt(1.0 - 2.0 * (lv(energy) / lm()) * std::cos(2.0 * _theta) +
+                                                      (lv(energy) / lm()) * (lv(energy) / lm()));
+        }
         else
-            return (_m1 * _m1 - _m2 * _m2);
+        {
+            ret = (_m1 * _m1 - _m2 * _m2);
+        }
+
+        return ret;
     }

     // get the good old 2 flavour PMNS matrix entries
@@ -76,18 +92,30 @@
             std::cerr << " you supplied alpha = " << alpha << ", "
                       << "beta = " << beta << std::endl;
             std::cerr << "  " << __FILE__ << ": " << __LINE__ << std::endl;
+
+            throw;
         }

+        float ret;
+
         float gamma = calculateEffectiveAngle(energy);

         if (alpha == 0 && beta == 0)
-            return std::cos(gamma);
+        {
+            ret = std::cos(gamma);
+        }
         else if (alpha == 1 && beta == 1)
-            return std::cos(gamma);
+        {
+            ret = std::cos(gamma);
+        }
         else if (alpha == 0 && beta == 1)
-            return std::sin(gamma);
+        {
+            ret = std::sin(gamma);
+        }
         else if (alpha == 1 && beta == 0)
-            return -std::sin(gamma);
+        {
+            ret = -std::sin(gamma);
+        }

         else
         {
@@ -95,6 +123,8 @@
             std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
             throw;
         }
+
+        return ret;
     }

     // get the good old 2 flavour vacuum oscillation probability
@@ -111,6 +141,8 @@
             throw;
         }

+        float ret;
+
         // get the effective oscillation parameters
         // if in vacuum (_density <= 0.0) these should just return the "raw" values
         float gamma = calculateEffectiveAngle(energy);
@@ -124,9 +156,15 @@
         float onAxis = 1.0 - offAxis;

         if (alpha == beta)
-            return onAxis;
+        {
+            ret = onAxis;
+        }
         else
-            return offAxis;
+        {
+            ret = offAxis;
+        }
+
+        return ret;
     }

   private:
diff --git a/tests/tensor-basic.cpp b/tests/tensor-basic.cpp
index 28311b6..db26407 100644
--- a/tests/tensor-basic.cpp
+++ b/tests/tensor-basic.cpp
@@ -39,17 +39,17 @@ int main()
     std::cout << "Complex float: " << std::endl;
     Tensor tensorComplex;
     tensorComplex.zeros({3, 3}, NTdtypes::kComplexFloat).requiresGrad(false);
-    tensorComplex.setValue({0, 0}, std::complex<float>(0.0j));
-    tensorComplex.setValue({0, 1}, std::complex<float>(1.0j));
-    tensorComplex.setValue({0, 2}, std::complex<float>(2.0j));
+    tensorComplex.setValue({0, 0}, std::complex<float>(0.0J));
+    tensorComplex.setValue({0, 1}, std::complex<float>(1.0J));
+    tensorComplex.setValue({0, 2}, std::complex<float>(2.0J));

-    tensorComplex.setValue({1, 0}, std::complex<float>(3.0j));
-    tensorComplex.setValue({1, 1}, std::complex<float>(4.0j));
-    tensorComplex.setValue({1, 2}, std::complex<float>(5.0j));
+    tensorComplex.setValue({1, 0}, std::complex<float>(3.0J));
+    tensorComplex.setValue({1, 1}, std::complex<float>(4.0J));
+    tensorComplex.setValue({1, 2}, std::complex<float>(5.0J));

-    tensorComplex.setValue({2, 0}, std::complex<float>(6.0j));
-    tensorComplex.setValue({2, 1}, std::complex<float>(7.0j));
-    tensorComplex.setValue({2, 2}, std::complex<float>(8.0j));
+    tensorComplex.setValue({2, 0}, std::complex<float>(6.0J));
+    tensorComplex.setValue({2, 1}, std::complex<float>(7.0J));
+    tensorComplex.setValue({2, 2}, std::complex<float>(8.0J));

     std::cout << "real: " << std::endl << tensorComplex.real() << std::endl;
     std::cout << "imag: " << std::endl << tensorComplex.imag() << std::endl << std::endl;
@@ -119,10 +119,10 @@ int main()

     Tensor complexGradTest;
     complexGradTest.zeros({2, 2}, NTdtypes::kComplexFloat);
-    complexGradTest.setValue({0, 0}, std::complex<float>(0.0 + 0.0j));
-    complexGradTest.setValue({0, 1}, std::complex<float>(0.0 + 1.0j));
-    complexGradTest.setValue({1, 0}, std::complex<float>(1.0 + 0.0j));
-    complexGradTest.setValue({1, 1}, std::complex<float>(1.0 + 1.0j));
+    complexGradTest.setValue({0, 0}, std::complex<float>(0.0 + 0.0J));
+    complexGradTest.setValue({0, 1}, std::complex<float>(0.0 + 1.0J));
+    complexGradTest.setValue({1, 0}, std::complex<float>(1.0 + 0.0J));
+    complexGradTest.setValue({1, 1}, std::complex<float>(1.0 + 1.0J));
     complexGradTest.requiresGrad(true);

     Tensor complexGradSquared = Tensor::matmul(complexGradTest, complexGradTest).sum();
diff --git a/tests/test-utils.hpp b/tests/test-utils.hpp
index fb8cce2..95e1ffa 100644
--- a/tests/test-utils.hpp
+++ b/tests/test-utils.hpp
@@ -1,6 +1,6 @@
 #pragma once

-#include
+#include
 #include
diff --git a/tests/two-flavour-const-matter.cpp b/tests/two-flavour-const-matter.cpp
index a0d7a3c..7442160 100644
--- a/tests/two-flavour-const-matter.cpp
+++ b/tests/two-flavour-const-matter.cpp
@@ -6,7 +6,8 @@
 using namespace Testing;

 int main()
 {
-    float m1 = 1.0, m2 = 2.0;
+    float m1 = 1.0;
+    float m2 = 2.0;
     float energy = 100.0;
     float density = 2.6;
@@ -50,7 +51,8 @@ int main()
     tensorSolver.setPMNS(PMNS);
     tensorSolver.setMasses(masses);

-    Tensor eigenVals, eigenVecs;
+    Tensor eigenVals;
+    Tensor eigenVecs;
     tensorSolver.calculateEigenvalues(energies, eigenVecs, eigenVals);

     std::cout << "######## theta = " << theta << " ########" << std::endl;
@@ -59,8 +61,8 @@ int main()
     // expect
     std::cout << "tensorSolver eigenvals: " << std::endl;
     std::cout << eigenVals << std::endl;
-    float calcV1 = eigenVals.getValue({0, 0});
-    float calcV2 = eigenVals.getValue({0, 1});
+    auto calcV1 = eigenVals.getValue({0, 0});
+    auto calcV2 = eigenVals.getValue({0, 1});
     float effDm2 = (calcV1 - calcV2) * 2.0 * energy;

     TEST_EXPECTED(effDm2, bargerProp.calculateEffectiveDm2(energy), "effective dM^2 for theta == " << theta,
diff --git a/tests/two-flavour-vacuum.cpp b/tests/two-flavour-vacuum.cpp
index 8c01bd5..b724028 100644
--- a/tests/two-flavour-vacuum.cpp
+++ b/tests/two-flavour-vacuum.cpp
@@ -6,7 +6,8 @@
 using namespace Testing;

 int main()
 {
-    float m1 = 0.1, m2 = 0.5;
+    float m1 = 0.1;
+    float m2 = 0.5;
     float energy = 1.0;
     float baseline = 0.5;
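Note on the "potential bug" called out in the subject line: switching the scalar/device type lookups from operator[] to .at() matters for two reasons. std::map::operator[] cannot be called on a const map at all (which is what blocked marking the maps const static before this patch), and on a mutable map it silently default-constructs and inserts an entry for any key that is missing, returning a bogus value instead of reporting the error. A minimal standalone sketch of the difference; the scalarType enum and table below are illustrative stand-ins, not the real NTdtypes definitions:

#include <iostream>
#include <map>
#include <stdexcept>

// stand-in for the NTdtypes enum used as the map key in torch-tensor.cpp
enum class scalarType
{
    kFloat,
    kDouble,
    kComplexFloat
};

int main()
{
    // const is only possible because all lookups go through .at()
    const std::map<scalarType, int> table = {{scalarType::kFloat, 1}, {scalarType::kDouble, 2}};

    // table[scalarType::kComplexFloat] would not compile here (operator[] is
    // non-const); on a non-const map it would quietly insert {kComplexFloat, 0}
    // and hand back that default instead of flagging the missing key.
    try
    {
        std::cout << table.at(scalarType::kComplexFloat) << std::endl;
    }
    catch (const std::out_of_range &err)
    {
        // .at() fails loudly on an unmapped key, which is the desired behaviour
        std::cout << "lookup failed loudly: " << err.what() << std::endl;
    }

    return 0;
}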