Skip to content

Commit

Permalink
do what the linter says (has already fixed a potential bug with the device and scalar type maps in torch-tensor.cpp)
Browse files Browse the repository at this point in the history
  • Loading branch information
ewanwm committed Jul 26, 2024
1 parent b356bfe commit 57a6e7a
Show file tree
Hide file tree
Showing 11 changed files with 132 additions and 73 deletions.
12 changes: 6 additions & 6 deletions nuTens/logging.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -45,22 +45,22 @@
// value at runtime see
// https://github.com/gabime/spdlog/wiki/1.-QuickStart#:~:text=Notice%20that%20spdlog%3A%3Aset_level%20is%20also%20necessary%20to%20print%20out%20debug%20or%20trace%20messages.
#if NT_LOG_LEVEL == NT_LOG_LEVEL_TRACE
static spdlog::level::level_enum runtimeLogLevel = spdlog::level::trace;
const static spdlog::level::level_enum runtimeLogLevel = spdlog::level::trace;

#elif NT_LOG_LEVEL == NT_LOG_LEVEL_DEBUG
static spdlog::level::level_enum runtimeLogLevel = spdlog::level::debug;
const static spdlog::level::level_enum runtimeLogLevel = spdlog::level::debug;

#elif NT_LOG_LEVEL == NT_LOG_LEVEL_INFO
static spdlog::level::level_enum runtimeLogLevel = spdlog::level::info;
const static spdlog::level::level_enum runtimeLogLevel = spdlog::level::info;

#elif NT_LOG_LEVEL == NT_LOG_LEVEL_WARNING
static spdlog::level::level_enum runtimeLogLevel = spdlog::level::warning;
const static spdlog::level::level_enum runtimeLogLevel = spdlog::level::warning;

#elif NT_LOG_LEVEL == NT_LOG_LEVEL_ERROR
static spdlog::level::level_enum runtimeLogLevel = spdlog::level::error;
const static spdlog::level::level_enum runtimeLogLevel = spdlog::level::error;

#elif NT_LOG_LEVEL == NT_LOG_LEVEL_SILENT
static spdlog::level::level_enum runtimeLogLevel = spdlog::level::off;
const static spdlog::level::level_enum runtimeLogLevel = spdlog::level::off;

#endif

Expand Down
2 changes: 1 addition & 1 deletion nuTens/nuTens-pch.hpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
#pragma once

#include <math.h>
#include <cmath>

#include <any>
#include <complex>
Expand Down
2 changes: 1 addition & 1 deletion nuTens/propagator/const-density-solver.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ class ConstDensityMatterSolver : public BaseMatterSolver
diagMassMatrix.requiresGrad(false);
for (int i = 0; i < nGenerations; i++)
{
float m_i = masses.getValue<float>({0, i});
auto m_i = masses.getValue<float>({0, i});
diagMassMatrix.setValue({0, i, i}, m_i * m_i / 2.0);
};
diagMassMatrix.requiresGrad(true);
Expand Down
13 changes: 9 additions & 4 deletions nuTens/propagator/propagator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,22 +2,27 @@

Tensor Propagator::calculateProbs(const Tensor &energies) const
{
Tensor ret;

// if a matter solver was specified, use effective values for masses and PMNS
// matrix, otherwise just use the "raw" ones
if (_matterSolver != nullptr)
{
Tensor eigenVals, eigenVecs;
Tensor eigenVals;
Tensor eigenVecs;
_matterSolver->calculateEigenvalues(energies, eigenVecs, eigenVals);
Tensor effectiveMassesSq = Tensor::mul(eigenVals, Tensor::scale(energies, 2.0));
Tensor effectivePMNS = Tensor::matmul(_PMNSmatrix, eigenVecs);

return _calculateProbs(energies, effectiveMassesSq, effectivePMNS);
ret = _calculateProbs(energies, effectiveMassesSq, effectivePMNS);
}

else
{
return _calculateProbs(energies, Tensor::mul(_masses, _masses), _PMNSmatrix);
ret = _calculateProbs(energies, Tensor::mul(_masses, _masses), _PMNSmatrix);
}

return ret;
}

Tensor Propagator::_calculateProbs(const Tensor &energies, const Tensor &massesSq, const Tensor &PMNS) const
Expand All @@ -27,7 +32,7 @@ Tensor Propagator::_calculateProbs(const Tensor &energies, const Tensor &massesS
.requiresGrad(false);

Tensor weightVector =
Tensor::exp(Tensor::div(Tensor::scale(massesSq, -1.0j * _baseline), Tensor::scale(energies, 2.0)));
Tensor::exp(Tensor::div(Tensor::scale(massesSq, -1.0J * _baseline), Tensor::scale(energies, 2.0)));

for (int i = 0; i < _nGenerations; i++)
{
Expand Down
2 changes: 2 additions & 0 deletions nuTens/tensors/tensor.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,8 @@ class Tensor
*/

public:
typedef std::variant<int, std::string> indexType;

/// @name Initialisers
/// Use these methods to initialise the tensor
/// @{
Expand Down
67 changes: 39 additions & 28 deletions nuTens/tensors/torch-tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,15 @@
#include <nuTens/tensors/tensor.hpp>

// map between the data types used in nuTens and those used by pytorch
std::map<NTdtypes::scalarType, c10::ScalarType> scalarTypeMap = {{NTdtypes::kFloat, torch::kFloat},
{NTdtypes::kDouble, torch::kDouble},
{NTdtypes::kComplexFloat, torch::kComplexFloat},
{NTdtypes::kComplexDouble, torch::kComplexDouble}};
const static std::map<NTdtypes::scalarType, c10::ScalarType> scalarTypeMap = {
{NTdtypes::kFloat, torch::kFloat},
{NTdtypes::kDouble, torch::kDouble},
{NTdtypes::kComplexFloat, torch::kComplexFloat},
{NTdtypes::kComplexDouble, torch::kComplexDouble}};

// map between the device types used in nuTens and those used by pytorch
std::map<NTdtypes::deviceType, c10::DeviceType> deviceTypeMap = {{NTdtypes::kCPU, torch::kCPU},
{NTdtypes::kGPU, torch::kCUDA}};
const static std::map<NTdtypes::deviceType, c10::DeviceType> deviceTypeMap = {{NTdtypes::kCPU, torch::kCPU},
{NTdtypes::kGPU, torch::kCUDA}};

std::string Tensor::getTensorLibrary()
{
Expand All @@ -18,44 +19,46 @@ std::string Tensor::getTensorLibrary()

Tensor &Tensor::ones(int length, NTdtypes::scalarType type, NTdtypes::deviceType device, bool requiresGrad)
{
_tensor = torch::ones(
length,
torch::TensorOptions().dtype(scalarTypeMap[type]).device(deviceTypeMap[device]).requires_grad(requiresGrad));
_tensor = torch::ones(length, torch::TensorOptions()
.dtype(scalarTypeMap.at(type))
.device(deviceTypeMap.at(device))
.requires_grad(requiresGrad));

return *this;
}

Tensor &Tensor::ones(const std::vector<long int> &shape, NTdtypes::scalarType type, NTdtypes::deviceType device,
bool requiresGrad)
{
_tensor = torch::ones(
c10::IntArrayRef(shape),
torch::TensorOptions().dtype(scalarTypeMap[type]).device(deviceTypeMap[device]).requires_grad(requiresGrad));
_tensor = torch::ones(c10::IntArrayRef(shape), torch::TensorOptions()
.dtype(scalarTypeMap.at(type))
.device(deviceTypeMap.at(device))
.requires_grad(requiresGrad));
return *this;
}

Tensor &Tensor::zeros(int length, NTdtypes::scalarType type, NTdtypes::deviceType device, bool requiresGrad)
{
_tensor = torch::zeros(length, scalarTypeMap[type]);
_tensor = torch::zeros(length, scalarTypeMap.at(type));
return *this;
}

Tensor &Tensor::zeros(const std::vector<long int> &shape, NTdtypes::scalarType type, NTdtypes::deviceType device,
bool requiresGrad)
{
_tensor = torch::zeros(c10::IntArrayRef(shape), scalarTypeMap[type]);
_tensor = torch::zeros(c10::IntArrayRef(shape), scalarTypeMap.at(type));
return *this;
}

Tensor &Tensor::dType(NTdtypes::scalarType type)
{
_tensor = _tensor.to(scalarTypeMap[type]);
_tensor = _tensor.to(scalarTypeMap.at(type));
return *this;
}

Tensor &Tensor::device(NTdtypes::deviceType device)
{
_tensor = _tensor.to(deviceTypeMap[device]);
_tensor = _tensor.to(deviceTypeMap.at(device));
return *this;
}

Expand All @@ -65,15 +68,19 @@ Tensor &Tensor::requiresGrad(bool reqGrad)
return *this;
}

Tensor Tensor::getValue(const std::vector<std::variant<int, std::string>> &indices) const
Tensor Tensor::getValue(const std::vector<Tensor::indexType> &indices) const
{
std::vector<at::indexing::TensorIndex> indicesVec;
for (size_t i = 0; i < indices.size(); i++)
for (const Tensor::indexType &i : indices)
{
if (const int *index = std::get_if<int>(&indices[i]))
if (const int *index = std::get_if<int>(&i))
{
indicesVec.push_back(at::indexing::TensorIndex(*index));
else if (const std::string *index = std::get_if<std::string>(&indices[i]))
}
else if (const std::string *index = std::get_if<std::string>(&i))
{
indicesVec.push_back(at::indexing::TensorIndex((*index).c_str()));
}
else
{
assert(false && "ERROR: Unsupported index type");
Expand All @@ -91,15 +98,19 @@ void Tensor::setValue(const Tensor &indices, const Tensor &value)
_tensor.index_put_({indices._tensor}, value._tensor);
}

void Tensor::setValue(const std::vector<std::variant<int, std::string>> &indices, const Tensor &value)
void Tensor::setValue(const std::vector<Tensor::indexType> &indices, const Tensor &value)
{
std::vector<at::indexing::TensorIndex> indicesVec;
for (size_t i = 0; i < indices.size(); i++)
for (const Tensor::indexType &i : indices)
{
if (const int *index = std::get_if<int>(&indices[i]))
if (const int *index = std::get_if<int>(&i))
{
indicesVec.push_back(at::indexing::TensorIndex(*index));
else if (const std::string *index = std::get_if<std::string>(&indices[i]))
}
else if (const std::string *index = std::get_if<std::string>(&i))
{
indicesVec.push_back(at::indexing::TensorIndex((*index).c_str()));
}
else
{
assert(false && "ERROR: Unsupported index type");
Expand All @@ -113,9 +124,9 @@ void Tensor::setValue(const std::vector<std::variant<int, std::string>> &indices
void Tensor::setValue(const std::vector<int> &indices, float value)
{
std::vector<at::indexing::TensorIndex> indicesVec;
for (size_t i = 0; i < indices.size(); i++)
for (const int &i : indices)
{
indicesVec.push_back(at::indexing::TensorIndex(indices[i]));
indicesVec.push_back(at::indexing::TensorIndex(i));
}

_tensor.index_put_(indicesVec, value);
Expand All @@ -124,9 +135,9 @@ void Tensor::setValue(const std::vector<int> &indices, float value)
void Tensor::setValue(const std::vector<int> &indices, std::complex<float> value)
{
std::vector<at::indexing::TensorIndex> indicesVec;
for (size_t i = 0; i < indices.size(); i++)
for (const int &i : indices)
{
indicesVec.push_back(at::indexing::TensorIndex(indices[i]));
indicesVec.push_back(at::indexing::TensorIndex(i));
}

_tensor.index_put_(indicesVec, c10::complex<float>(value.real(), value.imag()));
Expand Down
66 changes: 52 additions & 14 deletions tests/barger-propagator.hpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
#pragma once

#include <math.h>
#include <cmath>

#include <iostream>
#include <nuTens/propagator/constants.hpp>

Check failure on line 6 in tests/barger-propagator.hpp

View workflow job for this annotation

GitHub Actions / cpp-linter

tests/barger-propagator.hpp:6:10 [clang-diagnostic-error]

'nuTens/propagator/constants.hpp' file not found
Expand Down Expand Up @@ -35,34 +35,50 @@ class TwoFlavourBarger
};

// characteristic length in vacuum
inline float lv(float energy)
inline const float lv(float energy)

Check warning on line 38 in tests/barger-propagator.hpp

View workflow job for this annotation

GitHub Actions / cpp-linter

tests/barger-propagator.hpp:38:5 [readability-const-return-type]

return type 'const float' is 'const'-qualified at the top level, which may reduce code readability without improving const correctness

Check warning on line 38 in tests/barger-propagator.hpp

View workflow job for this annotation

GitHub Actions / cpp-linter

tests/barger-propagator.hpp:38:24 [modernize-use-trailing-return-type]

use a trailing return type for this function

Check warning on line 38 in tests/barger-propagator.hpp

View workflow job for this annotation

GitHub Actions / cpp-linter

tests/barger-propagator.hpp:38:24 [readability-make-member-function-const]

method 'lv' can be made const
{
return 4.0 * M_PI * energy / (_m1 * _m1 - _m2 * _m2);
}

// characteristic length in matter
inline float lm()
inline const float lm()

Check warning on line 44 in tests/barger-propagator.hpp

View workflow job for this annotation

GitHub Actions / cpp-linter

tests/barger-propagator.hpp:44:5 [readability-const-return-type]

return type 'const float' is 'const'-qualified at the top level, which may reduce code readability without improving const correctness

Check warning on line 44 in tests/barger-propagator.hpp

View workflow job for this annotation

GitHub Actions / cpp-linter

tests/barger-propagator.hpp:44:24 [modernize-use-trailing-return-type]

use a trailing return type for this function
{
return 2.0 * M_PI / (Constants::Groot2 * _density);
}

// calculate the modified rotation angle
inline float calculateEffectiveAngle(float energy)

Check warning on line 50 in tests/barger-propagator.hpp

View workflow job for this annotation

GitHub Actions / cpp-linter

tests/barger-propagator.hpp:50:18 [modernize-use-trailing-return-type]

use a trailing return type for this function
{
float ret;

Check warning on line 52 in tests/barger-propagator.hpp

View workflow job for this annotation

GitHub Actions / cpp-linter

tests/barger-propagator.hpp:52:15 [cppcoreguidelines-init-variables]

variable 'ret' is not initialized

if (_density > 0.0)
return std::atan2(std::sin(2.0 * _theta), (std::cos(2.0 * _theta) - lv(energy) / lm())) / 2.0;
{
ret = std::atan2(std::sin(2.0 * _theta), (std::cos(2.0 * _theta) - lv(energy) / lm())) / 2.0;
}
else
return _theta;
{
ret = _theta;
}

return ret;
}

// calculate the modified delta M^2
inline float calculateEffectiveDm2(float energy)

Check warning on line 67 in tests/barger-propagator.hpp

View workflow job for this annotation

GitHub Actions / cpp-linter

tests/barger-propagator.hpp:67:18 [modernize-use-trailing-return-type]

use a trailing return type for this function
{
float ret;

Check warning on line 69 in tests/barger-propagator.hpp

View workflow job for this annotation

GitHub Actions / cpp-linter

tests/barger-propagator.hpp:69:15 [cppcoreguidelines-init-variables]

variable 'ret' is not initialized

if (_density > 0.0)
return (_m1 * _m1 - _m2 * _m2) * std::sqrt(1.0 - 2.0 * (lv(energy) / lm()) * std::cos(2.0 * _theta) +
(lv(energy) / lm()) * (lv(energy) / lm()));
{
ret = (_m1 * _m1 - _m2 * _m2) * std::sqrt(1.0 - 2.0 * (lv(energy) / lm()) * std::cos(2.0 * _theta) +
(lv(energy) / lm()) * (lv(energy) / lm()));
}
else
return (_m1 * _m1 - _m2 * _m2);
{
ret = (_m1 * _m1 - _m2 * _m2);
}

return ret;
}

// get the good old 2 flavour PMNS matrix entries
Expand All @@ -76,25 +92,39 @@ class TwoFlavourBarger
std::cerr << " you supplied alpha = " << alpha << ", "
<< "beta = " << beta << std::endl;
std::cerr << " " << __FILE__ << ": " << __LINE__ << std::endl;

throw;
}

float ret;

float gamma = calculateEffectiveAngle(energy);

if (alpha == 0 && beta == 0)
return std::cos(gamma);
{
ret = std::cos(gamma);
}
else if (alpha == 1 && beta == 1)
return std::cos(gamma);
{
ret = std::cos(gamma);
}
else if (alpha == 0 && beta == 1)
return std::sin(gamma);
{
ret = std::sin(gamma);
}
else if (alpha == 1 && beta == 0)
return -std::sin(gamma);
{
ret = -std::sin(gamma);
}

else
{
std::cerr << "ERROR: how did you get here????" << std::endl;
std::cerr << __FILE__ << ":" << __LINE__ << std::endl;
throw;
}

return ret;
}

// get the good old 2 flavour vacuum oscillation probability
Expand All @@ -111,6 +141,8 @@ class TwoFlavourBarger
throw;
}

float ret;

// get the effective oscillation parameters
// if in vacuum (_density <= 0.0) these should just return the "raw" values
float gamma = calculateEffectiveAngle(energy);
Expand All @@ -124,9 +156,15 @@ class TwoFlavourBarger
float onAxis = 1.0 - offAxis;

if (alpha == beta)
return onAxis;
{
ret = onAxis;
}
else
return offAxis;
{
ret = offAxis;
}

return ret;
}

private:
Expand Down
Loading

1 comment on commit 57a6e7a

@github-actions
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Cpp-Linter Report ⚠️

Some files did not pass the configured checks!

clang-tidy reports: 128 concern(s)
  • tests/barger-propagator.hpp:6:10: error: [clang-diagnostic-error]

    'nuTens/propagator/constants.hpp' file not found

    #include <nuTens/propagator/constants.hpp>
             ^
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:28:93: warning: 999.9 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
        inline void setParams(float m1, float m2, float theta, float baseline, float density = -999.9)
                                                                                                ^
  • tests/barger-propagator.hpp:38:5: warning: [readability-const-return-type]

    return type 'const float' is 'const'-qualified at the top level, which may reduce code readability without improving const correctness

        inline const float lv(float energy)
        ^
    note: this fix will not be applied because it overlaps with another fix
  • tests/barger-propagator.hpp:38:24: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        inline const float lv(float energy)
               ~~~~~~~~~~~ ^
               auto                         -> const float
  • tests/barger-propagator.hpp:38:24: warning: [readability-make-member-function-const]

    method 'lv' can be made const

        inline const float lv(float energy)
                           ^
                                            const
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:40:16: warning: narrowing conversion from 'double' to 'float' [bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions]
            return 4.0 * M_PI * energy / (_m1 * _m1 - _m2 * _m2);
                   ^
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:40:16: warning: 4.0 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
  • tests/barger-propagator.hpp:44:5: warning: [readability-const-return-type]

    return type 'const float' is 'const'-qualified at the top level, which may reduce code readability without improving const correctness

        inline const float lm()
        ^
    note: this fix will not be applied because it overlaps with another fix
  • tests/barger-propagator.hpp:44:24: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        inline const float lm()
               ~~~~~~~~~~~ ^
               auto             -> const float
  • tests/barger-propagator.hpp:50:18: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        inline float calculateEffectiveAngle(float energy)
               ~~~~~ ^
               auto                                        -> float
  • tests/barger-propagator.hpp:52:15: warning: [cppcoreguidelines-init-variables]

    variable 'ret' is not initialized

            float ret;
                  ^
                      = NAN
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:56:19: warning: narrowing conversion from 'double' to 'float' [bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions]
                ret = std::atan2(std::sin(2.0 * _theta), (std::cos(2.0 * _theta) - lv(energy) / lm())) / 2.0;
                      ^
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:56:39: warning: 2.0 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
                ret = std::atan2(std::sin(2.0 * _theta), (std::cos(2.0 * _theta) - lv(energy) / lm())) / 2.0;
                                          ^
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:56:64: warning: 2.0 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
                ret = std::atan2(std::sin(2.0 * _theta), (std::cos(2.0 * _theta) - lv(energy) / lm())) / 2.0;
                                                                   ^
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:56:102: warning: 2.0 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
                ret = std::atan2(std::sin(2.0 * _theta), (std::cos(2.0 * _theta) - lv(energy) / lm())) / 2.0;
                                                                                                         ^
  • tests/barger-propagator.hpp:67:18: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        inline float calculateEffectiveDm2(float energy)
               ~~~~~ ^
               auto                                      -> float
  • tests/barger-propagator.hpp:69:15: warning: [cppcoreguidelines-init-variables]

    variable 'ret' is not initialized

            float ret;
                  ^
                      = NAN
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:73:19: warning: narrowing conversion from 'double' to 'float' [bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions]
                ret = (_m1 * _m1 - _m2 * _m2) * std::sqrt(1.0 - 2.0 * (lv(energy) / lm()) * std::cos(2.0 * _theta) +
                      ^
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:73:61: warning: 2.0 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
                ret = (_m1 * _m1 - _m2 * _m2) * std::sqrt(1.0 - 2.0 * (lv(energy) / lm()) * std::cos(2.0 * _theta) +
                                                                ^
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:73:98: warning: 2.0 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
                ret = (_m1 * _m1 - _m2 * _m2) * std::sqrt(1.0 - 2.0 * (lv(energy) / lm()) * std::cos(2.0 * _theta) +
                                                                                                     ^
  • tests/barger-propagator.hpp:85:18: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        inline float getPMNSelement(float energy, int alpha, int beta)
               ~~~~~ ^
               auto                                                    -> float
  • tests/barger-propagator.hpp:99:15: warning: [cppcoreguidelines-init-variables]

    variable 'ret' is not initialized

            float ret;
                  ^
                      = NAN
  • tests/barger-propagator.hpp:104:9: warning: [bugprone-branch-clone]

    repeated branch in conditional chain

            {
            ^
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:106:10: note: end of the original
            }
             ^
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:108:9: note: clone 1 starts here
            {
            ^
  • tests/barger-propagator.hpp:131:18: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        inline float calculateProb(float energy, int alpha, int beta)
               ~~~~~ ^
               auto                                                   -> float
  • tests/barger-propagator.hpp:144:15: warning: [cppcoreguidelines-init-variables]

    variable 'ret' is not initialized

            float ret;
                  ^
                      = NAN
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:152:27: warning: narrowing conversion from 'double' to 'float' [bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions]
            float sin2Gamma = std::sin(2.0 * gamma);
                              ^
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:152:36: warning: 2.0 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
            float sin2Gamma = std::sin(2.0 * gamma);
                                       ^
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:153:24: warning: narrowing conversion from 'double' to 'float' [bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions]
            float sinPhi = std::sin(dM2 * _baseline / (4.0 * energy));
                           ^
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:153:52: warning: 4.0 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
            float sinPhi = std::sin(dM2 * _baseline / (4.0 * energy));
                                                       ^
    /home/runner/work/nuTens/nuTens/tests/barger-propagator.hpp:156:24: warning: narrowing conversion from 'double' to 'float' [bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions]
            float onAxis = 1.0 - offAxis;
                           ^
  • tests/test-utils.hpp:14:7: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

    float relativeDiff(float f1, float f2)
    ~~~~~ ^
    auto                                   -> float
  • tests/test-utils.hpp:28:9: warning: [cppcoreguidelines-macro-usage]

    function-like macro 'TEST_EXPECTED' used; consider a 'constexpr' template function

    #define TEST_EXPECTED(value, expectation, varName, threshold)                                                          \
            ^
  • tests/test-utils.hpp:30:57: warning: [bugprone-macro-parentheses]

    macro argument should be enclosed in parentheses

            if (Testing::relativeDiff(value, expectation) > threshold)                                                     \
                                                            ^
                                                            (        )
  • tests/test-utils.hpp:32:36: warning: [bugprone-macro-parentheses]

    macro argument should be enclosed in parentheses

                std::cerr << "bad " << varName << std::endl;                                                               \
                                       ^
                                       (      )
  • tests/test-utils.hpp:33:37: warning: [bugprone-macro-parentheses]

    macro argument should be enclosed in parentheses

                std::cerr << "Got: " << value;                                                                             \
                                        ^
                                        (    )
  • tests/test-utils.hpp:34:44: warning: [bugprone-macro-parentheses]

    macro argument should be enclosed in parentheses

                std::cerr << "; Expected: " << expectation;                                                                \
                                               ^
                                               (          )
  • nuTens/logging.hpp:12:9: warning: [cppcoreguidelines-macro-usage]

    macro 'NT_LOG_LEVEL_TRACE' used to declare a constant; consider using a 'constexpr' constant

    #define NT_LOG_LEVEL_TRACE 0
            ^
  • nuTens/logging.hpp:13:9: warning: [cppcoreguidelines-macro-usage]

    macro 'NT_LOG_LEVEL_DEBUG' used to declare a constant; consider using a 'constexpr' constant

    #define NT_LOG_LEVEL_DEBUG 1
            ^
  • nuTens/logging.hpp:14:9: warning: [cppcoreguidelines-macro-usage]

    macro 'NT_LOG_LEVEL_INFO' used to declare a constant; consider using a 'constexpr' constant

    #define NT_LOG_LEVEL_INFO 2
            ^
  • nuTens/logging.hpp:15:9: warning: [cppcoreguidelines-macro-usage]

    macro 'NT_LOG_LEVEL_WARNING' used to declare a constant; consider using a 'constexpr' constant

    #define NT_LOG_LEVEL_WARNING 3
            ^
  • nuTens/logging.hpp:16:9: warning: [cppcoreguidelines-macro-usage]

    macro 'NT_LOG_LEVEL_ERROR' used to declare a constant; consider using a 'constexpr' constant

    #define NT_LOG_LEVEL_ERROR 4
            ^
  • nuTens/logging.hpp:17:9: warning: [cppcoreguidelines-macro-usage]

    macro 'NT_LOG_LEVEL_SILENT' used to declare a constant; consider using a 'constexpr' constant

    #define NT_LOG_LEVEL_SILENT 5
            ^
  • nuTens/logging.hpp:21:9: warning: [cppcoreguidelines-macro-usage]

    macro 'SPDLOG_ACTIVE_LEVEL' used to declare a constant; consider using a 'constexpr' constant

    #define SPDLOG_ACTIVE_LEVEL SPDLOG_LEVEL_TRACE
            ^
  • nuTens/logging.hpp:41:10: error: [clang-diagnostic-error]

    'spdlog/spdlog.h' file not found

    #include "spdlog/spdlog.h"
             ^
  • nuTens/logging.hpp:67:23: warning: [cppcoreguidelines-avoid-non-const-global-variables]

    variable 'once' is non-const and globally accessible, consider making it const

    static std::once_flag once;
                          ^
  • nuTens/logging.hpp:84:9: warning: [cppcoreguidelines-macro-usage]

    variadic macro 'NT_TRACE' used; consider using a 'constexpr' variadic template function

    #define NT_TRACE(...)                                                                                                  \
            ^
  • nuTens/logging.hpp:92:9: warning: [cppcoreguidelines-macro-usage]

    variadic macro 'NT_DEBUG' used; consider using a 'constexpr' variadic template function

    #define NT_DEBUG(...)                                                                                                  \
            ^
  • nuTens/logging.hpp:100:9: warning: [cppcoreguidelines-macro-usage]

    variadic macro 'NT_INFO' used; consider using a 'constexpr' variadic template function

    #define NT_INFO(...)                                                                                                   \
            ^
  • nuTens/logging.hpp:108:9: warning: [cppcoreguidelines-macro-usage]

    variadic macro 'NT_WARN' used; consider using a 'constexpr' variadic template function

    #define NT_WARN(...)                                                                                                   \
            ^
  • nuTens/logging.hpp:116:9: warning: [cppcoreguidelines-macro-usage]

    variadic macro 'NT_ERROR' used; consider using a 'constexpr' variadic template function

    #define NT_ERROR(...)                                                                                                  \
            ^
  • nuTens/nuTens-pch.hpp:9:10: error: [clang-diagnostic-error]

    'nuTens/logging.hpp' file not found

    #include <nuTens/logging.hpp>
             ^
  • nuTens/tensors/tensor.hpp:7:10: error: [clang-diagnostic-error]

    'nuTens/tensors/dtypes.hpp' file not found

    #include <nuTens/tensors/dtypes.hpp>
             ^
  • nuTens/tensors/tensor.hpp:40:5: warning: [modernize-use-using]

    use 'using' instead of 'typedef'

        typedef std::variant<int, std::string> indexType;
        ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        using indexType = int
  • nuTens/tensors/tensor.hpp:49:13: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor &ones(int length, NTdtypes::scalarType type, NTdtypes::deviceType device = NTdtypes::kCPU,
        ~~~~~~~~^
        auto 
  • nuTens/tensors/tensor.hpp:54:13: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor &ones(const std::vector<long int> &shape, NTdtypes::scalarType type,
        ~~~~~~~~^
        auto 
  • nuTens/tensors/tensor.hpp:60:13: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor &zeros(int length, NTdtypes::scalarType type, NTdtypes::deviceType device = NTdtypes::kCPU,
        ~~~~~~~~^
        auto 
  • nuTens/tensors/tensor.hpp:65:13: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor &zeros(const std::vector<long int> &shape, NTdtypes::scalarType type,
        ~~~~~~~~^
        auto 
  • nuTens/tensors/tensor.hpp:73:13: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor &dType(NTdtypes::scalarType type);
        ~~~~~~~~^
        auto                                     -> Tensor &
  • nuTens/tensors/tensor.hpp:75:13: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor &device(NTdtypes::deviceType device);
        ~~~~~~~~^
        auto                                        -> Tensor &
  • nuTens/tensors/tensor.hpp:77:13: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor &requiresGrad(bool reqGrad);
        ~~~~~~~~^
        auto                               -> Tensor &
  • nuTens/tensors/tensor.hpp:89:19: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        static Tensor matmul(const Tensor &t1, const Tensor &t2);
               ~~~~~~ ^
               auto                                              -> Tensor
  • nuTens/tensors/tensor.hpp:94:19: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        static Tensor outer(const Tensor &t1, const Tensor &t2);
               ~~~~~~ ^
               auto                                             -> Tensor
  • nuTens/tensors/tensor.hpp:99:19: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        static Tensor mul(const Tensor &t1, const Tensor &t2);
               ~~~~~~ ^
               auto                                           -> Tensor
  • nuTens/tensors/tensor.hpp:104:19: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        static Tensor div(const Tensor &t1, const Tensor &t2);
               ~~~~~~ ^
               auto                                           -> Tensor
  • nuTens/tensors/tensor.hpp:109:19: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        static Tensor pow(const Tensor &t, float s);
               ~~~~~~ ^
               auto                                 -> Tensor
  • nuTens/tensors/tensor.hpp:113:19: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        static Tensor pow(const Tensor &t, std::complex<float> s);
               ~~~~~~ ^
               auto                                               -> Tensor
  • nuTens/tensors/tensor.hpp:117:19: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        static Tensor exp(const Tensor &t);
               ~~~~~~ ^
               auto                        -> Tensor
  • nuTens/tensors/tensor.hpp:123:19: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        static Tensor transpose(const Tensor &t, int dim1, int dim2);
               ~~~~~~ ^
               auto                                                  -> Tensor
  • nuTens/tensors/tensor.hpp:128:19: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        static Tensor scale(const Tensor &t, float s);
               ~~~~~~ ^
               auto                                   -> Tensor
  • nuTens/tensors/tensor.hpp:132:19: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        static Tensor scale(const Tensor &t, std::complex<float> s);
               ~~~~~~ ^
               auto                                                 -> Tensor
  • nuTens/tensors/tensor.hpp:189:10: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        bool operator==(const Tensor &rhs) const;
        ~~~~ ^
        auto                                     -> bool
  • nuTens/tensors/tensor.hpp:190:10: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        bool operator!=(const Tensor &rhs) const;
        ~~~~ ^
        auto                                     -> bool
  • nuTens/tensors/tensor.hpp:191:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor operator+(const Tensor &rhs) const;
        ~~~~~~ ^
        auto                                      -> Tensor
  • nuTens/tensors/tensor.hpp:192:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor operator-(const Tensor &rhs) const;
        ~~~~~~ ^
        auto                                      -> Tensor
  • nuTens/tensors/tensor.hpp:193:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor operator-() const;
        ~~~~~~ ^
        auto                     -> Tensor
  • nuTens/tensors/tensor.hpp:197:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor real() const;
        ~~~~~~ ^
        auto                -> Tensor
  • nuTens/tensors/tensor.hpp:199:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor imag() const;
        ~~~~~~ ^
        auto                -> Tensor
  • nuTens/tensors/tensor.hpp:202:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor conj() const;
        ~~~~~~ ^
        auto                -> Tensor
  • nuTens/tensors/tensor.hpp:204:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor abs() const;
        ~~~~~~ ^
        auto               -> Tensor
  • nuTens/tensors/tensor.hpp:206:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor angle() const;
        ~~~~~~ ^
        auto                 -> Tensor
  • nuTens/tensors/tensor.hpp:210:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor cumsum(int dim) const;
        ~~~~~~ ^
        auto                         -> Tensor
  • nuTens/tensors/tensor.hpp:213:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor sum() const;
        ~~~~~~ ^
        auto               -> Tensor
  • nuTens/tensors/tensor.hpp:224:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor grad() const;
        ~~~~~~ ^
        auto                -> Tensor
  • nuTens/tensors/tensor.hpp:233:19: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        static Tensor sin(const Tensor &t);
               ~~~~~~ ^
               auto                        -> Tensor
  • nuTens/tensors/tensor.hpp:237:19: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        static Tensor cos(const Tensor &t);
               ~~~~~~ ^
               auto                        -> Tensor
  • nuTens/tensors/tensor.hpp:243:26: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        friend std::ostream &operator<<(std::ostream &stream, const Tensor &tensor)
               ~~~~~~~~~~~~~~^
               auto                                                                 -> std::ostream &
  • nuTens/tensors/tensor.hpp:249:17: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        std::string toString() const;
        ~~~~~~~~~~~ ^
        auto                         -> std::string
  • nuTens/tensors/tensor.hpp:255:19: warning: [readability-avoid-const-params-in-decls]

    parameter 1 is const-qualified in the function declaration; const-qualification of parameters only has an effect in function definitions

        void setValue(const std::vector<std::variant<int, std::string>> &indices, const Tensor &value);
                      ^
  • nuTens/tensors/tensor.hpp:261:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor getValue(const std::vector<std::variant<int, std::string>> &indices) const;
        ~~~~~~ ^
        auto                                                                              -> Tensor
  • nuTens/tensors/tensor.hpp:261:21: warning: [readability-avoid-const-params-in-decls]

    parameter 1 is const-qualified in the function declaration; const-qualification of parameters only has an effect in function definitions

        Tensor getValue(const std::vector<std::variant<int, std::string>> &indices) const;
                        ^
  • nuTens/tensors/tensor.hpp:264:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        size_t getNdim() const;
        ~~~~~~ ^
        auto                   -> size_t
  • nuTens/tensors/tensor.hpp:267:9: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        int getBatchDim() const;
        ~~~ ^
        auto                    -> int
  • nuTens/tensors/tensor.hpp:270:22: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        std::vector<int> getShape() const;
                         ^
  • nuTens/tensors/tensor.hpp:295:24: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        static std::string getTensorLibrary();
               ~~~~~~~~~~~ ^
               auto                           -> std::string
  • nuTens/propagator/base-matter-solver.hpp:3:10: error: [clang-diagnostic-error]

    'nuTens/tensors/tensor.hpp' file not found

    #include <nuTens/tensors/tensor.hpp>
             ^
  • nuTens/propagator/propagator.hpp:4:10: error: [clang-diagnostic-error]

    'nuTens/propagator/base-matter-solver.hpp' file not found

    #include <nuTens/propagator/base-matter-solver.hpp>
             ^
  • nuTens/propagator/propagator.hpp:30:5: warning: [cppcoreguidelines-pro-type-member-init]

    constructor does not initialize these fields: _PMNSmatrix, _masses, _matterSolver

        Propagator(int nGenerations, float baseline) : _baseline(baseline), _nGenerations(nGenerations){};
        ^
  • nuTens/propagator/propagator.hpp:34:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor calculateProbs(const Tensor &energies) const;
               ^
  • nuTens/propagator/propagator.hpp:98:12: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

        Tensor _calculateProbs(const Tensor &energies, const Tensor &masses, const Tensor &PMNS) const;
               ^
  • nuTens/propagator/propagator.hpp:100:3: warning: [readability-redundant-access-specifiers]

    redundant access specifier has the same accessibility as the previous access specifier

      private:
      ^~~~~~~~
    /home/runner/work/nuTens/nuTens/nuTens/propagator/propagator.hpp:95:3: note: previously declared here
      private:
      ^
  • nuTens/propagator/propagator.hpp:101:12: warning: [bugprone-reserved-identifier]

    declaration uses identifier '_PMNSmatrix', which is a reserved identifier

        Tensor _PMNSmatrix;
               ^~~~~~~~~~~
               PMNSmatrix
  • nuTens/propagator/const-density-solver.hpp:3:10: error: [clang-diagnostic-error]

    'nuTens/propagator/base-matter-solver.hpp' file not found

    #include <nuTens/propagator/base-matter-solver.hpp>
             ^
  • nuTens/propagator/const-density-solver.hpp:35:5: warning: [cppcoreguidelines-pro-type-member-init]

    constructor does not initialize these fields: PMNS, masses, diagMassMatrix, electronOuter

        ConstDensityMatterSolver(int nGenerations, float density) : nGenerations(nGenerations), density(density)
        ^
  • nuTens/propagator/const-density-solver.hpp:57:17: warning: [readability-make-member-function-const]

    method 'setMasses' can be made const

        inline void setMasses(const Tensor &newMasses)
                    ^
                                                       const
  • tests/two-flavour-const-matter.cpp:1:10: error: [clang-diagnostic-error]

    'nuTens/propagator/const-density-solver.hpp' file not found

    #include <nuTens/propagator/const-density-solver.hpp>
             ^
  • tests/two-flavour-const-matter.cpp:7:5: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

    int main()
    ~~~ ^
    auto       -> int
    /home/runner/work/nuTens/nuTens/tests/two-flavour-const-matter.cpp:10:16: warning: 2.0 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
        float m2 = 2.0;
                   ^
    /home/runner/work/nuTens/nuTens/tests/two-flavour-const-matter.cpp:12:21: warning: 2.6 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
        float density = 2.6;
                        ^
  • tests/two-flavour-const-matter.cpp:15:12: warning: [cppcoreguidelines-init-variables]

    variable 'masses' is not initialized

        Tensor masses;
               ^
                      = 0
  • tests/two-flavour-const-matter.cpp:21:12: warning: [cppcoreguidelines-init-variables]

    variable 'energies' is not initialized

        Tensor energies;
               ^
                        = 0
  • tests/two-flavour-const-matter.cpp:28:30: warning: [cppcoreguidelines-init-variables]

    variable 'tensorSolver' is not initialized

        ConstDensityMatterSolver tensorSolver(2, density);
                                 ^
                                              = 0
  • tests/two-flavour-const-matter.cpp:32:22: warning: [cppcoreguidelines-init-variables]

    variable 'bargerProp' is not initialized

        TwoFlavourBarger bargerProp;
                         ^
                                    = 0
    /home/runner/work/nuTens/nuTens/tests/two-flavour-const-matter.cpp:36:26: warning: 20 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
        for (int i = 0; i <= 20; i++)
                             ^
  • tests/two-flavour-const-matter.cpp:38:15: warning: [cppcoreguidelines-init-variables]

    variable 'theta' is not initialized

            float theta = (-1.0 + 2.0 * (float)i / 20.0) * 0.49 * M_PI;
                  ^
                        = NAN
  • tests/two-flavour-const-matter.cpp:43:16: warning: [cppcoreguidelines-init-variables]

    variable 'PMNS' is not initialized

            Tensor PMNS;
                   ^
                        = 0
  • tests/two-flavour-const-matter.cpp:54:16: warning: [cppcoreguidelines-init-variables]

    variable 'eigenVals' is not initialized

            Tensor eigenVals;
                   ^
                             = 0
  • tests/two-flavour-const-matter.cpp:55:16: warning: [cppcoreguidelines-init-variables]

    variable 'eigenVecs' is not initialized

            Tensor eigenVecs;
                   ^
                             = 0
  • tests/two-flavour-const-matter.cpp:66:15: warning: [cppcoreguidelines-init-variables]

    variable 'effDm2' is not initialized

            float effDm2 = (calcV1 - calcV2) * 2.0 * energy;
                  ^
                         = NAN
  • tests/barger.cpp:2:10: error: [clang-diagnostic-error]

    'tests/barger-propagator.hpp' file not found

    #include <tests/barger-propagator.hpp>
             ^
  • tests/barger.cpp:9:5: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

    int main()
    ~~~ ^
    auto       -> int
    /home/runner/work/nuTens/nuTens/tests/barger.cpp:11:22: warning: 500.0 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
        float baseline = 500.0;
                         ^
  • tests/barger.cpp:13:22: warning: [cppcoreguidelines-init-variables]

    variable 'bargerProp' is not initialized

        TwoFlavourBarger bargerProp;
                         ^
                                    = 0
    /home/runner/work/nuTens/nuTens/tests/barger.cpp:22:25: warning: 100 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
        for (int i = 0; i < 100; i++)
                            ^
    /home/runner/work/nuTens/nuTens/tests/barger.cpp:24:24: warning: narrowing conversion from 'double' to 'float' [bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions]
            float energy = (float)i * 10.0;
                           ^
    /home/runner/work/nuTens/nuTens/tests/barger.cpp:24:35: warning: 10.0 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
            float energy = (float)i * 10.0;
                                      ^
    /home/runner/work/nuTens/nuTens/tests/barger.cpp:43:25: warning: 100 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
        for (int i = 0; i < 100; i++)
                            ^
    /home/runner/work/nuTens/nuTens/tests/barger.cpp:45:24: warning: narrowing conversion from 'double' to 'float' [bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions]
            float energy = (float)i * 10.0;
                           ^
    /home/runner/work/nuTens/nuTens/tests/barger.cpp:45:35: warning: 10.0 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
            float energy = (float)i * 10.0;
                                      ^
  • tests/tensor-basic.cpp:2:10: error: [clang-diagnostic-error]

    'nuTens/tensors/dtypes.hpp' file not found

    #include <nuTens/tensors/dtypes.hpp>
             ^
  • tests/tensor-basic.cpp:10:5: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

    int main()
    ~~~ ^
    auto       -> int
  • tests/tensor-basic.cpp:16:12: warning: [cppcoreguidelines-init-variables]

    variable 'tensorFloat' is not initialized

        Tensor tensorFloat;
               ^
                           = 0
  • tests/tensor-basic.cpp:33:12: warning: [cppcoreguidelines-init-variables]

    variable 'realSquared' is not initialized

        Tensor realSquared = Tensor::matmul(tensorFloat, tensorFloat);
               ^
                           = 0
  • tests/tensor-basic.cpp:40:12: warning: [cppcoreguidelines-init-variables]

    variable 'tensorComplex' is not initialized

        Tensor tensorComplex;
               ^
                             = 0
  • tests/tensor-basic.cpp:69:12: warning: [cppcoreguidelines-init-variables]

    variable 'imagSquared' is not initialized

        Tensor imagSquared = Tensor::matmul(tensorComplex, tensorComplex);
               ^
                           = 0
  • tests/tensor-basic.cpp:84:12: warning: [cppcoreguidelines-init-variables]

    variable 'ones' is not initialized

        Tensor ones;
               ^
                    = 0
  • tests/tensor-basic.cpp:86:12: warning: [cppcoreguidelines-init-variables]

    variable 'twos' is not initialized

        Tensor twos = ones + ones;
               ^
                    = 0
  • tests/tensor-basic.cpp:103:12: warning: [cppcoreguidelines-init-variables]

    variable 'ones_scaleTest' is not initialized

        Tensor ones_scaleTest;
               ^
                              = 0
  • tests/tensor-basic.cpp:105:12: warning: [cppcoreguidelines-init-variables]

    variable 'threes' is not initialized

        Tensor threes = Tensor::scale(ones_scaleTest, 3.0).sum();
               ^
                      = 0
  • tests/tensor-basic.cpp:107:12: warning: [cppcoreguidelines-init-variables]

    variable 'grad' is not initialized

        Tensor grad = ones_scaleTest.grad();
               ^
                    = 0
  • tests/tensor-basic.cpp:120:12: warning: [cppcoreguidelines-init-variables]

    variable 'complexGradTest' is not initialized

        Tensor complexGradTest;
               ^
                               = 0
  • tests/tensor-basic.cpp:128:12: warning: [cppcoreguidelines-init-variables]

    variable 'complexGradSquared' is not initialized

        Tensor complexGradSquared = Tensor::matmul(complexGradTest, complexGradTest).sum();
               ^
                                  = 0
  • tests/two-flavour-vacuum.cpp:1:10: error: [clang-diagnostic-error]

    'nuTens/propagator/propagator.hpp' file not found

    #include <nuTens/propagator/propagator.hpp>
             ^
  • tests/two-flavour-vacuum.cpp:7:5: warning: [modernize-use-trailing-return-type]

    use a trailing return type for this function

    int main()
    ~~~ ^
    auto       -> int
    /home/runner/work/nuTens/nuTens/tests/two-flavour-vacuum.cpp:9:16: warning: 0.1 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
        float m1 = 0.1;
                   ^
    /home/runner/work/nuTens/nuTens/tests/two-flavour-vacuum.cpp:10:16: warning: 0.5 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
        float m2 = 0.5;
                   ^
    /home/runner/work/nuTens/nuTens/tests/two-flavour-vacuum.cpp:12:22: warning: 0.5 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
        float baseline = 0.5;
                         ^
  • tests/two-flavour-vacuum.cpp:14:12: warning: [cppcoreguidelines-init-variables]

    variable 'masses' is not initialized

        Tensor masses;
               ^
                      = 0
  • tests/two-flavour-vacuum.cpp:20:12: warning: [cppcoreguidelines-init-variables]

    variable 'energies' is not initialized

        Tensor energies;
               ^
                        = 0
  • tests/two-flavour-vacuum.cpp:25:16: warning: [cppcoreguidelines-init-variables]

    variable 'tensorPropagator' is not initialized

        Propagator tensorPropagator(2, baseline);
                   ^
                                    = 0
  • tests/two-flavour-vacuum.cpp:29:22: warning: [cppcoreguidelines-init-variables]

    variable 'bargerProp' is not initialized

        TwoFlavourBarger bargerProp;
                         ^
                                    = 0
    /home/runner/work/nuTens/nuTens/tests/two-flavour-vacuum.cpp:33:25: warning: 20 is a magic number; consider replacing it with a named constant [cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers]
        for (int i = 0; i < 20; i++)
                            ^
  • tests/two-flavour-vacuum.cpp:35:15: warning: [cppcoreguidelines-init-variables]

    variable 'theta' is not initialized

            float theta = (-1.0 + 2.0 * (float)i / 20.0) * 0.49 * M_PI;
                  ^
                        = NAN
  • tests/two-flavour-vacuum.cpp:40:16: warning: [cppcoreguidelines-init-variables]

    variable 'PMNS' is not initialized

            Tensor PMNS;
                   ^
                        = 0
  • tests/two-flavour-vacuum.cpp:50:16: warning: [cppcoreguidelines-init-variables]

    variable 'probabilities' is not initialized

            Tensor probabilities = tensorPropagator.calculateProbs(energies);
                   ^
                                 = 0
  • nuTens/tensors/torch-tensor.cpp:2:10: error: [clang-diagnostic-error]

    'nuTens/tensors/tensor.hpp' file not found

    #include <nuTens/tensors/tensor.hpp>
             ^
  • nuTens/propagator/propagator.cpp:1:10: error: [clang-diagnostic-error]

    'nuTens/propagator/propagator.hpp' file not found

    #include <nuTens/propagator/propagator.hpp>
             ^
  • nuTens/propagator/const-density-solver.cpp:1:10: error: [clang-diagnostic-error]

    'nuTens/propagator/const-density-solver.hpp' file not found

    #include <nuTens/propagator/const-density-solver.hpp>
             ^

Have any feedback or feature suggestions? Share it here.

Please sign in to comment.