
Commit

L I N T
ewanwm committed Sep 19, 2024
1 parent 0add289 commit c89745c
Showing 1 changed file with 27 additions and 8 deletions.
35 changes: 27 additions & 8 deletions benchmarks/benchmarks.cpp
@@ -4,6 +4,23 @@
#include <nuTens/propagator/propagator.hpp>
#include <nuTens/tensors/tensor.hpp>

// The random seed to use for the RNG.
// We want this to be fixed for reproducibility.
int randSeed = 123;

// Set the PMNS parameters to use.
// We will very likely change the benchmark so that the energies are fixed
// and these parameters get randomised, but for now just set them here.
float m1 = 0.1;
float m2 = 0.2;
float m3 = 0.3;

float th12 = 0.12;
float th23 = 0.23;
float th13 = 0.13;

float dcp = 0.5;
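
A fixed seed matters because the benchmark draws its neutrino energies pseudo-randomly: seeding std::srand once with a constant means every run times exactly the same workload. The energy-generation code itself sits outside the hunks shown here, so the helper below is only an illustrative sketch of what reproducible sampling with this seed could look like:

#include <cstdlib>
#include <vector>

// Illustrative sketch only (not the nuTens benchmark code): draw a batch of
// pseudo-random energies. With std::srand(randSeed) called once up front,
// the same sequence is produced on every run.
std::vector<float> sampleEnergies(long batchSize)
{
    std::vector<float> energies(static_cast<std::size_t>(batchSize));
    for (auto &energy : energies)
    {
        // uniform in [0, 1); scale to a physical energy range as needed
        energy = static_cast<float>(std::rand()) / static_cast<float>(RAND_MAX);
    }
    return energies;
}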

Tensor buildPMNS(const Tensor &theta12, const Tensor &theta13, const Tensor &theta23, const Tensor &deltaCP)
{
// set up the three matrices to build the PMNS matrix
@@ -40,7 +57,7 @@ Tensor buildPMNS(const Tensor &theta12, const Tensor &theta13, const Tensor &the
return PMNS;
}
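
For reference, the "three matrices" that buildPMNS multiplies together are presumably the rotation matrices of the usual PDG parameterisation; the matrix elements themselves live in the collapsed part of this diff, so the expression below states the standard convention rather than quoting the code:

U_{\mathrm{PMNS}} =
\begin{pmatrix} 1 & 0 & 0 \\ 0 & c_{23} & s_{23} \\ 0 & -s_{23} & c_{23} \end{pmatrix}
\begin{pmatrix} c_{13} & 0 & s_{13} e^{-i\delta_{CP}} \\ 0 & 1 & 0 \\ -s_{13} e^{i\delta_{CP}} & 0 & c_{13} \end{pmatrix}
\begin{pmatrix} c_{12} & s_{12} & 0 \\ -s_{12} & c_{12} & 0 \\ 0 & 0 & 1 \end{pmatrix},
\qquad c_{ij} = \cos\theta_{ij}, \; s_{ij} = \sin\theta_{ij}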

-static void batchedOscProbs(const Propagator &prop, int batchSize, int nBatches)
+static void batchedOscProbs(const Propagator &prop, long batchSize, long nBatches)
{
for (int _ = 0; _ < nBatches; _++)
{
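
The int to long change above is plausibly motivated by how google benchmark hands arguments to benchmark functions: state.range(n) returns int64_t, so a long parameter avoids a narrowing conversion at the call site. The call site is not shown in this diff, so the snippet below is a hypothetical illustration only, not code from benchmarks.cpp:

#include <benchmark/benchmark.h>

// Hypothetical example of forwarding benchmark arguments without narrowing.
static void BM_example(benchmark::State &state)
{
    long batchSize = state.range(0); // state.range() returns int64_t
    long nBatches = state.range(1);
    for (auto _ : state)
    {
        benchmark::DoNotOptimize(batchSize * nBatches);
    }
}
BENCHMARK(BM_example)->Args({1024, 100});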
@@ -59,12 +76,12 @@ static void BM_vacuumOscillations(benchmark::State &state)
{

// set up the inputs
-Tensor masses = Tensor({0.1, 0.2, 0.3}, NTdtypes::kFloat).requiresGrad(false).addBatchDim();
+Tensor masses = Tensor({m1, m2, m3}, NTdtypes::kFloat).requiresGrad(false).addBatchDim();

-Tensor theta23 = Tensor({0.23}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
-Tensor theta13 = Tensor({0.13}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
-Tensor theta12 = Tensor({0.12}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
-Tensor deltaCP = Tensor({0.5}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
+Tensor theta23 = Tensor({th23}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
+Tensor theta13 = Tensor({th13}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
+Tensor theta12 = Tensor({th12}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
+Tensor deltaCP = Tensor({dcp}).dType(NTdtypes::kComplexFloat).requiresGrad(false);

Tensor PMNS = buildPMNS(theta12, theta13, theta23, deltaCP);
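
For context on what the benchmarked propagator has to evaluate for each energy, the standard vacuum oscillation probability over a baseline L is (the textbook expression, not a quote of the nuTens implementation):

P(\nu_\alpha \rightarrow \nu_\beta) = \delta_{\alpha\beta}
  - 4 \sum_{i>j} \mathrm{Re}\left( U^*_{\alpha i} U_{\beta i} U_{\alpha j} U^*_{\beta j} \right) \sin^2\!\left( \frac{\Delta m^2_{ij} L}{4E} \right)
  + 2 \sum_{i>j} \mathrm{Im}\left( U^*_{\alpha i} U_{\beta i} U_{\alpha j} U^*_{\beta j} \right) \sin\!\left( \frac{\Delta m^2_{ij} L}{2E} \right)

with \Delta m^2_{ij} = m_i^2 - m_j^2 and U the PMNS matrix built above.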

@@ -74,7 +91,7 @@ static void BM_vacuumOscillations(benchmark::State &state)
vacuumProp.setMasses(masses);

// seed the random number generator for the energies
-std::srand(123);
+std::srand(randSeed);

for (auto _ : state)
{
@@ -104,8 +121,10 @@ static void BM_constMatterOscillations(benchmark::State &state)
matterProp.setMatterSolver(matterSolver);

// seed the random number generator for the energies
-std::srand(123);
+std::srand(randSeed);

// the linter complains because _ is never used, so suppress that warning here
// NOLINTNEXTLINE
for (auto _ : state)
{
// This code gets timed
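
Neither the registration of these benchmark functions nor the driver appears in the hunks above; with google benchmark they would typically look like the following (an assumption about the rest of benchmarks.cpp, not part of this commit):

// Typical google benchmark registration and entry point (illustrative only);
// the benchmarks may also take ->Args({...}) to set batch sizes.
BENCHMARK(BM_vacuumOscillations);
BENCHMARK(BM_constMatterOscillations);
BENCHMARK_MAIN();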
