Merge pull request #58 from ewanwm/feature_more_relaxed_linter
Chill the linter out a bit
ewanwm authored Sep 19, 2024
2 parents 8b8c1cb + 0b2df14 commit d96240c
Showing 5 changed files with 75 additions and 21 deletions.
8 changes: 5 additions & 3 deletions .github/workflows/cpp-linter.yaml
@@ -26,7 +26,7 @@ jobs:

- name: Configure CMake
# Configure CMake so we can get a compile_commands.json so that clang-tidy won't be angry about missing headers any more
- run: cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DCMAKE_PREFIX_PATH=`python3 -c 'import torch;print(torch.utils.cmake_prefix_path)'` -B ${{github.workspace}} -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}}
+ run: cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DNT_ENABLE_BENCHMARKING=ON -DCMAKE_PREFIX_PATH=`python3 -c 'import torch;print(torch.utils.cmake_prefix_path)'` -B ${{github.workspace}} -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}}

## horrible little hack to put spdlog include files somewhere that clang-tidy can find them
## it doesn't seem to like headers included by cpm
@@ -43,10 +43,12 @@ jobs:
with:
style: "Microsoft"
ignore: '.github|_deps|CMakeFiles|spdlog*'
format-review: true
tidy-review: true
step-summary: true
files-changed-only: true
- thread-comments: true

+ thread-comments: false
+ file-annotations: false
+ tidy-checks: 'boost-*,bugprone-*,performance-*,readability-*,portability-*,modernize-*,clang-analyzer-*,cppcoreguidelines-*,-modernize-use-trailing-return-type,-modernize-use-emplace,-readability-redundant-access-specifiers'

- name: Fail fast?!
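As a side note on the configuration above: the tidy-checks list enables whole check families (boost-*, bugprone-*, and so on) and then opts out of three specific checks with a leading minus. A minimal standalone sketch (illustrative only, not repo code) of what the opted-out checks would flag, and of the inline NOLINT alternative used elsewhere in this commit:

```cpp
#include <vector>

// modernize-use-trailing-return-type would normally ask for
// `auto square(int x) -> int`; the workflow now disables that
// check globally via the tidy-checks list above.
int square(int x)
{
    return x * x;
}

int main()
{
    std::vector<int> values;
    // modernize-use-emplace is also disabled globally, but a single
    // finding could equally be silenced in place:
    // NOLINTNEXTLINE(modernize-use-emplace)
    values.push_back(square(3));
    return values.front() == 9 ? 0 : 1;
}
```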
55 changes: 40 additions & 15 deletions benchmarks/benchmarks.cpp
@@ -4,6 +4,23 @@
#include <nuTens/propagator/propagator.hpp>
#include <nuTens/tensors/tensor.hpp>

+ // The random seed to use for the RNG
+ // want this to be fixed for reproducibility
+ const int randSeed = 123;
+
+ // set the PMNS parameters to use
+ // We will very likely change the benchmark so that energies are fixed
+ // and these get randomised, but for now just set them here
+ const float m1 = 0.1;
+ const float m2 = 0.2;
+ const float m3 = 0.3;
+
+ const float th12 = 0.12;
+ const float th23 = 0.23;
+ const float th13 = 0.13;
+
+ const float dcp = 0.5;

Tensor buildPMNS(const Tensor &theta12, const Tensor &theta13, const Tensor &theta23, const Tensor &deltaCP)
{
// set up the three matrices to build the PMNS matrix
@@ -20,8 +37,9 @@ Tensor buildPMNS(const Tensor &theta12, const Tensor &theta13, const Tensor &theta23, const Tensor &deltaCP)

M2.setValue({0, 1, 1}, 1.0);
M2.setValue({0, 0, 0}, Tensor::cos(theta13));
- M2.setValue({0, 0, 2}, Tensor::mul(Tensor::sin(theta13), Tensor::exp(Tensor::scale(deltaCP, -1.0J))));
- M2.setValue({0, 2, 0}, -Tensor::mul(Tensor::sin(theta13), Tensor::exp(Tensor::scale(deltaCP, 1.0J))));
+ std::complex<float> i(0.0, 1.0);
+ M2.setValue({0, 0, 2}, Tensor::mul(Tensor::sin(theta13), Tensor::exp(Tensor::scale(deltaCP, -i))));
+ M2.setValue({0, 2, 0}, -Tensor::mul(Tensor::sin(theta13), Tensor::exp(Tensor::scale(deltaCP, i))));
M2.setValue({0, 2, 2}, Tensor::cos(theta13));
M2.requiresGrad(true);

@@ -39,7 +57,7 @@ Tensor buildPMNS(const Tensor &theta12, const Tensor &theta13, const Tensor &theta23, const Tensor &deltaCP)
return PMNS;
}
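The setValue change above swaps the 1.0J imaginary literal, a compiler extension rather than standard C++, for an explicit std::complex<float>. A standalone sketch of the portable ways to write the imaginary unit (assumes C++14 for the literal suffixes; not repo code):

```cpp
#include <complex>
#include <iostream>

int main()
{
    using namespace std::complex_literals; // C++14 literal suffixes

    std::complex<float> unit(0.0F, 1.0F); // explicit, as the new code does
    auto f = 1.0if;                       // std::complex<float> literal
    auto d = 1.0i;                        // std::complex<double> literal

    std::cout << unit * unit << " " << f << " " << d << "\n"; // (-1,0) ...
    return 0;
}
```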

- static void batchedOscProbs(const Propagator &prop, int batchSize, int nBatches)
+ static void batchedOscProbs(const Propagator &prop, long batchSize, long nBatches)
{
for (int _ = 0; _ < nBatches; _++)
{
@@ -58,12 +76,12 @@ static void BM_vacuumOscillations(benchmark::State &state)
{

// set up the inputs
- Tensor masses = Tensor({0.1, 0.2, 0.3}, NTdtypes::kFloat).requiresGrad(false).addBatchDim();
+ Tensor masses = Tensor({m1, m2, m3}, NTdtypes::kFloat).requiresGrad(false).addBatchDim();

- Tensor theta23 = Tensor({0.23}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
- Tensor theta13 = Tensor({0.13}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
- Tensor theta12 = Tensor({0.12}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
- Tensor deltaCP = Tensor({0.5}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
+ Tensor theta23 = Tensor({th23}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
+ Tensor theta13 = Tensor({th13}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
+ Tensor theta12 = Tensor({th12}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
+ Tensor deltaCP = Tensor({dcp}).dType(NTdtypes::kComplexFloat).requiresGrad(false);

Tensor PMNS = buildPMNS(theta12, theta13, theta23, deltaCP);

@@ -73,8 +91,10 @@ static void BM_vacuumOscillations(benchmark::State &state)
vacuumProp.setMasses(masses);

// seed the random number generator for the energies
- std::srand(123);
+ std::srand(randSeed);

+ // linter gets angry about this as _ is never used :)))
+ // NOLINTNEXTLINE
for (auto _ : state)
{
// This code gets timed
@@ -86,12 +106,12 @@ static void BM_constMatterOscillations(benchmark::State &state)
{

// set up the inputs
- Tensor masses = Tensor({0.1, 0.2, 0.3}, NTdtypes::kFloat).requiresGrad(false).addBatchDim();
+ Tensor masses = Tensor({m1, m2, m3}, NTdtypes::kFloat).requiresGrad(false).addBatchDim();

- Tensor theta23 = Tensor({0.23}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
- Tensor theta13 = Tensor({0.13}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
- Tensor theta12 = Tensor({0.12}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
- Tensor deltaCP = Tensor({0.5}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
+ Tensor theta23 = Tensor({th23}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
+ Tensor theta13 = Tensor({th13}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
+ Tensor theta12 = Tensor({th12}).dType(NTdtypes::kComplexFloat).requiresGrad(false);
+ Tensor deltaCP = Tensor({dcp}).dType(NTdtypes::kComplexFloat).requiresGrad(false);

Tensor PMNS = buildPMNS(theta12, theta13, theta23, deltaCP);

@@ -103,8 +123,10 @@ static void BM_constMatterOscillations(benchmark::State &state)
matterProp.setMatterSolver(matterSolver);

// seed the random number generator for the energies
- std::srand(123);
+ std::srand(randSeed);

+ // linter gets angry about this as _ is never used :)))
+ // NOLINTNEXTLINE
for (auto _ : state)
{
// This code gets timed
@@ -113,10 +135,13 @@ static void BM_constMatterOscillations(benchmark::State &state)
}

// Register the function as a benchmark
+ // NOLINTNEXTLINE
BENCHMARK(BM_vacuumOscillations)->Name("Vacuum Oscillations")->Args({1 << 10, 1 << 10});

// Register the function as a benchmark
+ // NOLINTNEXTLINE
BENCHMARK(BM_constMatterOscillations)->Name("Const Density Oscillations")->Args({1 << 10, 1 << 10});

// Run the benchmark
+ // NOLINTNEXTLINE
BENCHMARK_MAIN();
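For readers unfamiliar with Google Benchmark's registration API: Args({1 << 10, 1 << 10}) feeds the two values to the benchmark body as state.range(0) and state.range(1), here the batch size and number of batches. A minimal self-contained sketch of the same pattern (hypothetical benchmark, assumes Google Benchmark is installed):

```cpp
#include <benchmark/benchmark.h>

static void BM_example(benchmark::State &state)
{
    const long batchSize = state.range(0);
    const long nBatches = state.range(1);

    // `_` is deliberately unused, as in the benchmarks above
    // NOLINTNEXTLINE
    for (auto _ : state)
    {
        // This code gets timed
        long total = 0;
        for (long i = 0; i < batchSize * nBatches; i++)
        {
            total += i;
        }
        benchmark::DoNotOptimize(total); // stop the loop being optimised away
    }
}

BENCHMARK(BM_example)->Name("Example")->Args({1 << 10, 1 << 10});

BENCHMARK_MAIN();
```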
2 changes: 1 addition & 1 deletion nuTens/propagator/const-density-solver.hpp
@@ -80,7 +80,7 @@ class ConstDensityMatterSolver : public BaseMatterSolver
/// shape should look like {Nbatches, 1, 1}.
/// @param[out] eigenvectors The returned eigenvectors
/// @param[out] eigenvalues The corresponding eigenvalues
- void calculateEigenvalues(const Tensor &energies, Tensor &eigenvectors, Tensor &eigenvalues);
+ void calculateEigenvalues(const Tensor &energies, Tensor &eigenvectors, Tensor &eigenvalues) override;

private:
Tensor PMNS;
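Adding override above is more than linter appeasement: if the derived signature ever drifts out of sync with the base class virtual, the compiler rejects it instead of silently declaring a new, unrelated function. A sketch with hypothetical names (not the real solver classes):

```cpp
struct BaseSolver
{
    virtual void calculate(float energy) = 0;
    virtual ~BaseSolver() = default;
};

struct ConstSolver : BaseSolver
{
    // OK: exactly matches the base class virtual
    void calculate(float /*energy*/) override
    {
    }

    // void calculate(double energy) override; // error: overrides nothing
};

int main()
{
    ConstSolver solver;
    BaseSolver &base = solver;
    base.calculate(1.0F); // dispatches to ConstSolver::calculate
    return 0;
}
```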
10 changes: 9 additions & 1 deletion nuTens/tensors/tensor.hpp
@@ -63,7 +63,7 @@ class Tensor

/// @brief Construct a 1-d array with specified values
/// @arg values The values to include in the tensor
- Tensor(std::vector<float> values, NTdtypes::scalarType type = NTdtypes::kFloat,
+ Tensor(const std::vector<float> &values, NTdtypes::scalarType type = NTdtypes::kFloat,
NTdtypes::deviceType device = NTdtypes::kCPU, bool requiresGrad = true);

/// @brief Construct an identity tensor (has to be a 2d square tensor)
@@ -164,10 +164,18 @@ class Tensor
/// @arg s The scalar
/// @arg t The tensor
static Tensor scale(const Tensor &t, float s);
+ /// @brief Scale a matrix by some scalar
+ /// @arg s The scalar
+ /// @arg t The tensor
+ static Tensor scale(const Tensor &t, double s);
/// @brief Scale a matrix by some complex scalar
/// @arg s The scalar
/// @arg t The tensor
static Tensor scale(const Tensor &t, std::complex<float> s);
+ /// @brief Scale a matrix by some complex scalar
+ /// @arg s The scalar
+ /// @arg t The tensor
+ static Tensor scale(const Tensor &t, std::complex<double> s);

// ############################################
// ################ Inlines ###################
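A note on why the new double and std::complex<double> overloads of scale are useful: a bare literal such as 1.0 is a double, so with only the float overloads every such call site goes through a narrowing conversion. A standalone sketch with hypothetical free functions (not the real Tensor API):

```cpp
#include <complex>
#include <iostream>

void scale(float s)
{
    std::cout << "float overload: " << s << "\n";
}

void scale(double s)
{
    std::cout << "double overload: " << s << "\n";
}

void scale(std::complex<double> s)
{
    std::cout << "complex<double> overload: " << s << "\n";
}

int main()
{
    scale(1.0F);                           // picks the float overload
    scale(1.0);                            // picks the double overload
    scale(std::complex<double>(0.0, 1.0)); // picks the complex overload
    return 0;
}
```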
21 changes: 20 additions & 1 deletion nuTens/tensors/torch-tensor.cpp
@@ -6,7 +6,8 @@ std::string Tensor::getTensorLibrary()
return "PyTorch";
}

- Tensor::Tensor(std::vector<float> values, NTdtypes::scalarType type, NTdtypes::deviceType device, bool requiresGrad)
+ Tensor::Tensor(const std::vector<float> &values, NTdtypes::scalarType type, NTdtypes::deviceType device,
+                bool requiresGrad)
{
NT_PROFILE();

@@ -298,6 +299,15 @@ Tensor Tensor::scale(const Tensor &t, float s)
return ret;
}

+ Tensor Tensor::scale(const Tensor &t, double s)
+ {
+     NT_PROFILE();
+
+     Tensor ret;
+     ret.setTensor(torch::multiply(t._tensor, s));
+     return ret;
+ }

Tensor Tensor::scale(const Tensor &t, std::complex<float> s)
{
NT_PROFILE();
@@ -307,6 +317,15 @@ Tensor Tensor::scale(const Tensor &t, std::complex<float> s)
return ret;
}

+ Tensor Tensor::scale(const Tensor &t, std::complex<double> s)
+ {
+     NT_PROFILE();
+
+     Tensor ret;
+     ret.setTensor(torch::multiply(t._tensor, c10::complex<double>(s.real(), s.imag())));
+     return ret;
+ }

void Tensor::matmul_(const Tensor &t2)
{
NT_PROFILE();
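The new scale implementations mirror the existing float versions; the one wrinkle is that libtorch represents complex scalars as c10::complex rather than std::complex, hence the re-wrapping before the multiply call. A sketch of that conversion in isolation (assumes a working libtorch install; illustrative, not repo code):

```cpp
#include <complex>
#include <iostream>
#include <torch/torch.h>

int main()
{
    torch::Tensor t = torch::ones({2, 2}, torch::kComplexDouble);

    // re-wrap the std::complex as c10::complex before handing it to torch
    std::complex<double> s(0.0, 1.0);
    torch::Tensor scaled = torch::multiply(t, c10::complex<double>(s.real(), s.imag()));

    std::cout << scaled << "\n"; // every element is now 0 + 1i
    return 0;
}
```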
