
Commit eb1b2fc

Abhigyan Acherjee authored and committed
Added benchmark plots, amended RooFitBinned benchmarks, and added bash script
1 parent d479d99 commit eb1b2fc

4 files changed, +157 −3 lines changed

root/roofit/roofit/CMakeLists.txt

Lines changed: 2 additions & 0 deletions
@@ -19,6 +19,8 @@ if(cuda)
 endif()

 file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/benchRooFitBackends_make_plot.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/compare_benchmarks.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/run_benchmarks.sh DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)

 RB_ADD_GBENCHMARK(benchCodeSquashAD
   benchCodeSquashAD.cxx

root/roofit/roofit/RooFitBinnedBenchmarks.cxx

Lines changed: 64 additions & 3 deletions
@@ -26,10 +26,48 @@ namespace {
 const std::vector<int> nBinsVector {5, 10, 15};
 const int nBinsForChannelScan = 10;
 const int nChannelsForBinScan = 1;
-const std::vector<int> nCPUVector {1, 2, 3};
+//const std::vector<int> nCPUVector {1, 2, 3};
+const std::vector<int> nCPUVector {1};
+
+////default evaluation backend
+//constexpr auto evalBackend = RooFit::EvalBackend::Value::Legacy;
+std::string evalBackend = "cpu";
+//RooFit::EvalBackend::Value evalBackend = RooFit::EvalBackend::Value::Legacy;

-constexpr auto evalBackend = RooFit::EvalBackend::Value::Cpu;
+// Function to parse enum from string
+// RooFit::EvalBackend::Value parseEvalBackend(const std::string& str)
+// {
+//    if (str == "Cpu")
+//    {
+//       return RooFit::EvalBackend::Value::Cpu;
+//    }
+//    else if (str == "Codegen")
+//    {
+//       return RooFit::EvalBackend::Value::Codegen;
+//    }
+//    else if (str == "CodegenNoGrad")
+//    {
+//       return RooFit::EvalBackend::Value::CodegenNoGrad;
+//    }
+//    else if (str == "Legacy")
+//    {
+//       return RooFit::EvalBackend::Value::Legacy;
+//    }
+//    else {
+//       throw std::invalid_argument("Invalid evalBackend value");
+//    }
+// }

+// // Command-line flag to set evalBackend
+// void CustomArguments(benchmark::internal::Benchmark* b)
+// {
+//    // Define a command-line argument to specify evalBackend
+//    static std::vector<std::string> evalBackendValues = {"Cpu", "Codegen", "CodegenNoGrad", "Legacy"}; // Add more values if needed
+//    for (const auto& value : evalBackendValues)
+//    {
+//       b->Arg(value);
+//    }
+// }
 auto const timeUnit = benchmark::kMillisecond;

 void setupRooMsgService() {
@@ -274,4 +312,27 @@ BENCHMARK(BM_RooFit_BinnedTestMinos)
 //####################################################################
 //############## RUN #################################################

-BENCHMARK_MAIN();
+////BENCHMARK_MAIN();
+int main(int argc, char** argv)
+{
+
+   benchmark::Initialize(&argc, argv);
+
+   for (int i = 1; i < argc; ++i)
+   {
+      if (std::string(argv[i]) == "-b")
+      {
+         if (i + 1 < argc)
+         {
+            // Set the evalBackend value from the next command-line argument
+            evalBackend = argv[i + 1]; //parseEvalBackend(argv[i + 1]);
+         }
+         else
+         {
+            std::cerr << "Missing value for -b argument" << std::endl;
+            return 1;
+         }
+      }
+   }
+   benchmark::RunSpecifiedBenchmarks();
+}
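Note that main() stores the -b value as a plain string, the commented-out parseEvalBackend above expects capitalized names ("Cpu", "Codegen", ...), and run_benchmarks.sh passes lower-case ones ("cpu", "codegen", "codegen_no_grad", "legacy"). A minimal sketch of a helper that maps the lower-case names onto RooFit::EvalBackend::Value, a hypothetical addition that is not part of this commit and assumes the RooFit headers already included by RooFitBinnedBenchmarks.cxx (the file already refers to that enum):

#include <stdexcept>
#include <string>

// Hypothetical helper (not in this commit): map the lower-case backend names
// passed via -b by run_benchmarks.sh onto the RooFit::EvalBackend::Value enum.
RooFit::EvalBackend::Value parseEvalBackend(const std::string &str)
{
   if (str == "cpu")
      return RooFit::EvalBackend::Value::Cpu;
   if (str == "codegen")
      return RooFit::EvalBackend::Value::Codegen;
   if (str == "codegen_no_grad")
      return RooFit::EvalBackend::Value::CodegenNoGrad;
   if (str == "legacy")
      return RooFit::EvalBackend::Value::Legacy;
   throw std::invalid_argument("Invalid evalBackend value: " + str);
}

With such a helper, main() could keep assigning evalBackend = argv[i + 1] and each benchmark could convert the string once before configuring the fit, recovering the type safety of the constexpr value that this commit removes.
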
root/roofit/roofit/compare_benchmarks.py

Lines changed: 64 additions & 0 deletions
import pandas as pd
import csv
import matplotlib.pyplot as plt
import numpy as np

def parse_and_writer(csv_file_path, name):
    with open(csv_file_path, 'r', newline='') as csvfile:
        reader = csv.reader(csvfile)
        # Skip the first 8 lines (run context written before the CSV data)
        for _ in range(8):
            next(reader)

        filtered_csv_file_path = name + ".csv"
        with open(filtered_csv_file_path, 'w', newline='') as filtered_csvfile:
            writer = csv.writer(filtered_csvfile)
            for row in reader:
                writer.writerow(row)

parse_and_writer("./out_codegen.csv", "codegen")
parse_and_writer("./out_codegen_ngrad.csv", "codegen_ngrad")
parse_and_writer("./out_cpu.csv", "cpu")
parse_and_writer("./out_legacy.csv", "legacy")

codegen_df = pd.read_csv("codegen.csv")
codegen_nograd_df = pd.read_csv("codegen_ngrad.csv")
legacy_df = pd.read_csv("legacy.csv")
cpu_df = pd.read_csv("cpu.csv")

# Plotting
plt.figure(figsize=(10, 6))

x = np.arange(len(codegen_df['name'].unique()))

for i, benchmark in enumerate(codegen_df['name'].unique()):
    codegen_time = codegen_df.loc[codegen_df['name'] == benchmark, 'real_time']
    codegen_nograd_time = codegen_nograd_df.loc[codegen_nograd_df['name'] == benchmark, 'real_time']
    cpu_time = cpu_df.loc[cpu_df['name'] == benchmark, 'real_time']
    legacy_time = legacy_df.loc[legacy_df['name'] == benchmark, 'real_time']

    plt.bar(x[i] - 0.10, codegen_time, width=0.15, align='center', label='codegen', color='lightblue')
    plt.bar(x[i], codegen_nograd_time, width=0.15, align='edge', label='codegen_nograd', color='navy')
    plt.bar(x[i] + 0.15, cpu_time, width=0.15, align='edge', label='cpu', color='cyan')
    plt.bar(x[i] + 0.30, legacy_time, width=0.15, align='edge', label='legacy', color='gray')

# Customize legend
legend_labels = ['codegen', 'codegen_nograd', 'cpu', 'legacy']
legend_colors = ['lightblue', 'navy', 'cyan', 'gray']
legend_handles = [plt.Rectangle((0, 0), 1, 1, color=color) for color in legend_colors]
plt.legend(legend_handles, legend_labels)

plt.yscale('log')

plt.xlabel('Benchmark')
plt.ylabel('Time (milliseconds)')
plt.title('Comparison of Benchmarks for Different Evaluation Backends')
plt.xticks(x, codegen_df['name'].unique(), rotation=90)
plt.tight_layout()
plt.savefig('comparison_plot.jpg')
plt.show()

root/roofit/roofit/run_benchmarks.sh

Lines changed: 27 additions & 0 deletions
#!/bin/bash

# To make this script executable: chmod +x run_benchmarks.sh
# Then run it with: ./run_benchmarks.sh

# Run a benchmark command in the background and wait until its CSV output file exists
run_benchmark() {
    echo "Running benchmark: $1"
    $1 &
    local pid=$!
    while [ ! -f "$2" ]; do
        sleep 1
    done
    wait $pid
    echo "CSV file generated: $2"
}

# Run benchmarks
run_benchmark "./benchRooFitBinned -b codegen --benchmark_out_format=csv --benchmark_out=out_codegen.csv" "out_codegen.csv"
run_benchmark "./benchRooFitBinned -b codegen_no_grad --benchmark_out_format=csv --benchmark_out=out_codegen_ngrad.csv" "out_codegen_ngrad.csv"
run_benchmark "./benchRooFitBinned -b legacy --benchmark_out_format=csv --benchmark_out=out_legacy.csv" "out_legacy.csv"
run_benchmark "./benchRooFitBinned -b cpu --benchmark_out_format=csv --benchmark_out=out_cpu.csv" "out_cpu.csv"

# Run Python script to produce the comparison plot
python3 compare_benchmarks.py
