From 5a52a4211651126d1c059bc5ef1b9eeb13b08832 Mon Sep 17 00:00:00 2001 From: Ivo Anjo Date: Thu, 8 Aug 2024 09:21:07 +0100 Subject: [PATCH] [NO-TICKET] Don't put results in benchmarking folder directly **What does this PR do?** This PR tweaks the configuration for benchmark output results to place result files in the current directory the benchmark gets executed from, instead of placing the results in the benchmarks directory. **Motivation:** In #3810 we standardized the results output file name in benchmarks to match the benchmark name. In that PR, we used `__FILE__` as a prefix. This changed where results are placed: where previously they were placed in the current folder where the benchmarks were run from (often the root of the repo), with that PR, they started getting placed in the benchmarks directory. This clashes with our `validate_benchmarks_spec.rb` that looks for files in those directories, e.g. running a benchmark and then running the test suite will make the test suite fail, which is somewhat annoying. While I could've changed the tests to filter out results files, I also find it useful to place the results where I'm executing the benchmarks from, as it makes organization easier (you just run the benchmark from where you want and you get the result there). **Additional Notes:** N/A **How to test the change?** Run the benchmarks and confirm the results file gets placed in the current folder! 
:) --- benchmarks/library_gem_loading.rb | 2 +- benchmarks/profiler_allocation.rb | 4 ++-- benchmarks/profiler_gc.rb | 12 ++++++------ benchmarks/profiler_hold_resume_interruptions.rb | 2 +- benchmarks/profiler_http_transport.rb | 2 +- benchmarks/profiler_memory_sample_serialize.rb | 2 +- benchmarks/profiler_sample_loop_v2.rb | 2 +- benchmarks/profiler_sample_serialize.rb | 2 +- benchmarks/tracing_trace.rb | 14 +++++++------- 9 files changed, 21 insertions(+), 21 deletions(-) diff --git a/benchmarks/library_gem_loading.rb b/benchmarks/library_gem_loading.rb index 9ec496bd121..bd84abd04a5 100644 --- a/benchmarks/library_gem_loading.rb +++ b/benchmarks/library_gem_loading.rb @@ -34,7 +34,7 @@ def benchmark_gem_loading raise unless status.success? end - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end RUBY diff --git a/benchmarks/profiler_allocation.rb b/benchmarks/profiler_allocation.rb index e2cced33c6f..c7d6c1144ee 100644 --- a/benchmarks/profiler_allocation.rb +++ b/benchmarks/profiler_allocation.rb @@ -28,7 +28,7 @@ def run_benchmark x.report('Allocations (baseline)', 'BasicObject.new') - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end @@ -50,7 +50,7 @@ def run_benchmark x.report("Allocations (#{ENV['CONFIG']})", 'BasicObject.new') - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end end diff --git a/benchmarks/profiler_gc.rb b/benchmarks/profiler_gc.rb index 89a6bfb007b..e8347c01000 100644 --- a/benchmarks/profiler_gc.rb +++ b/benchmarks/profiler_gc.rb @@ -43,7 +43,7 @@ def run_benchmark Datadog::Profiling::Collectors::ThreadContext::Testing._native_sample_after_gc(@collector) end - x.save! 
"#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end @@ -69,7 +69,7 @@ def run_benchmark @recorder.serialize end - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end @@ -81,7 +81,7 @@ def run_benchmark x.report('Major GC runs (profiling disabled)', 'GC.start') - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end @@ -100,7 +100,7 @@ def run_benchmark x.report('Major GC runs (profiling enabled)', 'GC.start') - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end @@ -114,7 +114,7 @@ def run_benchmark x.report('Allocations (profiling disabled)', 'Object.new') - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end @@ -133,7 +133,7 @@ def run_benchmark x.report('Allocations (profiling enabled)', 'Object.new') - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end diff --git a/benchmarks/profiler_hold_resume_interruptions.rb b/benchmarks/profiler_hold_resume_interruptions.rb index a9257209b8d..e5fdf7e426a 100644 --- a/benchmarks/profiler_hold_resume_interruptions.rb +++ b/benchmarks/profiler_hold_resume_interruptions.rb @@ -28,7 +28,7 @@ def run_benchmark Datadog::Profiling::Collectors::CpuAndWallTimeWorker._native_resume_signals end - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! 
end end diff --git a/benchmarks/profiler_http_transport.rb b/benchmarks/profiler_http_transport.rb index 6388047990f..15704375145 100644 --- a/benchmarks/profiler_http_transport.rb +++ b/benchmarks/profiler_http_transport.rb @@ -86,7 +86,7 @@ def run_benchmark run_once end - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end end diff --git a/benchmarks/profiler_memory_sample_serialize.rb b/benchmarks/profiler_memory_sample_serialize.rb index b378aa719bf..a1ac2b85f33 100644 --- a/benchmarks/profiler_memory_sample_serialize.rb +++ b/benchmarks/profiler_memory_sample_serialize.rb @@ -83,7 +83,7 @@ def run_benchmark recorder.serialize end - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end end diff --git a/benchmarks/profiler_sample_loop_v2.rb b/benchmarks/profiler_sample_loop_v2.rb index ca72feb440a..22f9d81a83d 100644 --- a/benchmarks/profiler_sample_loop_v2.rb +++ b/benchmarks/profiler_sample_loop_v2.rb @@ -51,7 +51,7 @@ def run_benchmark Datadog::Profiling::Collectors::ThreadContext::Testing._native_sample(@collector, PROFILER_OVERHEAD_STACK_THREAD) end - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end diff --git a/benchmarks/profiler_sample_serialize.rb b/benchmarks/profiler_sample_serialize.rb index 05b1d3d4adc..4ddb77760d0 100644 --- a/benchmarks/profiler_sample_serialize.rb +++ b/benchmarks/profiler_sample_serialize.rb @@ -53,7 +53,7 @@ def run_benchmark nil end - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! 
end diff --git a/benchmarks/tracing_trace.rb b/benchmarks/tracing_trace.rb index 4d17ed7aa07..0ed2c2c4066 100644 --- a/benchmarks/tracing_trace.rb +++ b/benchmarks/tracing_trace.rb @@ -46,7 +46,7 @@ def trace(x, depth) trace(x, 10) trace(x, 100) - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end end @@ -74,7 +74,7 @@ def trace(x, depth) trace(x, 10) trace(x, 100) - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end end @@ -88,7 +88,7 @@ def benchmark_to_digest trace.to_digest end - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end end @@ -103,7 +103,7 @@ def benchmark_log_correlation Datadog::Tracing.log_correlation end - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end end @@ -119,7 +119,7 @@ def benchmark_to_digest_continue Datadog::Tracing.continue_trace!(digest) end - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end end @@ -148,7 +148,7 @@ def benchmark_propagation_datadog raise unless extracted_trace_digest end - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end end @@ -171,7 +171,7 @@ def benchmark_propagation_trace_context raise unless extracted_trace_digest end - x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE + x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE x.compare! end end