DEBUG-2647 Run multiple Ruby micro-benchmark files (#3810)
p-datadog committed Aug 5, 2024
1 parent 4fbe269 commit 00299ee
Showing 14 changed files with 166 additions and 55 deletions.
25 changes: 25 additions & 0 deletions benchmarks/README.md
@@ -0,0 +1,25 @@
# `datadog` Benchmarks

## Adding a New Benchmark File

1. Use one of the following prefixes:

- `library_`
- `profiling_`
- `tracing_`

2. Add the new file to `run_all.sh` in this directory.

3. Depending on the prefix, add the new file to the correct
`validate_benchmarks_spec.rb` as follows:

- `library_` prefix: `spec/validate_benchmarks_spec.rb`
- `profiling_` prefix: `spec/datadog/profiling/validate_benchmarks_spec.rb`
- `tracing_` prefix: `spec/datadog/tracing/validate_benchmarks_spec.rb`

## Adding Benchmarks For a New Product

1. Create a `validate_benchmarks_spec.rb` test in the product subdirectory,
using the existing files as a template.

2. Update this README to add the new product in the previous section.
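
For illustration only (not part of this commit), here is a minimal sketch of what a new benchmark file following these conventions might look like. The file name `tracing_example.rb` and the measured operation are hypothetical; the guard, timing config, `save!`, and `compare!` calls mirror the existing benchmark files.

```ruby
# benchmarks/tracing_example.rb -- hypothetical file name, illustration only
VALIDATE_BENCHMARK_MODE = ENV['VALIDATE_BENCHMARK'] == 'true'

return unless __FILE__ == $PROGRAM_NAME || VALIDATE_BENCHMARK_MODE

require 'benchmark/ips'
require 'datadog'

Benchmark.ips do |x|
  # Keep the run extremely short when RSpec is only validating that the file still runs.
  benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.001, warmup: 0 } : { time: 10, warmup: 2 }
  x.config(**benchmark_time)

  x.report('example operation') do
    Datadog::Tracing.trace('example.op') {} # placeholder workload, not a real benchmark target
  end

  x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
  x.compare!
end
```

Per the steps above, such a file would also be added to `run_all.sh` and listed in `spec/datadog/tracing/validate_benchmarks_spec.rb`.
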
52 changes: 52 additions & 0 deletions benchmarks/library_gem_loading.rb
@@ -0,0 +1,52 @@
# Used to quickly run this benchmark under RSpec as part of the usual test suite, to validate it hasn't bitrotted
VALIDATE_BENCHMARK_MODE = ENV['VALIDATE_BENCHMARK'] == 'true'

return unless __FILE__ == $PROGRAM_NAME || VALIDATE_BENCHMARK_MODE

require 'open3'

class GemLoadingBenchmark
def benchmark_gem_loading
# This benchmark needs to be run in a clean environment where datadog is
# not loaded yet.
#
# Now that this benchmark is in its own file, the subprocess would not be
# needed if this file were always executed on its own; it is kept because
# the file also gets loaded, with datadog already present, by the RSpec
# validation suite.
output, status = Open3.capture2e('bundle', 'exec', 'ruby', stdin_data: <<-RUBY)
raise "Datadog is already loaded" if defined?(::Datadog::Core)
lib = File.expand_path('../lib', '#{__dir__}')
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
VALIDATE_BENCHMARK_MODE = #{VALIDATE_BENCHMARK_MODE}
require 'benchmark/ips'
Benchmark.ips do |x|
# Gem loading is considerably slower than the other microbenchmarks
benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.001, warmup: 0 } : { time: 60, warmup: 5 }
x.config(**benchmark_time)
x.report("Gem loading") do
pid = fork { require 'datadog' }
_, status = Process.wait2(pid)
raise unless status.success?
end
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
RUBY

print output

raise "Benchmark failed with status #{status}: #{output}" unless status.success?
end
end

puts "Current pid is #{Process.pid}"

GemLoadingBenchmark.new.instance_exec do
benchmark_gem_loading
end
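
As a side note (not part of the commit), the clean-subprocess pattern used above boils down to the following sketch: the benchmark script is fed to a fresh Ruby process on stdin, so gems loaded in the parent process, `datadog` in particular, cannot skew the measurement.

```ruby
require 'open3'

# Run a snippet in a brand-new Ruby process; nothing loaded in the parent leaks into it.
script = <<~RUBY
  puts defined?(::Datadog::Core) ? 'datadog already loaded' : 'clean process'
RUBY

output, status = Open3.capture2e('ruby', stdin_data: script)
print output                     # expected to print "clean process"
raise 'child process failed' unless status.success?
```

Inside the real benchmark the child additionally forks once per iteration, since `require 'datadog'` can only be measured the first time it happens in a given process.
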
4 changes: 2 additions & 2 deletions benchmarks/profiler_allocation.rb
@@ -30,7 +30,7 @@ def run_benchmark

x.report('Allocations (baseline)', 'BasicObject.new')

x.save! 'profiler-allocation-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

@@ -53,7 +53,7 @@ def run_benchmark

x.report("Allocations (#{ENV['CONFIG']})", 'BasicObject.new')

x.save! 'profiler-allocation-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
end
12 changes: 6 additions & 6 deletions benchmarks/profiler_gc.rb
@@ -45,7 +45,7 @@ def run_benchmark
Datadog::Profiling::Collectors::ThreadContext::Testing._native_sample_after_gc(@collector)
end

x.save! 'profiler-gc-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

@@ -72,7 +72,7 @@ def run_benchmark
@recorder.serialize
end

x.save! 'profiler-gc-minute-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

@@ -85,7 +85,7 @@ def run_benchmark

x.report('Major GC runs (profiling disabled)', 'GC.start')

x.save! 'profiler-gc-integration-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

@@ -105,7 +105,7 @@ def run_benchmark

x.report('Major GC runs (profiling enabled)', 'GC.start')

x.save! 'profiler-gc-integration-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

@@ -120,7 +120,7 @@ def run_benchmark

x.report('Allocations (profiling disabled)', 'Object.new')

x.save! 'profiler-gc-integration-allocations-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

@@ -140,7 +140,7 @@ def run_benchmark

x.report('Allocations (profiling enabled)', 'Object.new')

x.save! 'profiler-gc-integration-allocations-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

2 changes: 1 addition & 1 deletion benchmarks/profiler_hold_resume_interruptions.rb
@@ -30,7 +30,7 @@ def run_benchmark
Datadog::Profiling::Collectors::CpuAndWallTimeWorker._native_resume_signals
end

x.save! 'profiler_hold_resume_interruptions-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
end
2 changes: 1 addition & 1 deletion benchmarks/profiler_http_transport.rb
@@ -88,7 +88,7 @@ def run_benchmark
run_once
end

x.save! 'profiler-http-transport-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
end
2 changes: 1 addition & 1 deletion benchmarks/profiler_memory_sample_serialize.rb
@@ -85,7 +85,7 @@ def run_benchmark
recorder.serialize
end

x.save! 'profiler_memory_sample_serialize-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
end
2 changes: 1 addition & 1 deletion benchmarks/profiler_sample_loop_v2.rb
@@ -53,7 +53,7 @@ def run_benchmark
Datadog::Profiling::Collectors::ThreadContext::Testing._native_sample(@collector, PROFILER_OVERHEAD_STACK_THREAD)
end

x.save! 'profiler-sample-loop-v2-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

2 changes: 1 addition & 1 deletion benchmarks/profiler_sample_serialize.rb
@@ -55,7 +55,7 @@ def run_benchmark
nil
end

x.save! 'profiler_sample_serialize-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

20 changes: 20 additions & 0 deletions benchmarks/run_all.sh
@@ -0,0 +1,20 @@
#!/bin/sh

# This script is invoked by benchmarking-platform shell scripts
# to run all of the benchmarks defined in the tracer.

set -ex

for file in \
`dirname "$0"`/library_gem_loading.rb \
`dirname "$0"`/profiler_allocation.rb \
`dirname "$0"`/profiler_gc.rb \
`dirname "$0"`/profiler_hold_resume_interruptions.rb \
`dirname "$0"`/profiler_http_transport.rb \
`dirname "$0"`/profiler_memory_sample_serialize.rb \
`dirname "$0"`/profiler_sample_loop_v2.rb \
`dirname "$0"`/profiler_sample_serialize.rb \
`dirname "$0"`/tracing_trace.rb;
do
bundle exec ruby "$file"
done
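
For comparison only, and not something this commit adds: roughly the same driver could be written in Ruby. This sketch assumes it would live in the same `benchmarks/` directory and relies on each benchmark exiting non-zero on failure.

```ruby
#!/usr/bin/env ruby
# Hypothetical Ruby equivalent of run_all.sh; illustration only.
benchmark_dir = __dir__

%w[
  library_gem_loading
  profiler_allocation
  profiler_gc
  profiler_hold_resume_interruptions
  profiler_http_transport
  profiler_memory_sample_serialize
  profiler_sample_loop_v2
  profiler_sample_serialize
  tracing_trace
].each do |name|
  # exception: true aborts the whole run on the first failure, like `set -e` above.
  system('bundle', 'exec', 'ruby', File.join(benchmark_dir, "#{name}.rb"), exception: true)
end
```
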
35 changes: 0 additions & 35 deletions benchmarks/tracing_trace.rb
@@ -4,7 +4,6 @@
return unless __FILE__ == $PROGRAM_NAME || VALIDATE_BENCHMARK_MODE

require 'benchmark/ips'
require 'open3'
require 'datadog'

class TracingTraceBenchmark
@@ -177,39 +176,6 @@ def benchmark_propagation_trace_context
end
end
end

def benchmark_gem_loading
# This benchmark needs to be run in a clean environment where datadog is not loaded yet
output, status = Open3.capture2e('bundle', 'exec', 'ruby', stdin_data: <<-RUBY)
raise "Datadog is already loaded" if defined?(::Datadog::Core)
lib = File.expand_path('../lib', '#{__dir__}')
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
VALIDATE_BENCHMARK_MODE = #{VALIDATE_BENCHMARK_MODE}
require 'benchmark/ips'
Benchmark.ips do |x|
# Gem loading is quite slower than the other microbenchmarks
benchmark_time = VALIDATE_BENCHMARK_MODE ? { time: 0.001, warmup: 0 } : { time: 60, warmup: 5 }
x.config(**benchmark_time)
x.report("Gem loading") do
pid = fork { require 'datadog' }
_, status = Process.wait2(pid)
raise unless status.success?
end
x.save! "#{__FILE__}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
RUBY

print output

raise "Benchmark failed with status #{status}: #{output}" unless status.success?
end
end

puts "Current pid is #{Process.pid}"
@@ -230,5 +196,4 @@ def run_benchmark(&block)
run_benchmark { benchmark_to_digest_continue }
run_benchmark { benchmark_propagation_datadog }
run_benchmark { benchmark_propagation_trace_context }
run_benchmark { benchmark_gem_loading }
end
10 changes: 5 additions & 5 deletions spec/datadog/profiling/validate_benchmarks_spec.rb
@@ -10,13 +10,13 @@
end

benchmarks_to_validate = [
'profiler_sample_loop_v2',
'profiler_http_transport',
'profiler_sample_serialize',
'profiler_memory_sample_serialize',
'profiler_allocation',
'profiler_gc',
'profiler_hold_resume_interruptions',
'profiler_allocation',
'profiler_http_transport',
'profiler_memory_sample_serialize',
'profiler_sample_loop_v2',
'profiler_sample_serialize',
].freeze

benchmarks_to_validate.each do |benchmark|
21 changes: 19 additions & 2 deletions spec/datadog/tracing/validate_benchmarks_spec.rb
@@ -9,7 +9,24 @@
end
end

describe 'tracing_trace' do
it('runs without raising errors') { expect_in_fork { load './benchmarks/tracing_trace.rb' } }
benchmarks_to_validate = %w[
tracing_trace
]

benchmarks_to_validate.each do |benchmark|
describe benchmark do
it 'runs without raising errors' do
expect_in_fork do
load "./benchmarks/#{benchmark}.rb"
end
end
end
end

# This test validates that we don't forget to add new benchmarks to benchmarks_to_validate
it 'tests all expected benchmarks in the benchmarks folder' do
all_benchmarks = Dir['./benchmarks/tracing_*'].map { |it| it.gsub('./benchmarks/', '').gsub('.rb', '') }

expect(benchmarks_to_validate).to contain_exactly(*all_benchmarks)
end
end
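
To make the completeness check above concrete (illustration only, not from the commit): the glob result is normalized to bare benchmark names, so any new `benchmarks/tracing_*.rb` file that is not listed in `benchmarks_to_validate` causes the expectation to fail.

```ruby
# Equivalent normalization shown standalone; File.basename drops both the
# directory and the '.rb' suffix in one step.
all_benchmarks = Dir['./benchmarks/tracing_*'].map { |path| File.basename(path, '.rb') }
# => ["tracing_trace"] for the files in this commit
```
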
32 changes: 32 additions & 0 deletions spec/validate_benchmarks_spec.rb
@@ -0,0 +1,32 @@
require 'spec_helper'

RSpec.describe 'Library benchmarks' do
before { skip('Spec requires Ruby VM supporting fork') unless PlatformHelpers.supports_fork? }

around do |example|
ClimateControl.modify('VALIDATE_BENCHMARK' => 'true') do
example.run
end
end

benchmarks_to_validate = %w[
library_gem_loading
]

benchmarks_to_validate.each do |benchmark|
describe benchmark do
it 'runs without raising errors' do
expect_in_fork do
load "./benchmarks/#{benchmark}.rb"
end
end
end
end

# This test validates that we don't forget to add new benchmarks to benchmarks_to_validate
it 'tests all expected benchmarks in the benchmarks folder' do
all_benchmarks = Dir['./benchmarks/library_*'].map { |it| it.gsub('./benchmarks/', '').gsub('.rb', '') }

expect(benchmarks_to_validate).to contain_exactly(*all_benchmarks)
end
end
