Skip to content

Commit

Permalink
Check resource utilization
Browse files Browse the repository at this point in the history
Signed-off-by: Monthon Klongklaew <monthonk@amazon.com>
  • Loading branch information
monthonk committed Sep 19, 2024
1 parent 7f7e7f0 commit d2f5870
Show file tree
Hide file tree
Showing 6 changed files with 136 additions and 0 deletions.
30 changes: 30 additions & 0 deletions .github/workflows/bench.yml
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,8 @@ jobs:
with:
tool: 'customBiggerIsBetter'
output-file-path: results/output.json
# This is the default value; we set it explicitly for clarity
benchmark-data-dir-path: dev/bench
alert-threshold: "200%"
fail-on-alert: true
# GitHub API token to make a commit comment
Expand All @@ -78,6 +80,20 @@ jobs:
auto-push: ${{ inputs.publish }}
comment-on-alert: true
max-items-in-chart: 20
- name: Check resource utilization
uses: benchmark-action/github-action-benchmark@v1
with:
tool: 'customSmallerIsBetter'
output-file-path: results/peak_mem_usage.json
benchmark-data-dir-path: dev/bench/peak_mem_usage
alert-threshold: "200%"
fail-on-alert: false
# GitHub API token to make a commit comment
github-token: ${{ secrets.GITHUB_TOKEN }}
# Store the results and deploy GitHub pages automatically if the results are from main branch
auto-push: ${{ inputs.publish }}
comment-on-alert: true
max-items-in-chart: 20

latency-bench:
name: Benchmark (Latency)
Expand Down Expand Up @@ -188,3 +204,17 @@ jobs:
auto-push: ${{ inputs.publish }}
comment-on-alert: true
max-items-in-chart: 20
- name: Check resource utilization
uses: benchmark-action/github-action-benchmark@v1
with:
tool: 'customSmallerIsBetter'
output-file-path: results/peak_mem_usage.json
benchmark-data-dir-path: dev/cache_bench/peak_mem_usage
alert-threshold: "200%"
fail-on-alert: false
# GitHub API token to make a commit comment
github-token: ${{ secrets.GITHUB_TOKEN }}
# Store the results and deploy GitHub pages automatically if the results are from main branch
auto-push: ${{ inputs.publish }}
comment-on-alert: true
max-items-in-chart: 20
14 changes: 14 additions & 0 deletions .github/workflows/bench_s3express.yml
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,20 @@ jobs:
auto-push: ${{ inputs.publish }}
comment-on-alert: true
max-items-in-chart: 20
- name: Check resource utilization
uses: benchmark-action/github-action-benchmark@v1
with:
tool: 'customSmallerIsBetter'
output-file-path: results/peak_mem_usage.json
benchmark-data-dir-path: dev/s3-express/bench/peak_mem_usage
alert-threshold: "200%"
fail-on-alert: false
# GitHub API token to make a commit comment
github-token: ${{ secrets.GITHUB_TOKEN }}
# Store the results and deploy GitHub pages automatically if the results are from main branch
auto-push: ${{ inputs.publish }}
comment-on-alert: true
max-items-in-chart: 20

latency-bench:
name: Benchmark (Latency)
Expand Down
4 changes: 4 additions & 0 deletions mountpoint-s3/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -96,3 +96,7 @@ path = "src/main.rs"
name = "mock-mount-s3"
path = "src/bin/mock-mount-s3.rs"
required-features = ["mountpoint-s3-client/mock"]

[[bin]]
name = "mount-s3-log-analyzer"
path = "src/bin/mount-s3-log-analyzer.rs"
9 changes: 9 additions & 0 deletions mountpoint-s3/scripts/fs_bench.sh
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,9 @@ read_benchmark () {
# run the benchmark
run_fio_job $job_file $bench_file $mount_dir $log_dir

# collect resource utilization metrics (peak memory usage)
cargo run --bin mount-s3-log-analyzer ${log_dir} ${results_dir}/${job_name}_peak_mem.json ${job_name}

cleanup

done
Expand Down Expand Up @@ -220,6 +223,9 @@ write_benchmark () {
# run the benchmark
run_fio_job $job_file $bench_file $mount_dir $log_dir

# collect resource utilization metrics (peak memory usage)
cargo run --bin mount-s3-log-analyzer ${log_dir} ${results_dir}/${job_name}_peak_mem.json ${job_name}

cleanup

done
Expand All @@ -229,4 +235,7 @@ read_benchmark
write_benchmark

# combine all bench results into one json file
echo "Throughput:"
jq -n '[inputs]' ${results_dir}/*_parsed.json | tee ${results_dir}/output.json
echo "Peak memory usage:"
jq -n '[inputs]' ${results_dir}/*_peak_mem.json | tee ${results_dir}/peak_mem_usage.json
6 changes: 6 additions & 0 deletions mountpoint-s3/scripts/fs_cache_bench.sh
Original file line number Diff line number Diff line change
Expand Up @@ -196,11 +196,17 @@ cache_benchmark () {
# run the benchmark
run_fio_job $job_file $bench_file $mount_dir $log_dir

# collect resource utilization metrics (peak memory usage)
cargo run --bin mount-s3-log-analyzer ${log_dir} ${results_dir}/${job_name}_peak_mem.json ${job_name}

cleanup
done
}

cache_benchmark

# combine all bench results into one json file
echo "Throughput:"
jq -n '[inputs]' ${results_dir}/*_parsed.json | tee ${results_dir}/output.json
echo "Peak memory usage:"
jq -n '[inputs]' ${results_dir}/*_peak_mem.json | tee ${results_dir}/peak_mem_usage.json
73 changes: 73 additions & 0 deletions mountpoint-s3/src/bin/mount-s3-log-analyzer.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
//! A helper binary for parsing Mountpoint logs and collecting metrics.
//! Currently, we are only interested in peak memory usage from `process.memory_usage`.
//!
//! This binary is intended only for use in testing and development of Mountpoint.

use std::{
fs::{self, File},
io::{BufRead, BufReader, BufWriter, Write},
path::PathBuf,
};

use clap::Parser;
use serde_json::json;

// Command-line arguments for the log analyzer.
//
// NOTE: plain `//` comments are used deliberately — `///` doc comments on a
// clap-derived struct/fields would be picked up as CLI help text and change
// the generated `--help` output.
#[derive(Parser, Debug)]
struct CliArgs {
    // Directory whose regular files are scanned for metric lines.
    #[clap(help = "Log directory to analyze", value_name = "LOG_DIRECTORY")]
    log_dir: PathBuf,

    // Destination path for the JSON result (created/truncated on write).
    #[clap(help = "Output JSON file name", value_name = "OUTPUT_FILE")]
    out_file: PathBuf,

    // Value written to the "name" field of the output JSON object.
    #[clap(help = "Test name to be reported in JSON file")]
    test_name: String,

    // Substring a log line must contain to be treated as a metric sample.
    #[clap(
        help = "Log filter string [default: process.memory_usage]",
        default_value = "process.memory_usage"
    )]
    log_filter_str: String,
}

/// Scan every regular file in the log directory for lines matching the filter
/// string, take the largest trailing integer value found (assumed to be bytes
/// — TODO confirm against Mountpoint's `process.memory_usage` log format),
/// and write it as a single `{name, value, unit}` JSON object in MiB.
///
/// # Errors
///
/// Returns an error if the log directory cannot be read, a log file cannot be
/// opened or read, or the output file cannot be written.
fn main() -> anyhow::Result<()> {
    let args = CliArgs::parse();

    let mut metric_values: Vec<u64> = Vec::new();

    // Collect metric samples from all log files directly under the directory
    // (subdirectories are ignored).
    for entry in fs::read_dir(args.log_dir)? {
        let entry = entry?;
        if entry.file_type()?.is_file() {
            let file = File::open(entry.path())?;
            let reader = BufReader::new(file);

            for line in reader.lines() {
                // Propagate read errors instead of silently dropping them —
                // `lines().flatten()` would skip a failed read and could
                // under-report the peak (clippy: lines_filter_map_ok).
                let line = line?;
                if line.contains(&args.log_filter_str) {
                    // The metric value is the last whitespace-separated token;
                    // lines whose last token is not a plain integer are ignored.
                    if let Some(Ok(value)) = line.split_whitespace().last().map(|t| t.parse::<u64>()) {
                        metric_values.push(value);
                    }
                }
            }
        }
    }

    // Peak value converted to MiB; 0.0 when no matching samples were found.
    let max = metric_values
        .iter()
        .max()
        .map_or(0.0, |&value| value as f64 / (1024 * 1024) as f64);

    // Shape expected by github-action-benchmark's `customSmallerIsBetter` tool.
    let contents = json!({
        "name": args.test_name,
        "value": max,
        "unit": "MiB",
    });
    let file = File::create(args.out_file)?;
    let mut writer = BufWriter::new(file);
    serde_json::to_writer(&mut writer, &contents)?;
    writer.flush()?;
    Ok(())
}

0 comments on commit d2f5870

Please sign in to comment.