Skip to content

Commit

Permalink
benchmarks: fix minor issues
Browse files Browse the repository at this point in the history
  • Loading branch information
Johannes Wünsche committed Jan 29, 2024
1 parent 1e20471 commit 5f241f6
Show file tree
Hide file tree
Showing 5 changed files with 22 additions and 49 deletions.
4 changes: 2 additions & 2 deletions betree/haura-benchmarks/haura-plots/haura_plots/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -238,10 +238,10 @@ def main():
print(USAGE_HELP)
sys.exit(2)
data = []
with open(f"{sys.argv[1]}/betree-metrics.jsonl", 'r', encoding="UTF-8") as metrics:
data = util.read_jsonl(metrics)

for path in sys.argv[1:]:
with open(f"{path}/betree-metrics.jsonl", 'r', encoding="UTF-8") as metrics:
data = util.read_jsonl(metrics)
# Plot actions
metrics_plots.plot_throughput(data, path)
metrics_plots.plot_tier_usage(data, path)
Expand Down
12 changes: 6 additions & 6 deletions betree/haura-benchmarks/haura-plots/haura_plots/metrics_plots.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,12 +38,12 @@ def plot_throughput(data, path):
def ms_to_string(time):
    """Format a runtime in milliseconds as a "minutes:seconds" tick label.

    :param time: elapsed time in milliseconds
    :return: string of the form "M:SS", e.g. 61000 -> "1:01"
    """
    # Bug fix: the original body was a bare annotated expression
    # (`time: f"..."`), which has no runtime effect in a function body, so
    # the function returned None and every x-axis tick label became "None".
    return f"{int(time / 1000 / 60)}:{int(time / 1000) % 60:02d}"

epoch_formatted = list(map(ms_to_string, epoch))
axs[tier_id].set_xlabel("runtime (minute:seconds)")
axs[tier_id].set_xticks(epoch, epoch_formatted)
axs[tier_id].locator_params(tight=True, nbins=10)
axs[tier_id].set_ylabel(f"{util.num_to_name(tier_id)}\nMiB/s (I/0)")
label=' | '.join(path.split('/')[-2:])
epoch_formatted = list(map(ms_to_string, epoch))
axs[tier_id].set_xlabel("runtime (minute:seconds)")
axs[tier_id].set_xticks(epoch, epoch_formatted)
axs[tier_id].locator_params(tight=True, nbins=10)
axs[tier_id].set_ylabel(f"{util.num_to_name(tier_id)}\nMiB/s (I/0)")
label=' | '.join(path.split('/')[-2:])
fig.legend(loc="center right",handles=axs[0].get_lines())
# Epoch in seconds
fig.suptitle(f"Haura - {label}", y=0.98) # add title
Expand Down
2 changes: 1 addition & 1 deletion betree/haura-benchmarks/run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@ function ingest() {
(
(
export BETREE__COMPRESSION="None"
run "default" ingest_hdd_none ingest "$ZIP_ARCHIVE"
run "$RUN_IDENT" ingest_hdd_none ingest "$ZIP_ARCHIVE"
)

for level in $(seq 1 16); do
Expand Down
7 changes: 5 additions & 2 deletions betree/haura-benchmarks/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,8 @@ enum Mode {
samples: u64,
min_size: u64,
max_size: u64,
#[structopt(default_value = "0.5")]
ratio: f64,
},
Zip {
n_clients: u32,
Expand Down Expand Up @@ -106,7 +108,7 @@ fn run_all(mode: Mode) -> Result<(), Box<dyn Error>> {
min_size,
max_size,
};
scientific_evaluation::run_read(client, config)?;
scientific_evaluation::run_read_write(client, config, 1.0)?;
control.database.write().sync()?;
}
Mode::EvaluationRW {
Expand All @@ -115,6 +117,7 @@ fn run_all(mode: Mode) -> Result<(), Box<dyn Error>> {
samples,
min_size,
max_size,
ratio,
} => {
let client = control.client(0, b"scientific_evaluation");
let config = scientific_evaluation::EvaluationConfig {
Expand All @@ -124,7 +127,7 @@ fn run_all(mode: Mode) -> Result<(), Box<dyn Error>> {
min_size,
max_size,
};
scientific_evaluation::run_read_write(client, config)?;
scientific_evaluation::run_read_write(client, config, ratio.clamp(0.0, 1.0))?;
control.database.write().sync()?;
}
Mode::Zip {
Expand Down
46 changes: 8 additions & 38 deletions betree/haura-benchmarks/src/scientific_evaluation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,41 +43,11 @@ fn prepare_store(client: &mut Client, config: &EvaluationConfig) -> Result<(), B
Ok(())
}

/// Run the read-only variant of the scientific evaluation benchmark.
///
/// Prepares the object store, then repeatedly reads randomly generated
/// `(offset, length)` ranges from the benchmark object until
/// `config.runtime` seconds have elapsed.  Each read is timed and logged
/// as one CSV row (`offset,size,latency_ns`) to `evaluation_read.csv`.
///
/// # Errors
/// Propagates I/O errors from store preparation, object opening, and the
/// CSV writer.
pub fn run_read(mut client: Client, config: EvaluationConfig) -> Result<(), Box<dyn Error>> {
    println!("running scientific_evaluation");
    // Generate positions to read
    let positions = gen_positions(&mut client, &config);
    prepare_store(&mut client, &config)?;

    let (obj, _info) = client
        .object_store
        .open_object_with_info(OBJ_NAME)?
        .expect("Object was just created, but can't be opened!");

    let start = std::time::Instant::now();
    // One reusable buffer sized for the largest possible request.
    let mut buf = vec![0; config.max_size as usize];
    // Truncate on open so a rerun does not leave stale rows from a longer
    // previous run at the tail of the CSV file.
    let f = std::fs::OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open("evaluation_read.csv")?;
    let mut w = std::io::BufWriter::new(f);
    w.write_all(b"offset,size,latency_ns\n")?;

    for (pos, len) in positions.iter().cycle() {
        // Read data as may be done in some evaluation where only parts of a
        // database file are read in.
        let t = std::time::Instant::now();
        obj.read_at(&mut buf[..*len as usize], *pos).unwrap();
        // Bug fix: terminate each record with '\n'; previously all data rows
        // ran together on a single line after the header, producing an
        // unparsable CSV (the sibling run_read_write writes "...,r\n").
        w.write_fmt(format_args!("{pos},{len},{}\n", t.elapsed().as_nanos()))?;
        if start.elapsed().as_secs() >= config.runtime {
            break;
        }
    }
    w.flush()?;
    Ok(())
}

pub fn run_read_write(mut client: Client, config: EvaluationConfig) -> Result<(), Box<dyn Error>> {
pub fn run_read_write(
mut client: Client,
config: EvaluationConfig,
rw: f64,
) -> Result<(), Box<dyn Error>> {
println!("running scientific_evaluation");
// Generate positions to read
let positions = gen_positions(&mut client, &config);
Expand All @@ -100,17 +70,17 @@ pub fn run_read_write(mut client: Client, config: EvaluationConfig) -> Result<()
for (pos, len) in positions.iter().cycle() {
// Read data as may be done in some evaluation where only parts of a
// database file are read in.
if client.rng.gen_bool(0.5) {
if client.rng.gen_bool(rw) {
let t = std::time::Instant::now();
obj.read_at(&mut buf[..*len as usize], *pos).unwrap();
w.write_fmt(format_args!("{pos},{len},{},r", t.elapsed().as_nanos()))?;
w.write_fmt(format_args!("{pos},{len},{},r\n", t.elapsed().as_nanos()))?;
} else {
cursor.seek(std::io::SeekFrom::Start(*pos))?;
let t = std::time::Instant::now();
with_random_bytes(&mut client.rng, *len, 8 * 1024 * 1024, |b| {
cursor.write_all(b)
})?;
w.write_fmt(format_args!("{pos},{len},{},w", t.elapsed().as_nanos()))?;
w.write_fmt(format_args!("{pos},{len},{},w\n", t.elapsed().as_nanos()))?;
}
if start.elapsed().as_secs() >= config.runtime {
break;
Expand Down

0 comments on commit 5f241f6

Please sign in to comment.