Skip to content

Commit

Permalink
Deploy preview for PR #42
Browse files Browse the repository at this point in the history
  • Loading branch information
actions-user committed Dec 5, 2024
1 parent b52bf6f commit 85b0dee
Show file tree
Hide file tree
Showing 14 changed files with 3,601 additions and 1,328 deletions.
51 changes: 49 additions & 2 deletions pr-previews/pr-42/404.html
Original file line number Diff line number Diff line change
Expand Up @@ -439,19 +439,66 @@



<li class="md-nav__item">
<a href="/titiler-cmr/benchmark.html" class="md-nav__link">








<li class="md-nav__item md-nav__item--nested">



<input class="md-nav__toggle md-toggle " type="checkbox" id="__nav_5" >


<label class="md-nav__link" for="__nav_5" id="__nav_5_label" tabindex="0">


<span class="md-ellipsis">
Performance Benchmarks
</span>


<span class="md-nav__icon md-icon"></span>
</label>

<nav class="md-nav" data-md-level="1" aria-labelledby="__nav_5_label" aria-expanded="false">
<label class="md-nav__title" for="__nav_5">
<span class="md-nav__icon md-icon"></span>
Performance Benchmarks
</label>
<ul class="md-nav__list" data-md-scrollfix>







<li class="md-nav__item">
<a href="/titiler-cmr/time_series_performance_benchmarks/" class="md-nav__link">


<span class="md-ellipsis">
Time series performance benchmarks
</span>


</a>
</li>




</ul>
</nav>

</li>



</ul>
</nav>
Expand Down
86 changes: 86 additions & 0 deletions pr-previews/pr-42/benchmark_analysis/benchmark_analysis.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
"""Functions for processing and plotting the time series benchmark results"""

import json
from collections import defaultdict
from typing import Dict

import pandas as pd
import plotly.express as px


def process_benchmark_data(path: str = "../benchmark.json") -> Dict[str, pd.DataFrame]:
    """Read the benchmark json and convert into a dictionary of dataframes.

    Args:
        path: Location of the pytest-benchmark JSON output file
            (defaults to the original hard-coded ``../benchmark.json``).

    Returns:
        Mapping of benchmark group name to a DataFrame sorted by
        ``bbox_size``, where ``bbox_dims`` is an ordered categorical
        (largest first dimension first) so plots render in a stable order.
    """
    with open(path, "r") as f:
        benchmarks = json.load(f)["benchmarks"]

    # Flatten each benchmark entry into one flat record, grouped by name.
    # NOTE: ``**bench["params"]`` already merges the params — the original
    # redundant ``record.update(bench["params"])`` has been removed.
    grouped = defaultdict(list)
    for bench in benchmarks:
        grouped[bench["group"]].append(
            {
                "mean_time": bench["stats"]["mean"],
                "stddev": bench["stats"]["stddev"],
                **bench["extra_info"],
                **bench["params"],
            }
        )

    # Renamed loop variable (was ``records``, shadowing the outer dict).
    dfs = {
        group: pd.DataFrame(rows).sort_values(by="bbox_size")
        for group, rows in grouped.items()
    }

    for group, df in dfs.items():
        # Order bbox_dims by descending first dimension (e.g. "10x10" before
        # "1x1") so the categorical axis is consistent across plots.
        ordered_dims = sorted(
            df["bbox_dims"].unique(), key=lambda dims: -float(dims.split("x")[0])
        )

        df["bbox_dims"] = pd.Categorical(
            df["bbox_dims"], categories=ordered_dims, ordered=True
        )

        dfs[group] = df

    return dfs


def plot_line_with_error_bars(df: pd.DataFrame, **kwargs):
    """Create line plot with vertical error bars.

    Plots mean response time against the number of time series points,
    with ``stddev`` rendered as vertical error bars. Extra keyword
    arguments are forwarded to ``plotly.express.line``.
    """
    axis_labels = {
        "mean_time": "mean response time (seconds)",
        "num_timepoints": "number of points in time series",
    }
    return px.line(
        df,
        x="num_timepoints",
        y="mean_time",
        error_y="stddev",
        labels=axis_labels,
        **kwargs,
    )


def plot_error_rate_heatmap(
    df: pd.DataFrame,
    x: str,
    y: str,
    z: str,
    labels: Dict[str, str],
    title: str,
):
    """Plot error rate as a heatmap.

    Pivots ``df`` so that ``y`` values become rows, ``x`` values become
    columns, and ``z`` supplies the cell values, then renders the result
    with ``plotly.express.imshow``.
    """
    # Pivot the three relevant columns into a 2-D grid for the heatmap.
    pivoted = df[[x, y, z]].pivot(index=y, columns=x, values=z)

    # Render the grid; axis/colorbar labels and the title come from the caller.
    heatmap = px.imshow(pivoted, labels=labels, title=title)
    return heatmap


# Load and process the data
# NOTE(review): this runs at import time and reads ../benchmark.json from
# disk — importing this module fails if the benchmark results file is
# missing. Kept as-is because downstream doc pages appear to rely on the
# module-level `dfs` being populated on import; confirm before guarding
# with `if __name__ == "__main__":`.
dfs = process_benchmark_data()
Loading

0 comments on commit 85b0dee

Please sign in to comment.