Commit

Small fixes
eagarvey-amd committed Oct 17, 2024
1 parent 5815e57 commit 158a672
Showing 4 changed files with 32 additions and 10 deletions.
Empty file.

models/turbine_models/custom_models/torchbench/cmd_opts.py (2 changes: 1 addition & 1 deletion)
@@ -46,7 +46,7 @@ def is_valid_file(arg):
 p.add_argument(
     "--model_lists",
     type=Path,
-    nargs="*"
+    nargs="*",
     help="path to a JSON list of models to benchmark. One or more paths.",
     default=["torchbench_models.json", "timm_models.json", "torchvision_models.json"],
 )
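
For reference, the trailing-comma fix above sits inside an argparse option that accepts zero or more JSON list paths. A small standalone sketch of how that option behaves (the parser below is illustrative only, not the full cmd_opts.py parser):

# Standalone sketch of the --model_lists option as declared in the hunk above;
# it only illustrates how nargs="*" with type=Path parses, and is not the
# repository's actual parser setup.
import argparse
from pathlib import Path

p = argparse.ArgumentParser()
p.add_argument(
    "--model_lists",
    type=Path,
    nargs="*",
    help="path to a JSON list of models to benchmark. One or more paths.",
    default=["torchbench_models.json", "timm_models.json", "torchvision_models.json"],
)

# `--model_lists a.json b.json` yields a list of Path objects; omitting the
# flag keeps the default list (argparse does not convert defaults with type=).
args = p.parse_args(["--model_lists", "a.json", "b.json"])
print(args.model_lists)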

models/turbine_models/custom_models/torchbench/export.py (37 changes: 30 additions & 7 deletions)
@@ -148,9 +148,11 @@ def get_model_and_inputs(model_id, batch_size, tb_dir, tb_args, get_baseline=False):
     return model_name, model, forward_args
 
 
-'''
+"""
 Imports models from torchbench model tooling, exports them with turbine AOT, and does simple benchmarking.
-'''
+"""
+
+
 @torch.no_grad()
 def benchmark_torchbench_model(
     model_id,
@@ -199,7 +201,7 @@ benchmark_torchbench_model(
     )
     return vmfb_path
 
-    if compare_vs_eager:
+    if compare_vs_eager:
         model_name, model, forward_args, golden, baseline = get_model_and_inputs(
             model_id, batch_size, tb_dir, tb_args, get_baseline=True
         )
@@ -316,13 +318,28 @@ def _run_iter(runner, inputs):
     res = runner.ctx.modules.compiled_torchbench_model["main"](*inputs)
     return res, time.time() - start
 
+
 def do_compare(shark_results, shark_latency, golden_results, golden_latency):
-    numerics_pass_fail = np.allclose(shark_results.to_host(), golden_results.clone().cpu().numpy(), rtol=1e-4, atol=1e-4)
+    numerics_pass_fail = np.allclose(
+        shark_results.to_host(),
+        golden_results.clone().cpu().numpy(),
+        rtol=1e-4,
+        atol=1e-4,
+    )
     speedup = golden_latency / shark_latency
     return speedup, numerics_pass_fail
 
+
 def run_benchmark(
-    device, vmfb_path, weights_path, example_args, model_id, csv_path, iters, golden=None, baseline=None,
+    device,
+    vmfb_path,
+    weights_path,
+    example_args,
+    model_id,
+    csv_path,
+    iters,
+    golden=None,
+    baseline=None,
 ):
     if "rocm" in device:
         device = "hip" + device.split("rocm")[-1]
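
The reformatted do_compare above decides numerics pass/fail with np.allclose at rtol=1e-4 and atol=1e-4, and reports speedup as golden_latency / shark_latency (values above 1 mean the compiled module beat eager). A minimal sketch of that tolerance check, with made-up arrays:

# Illustration of the tolerance check used in do_compare: np.allclose passes
# when |a - b| <= atol + rtol * |b| elementwise. The arrays here are invented.
import numpy as np

golden = np.array([1.0000, 2.0000, 3.0000])
shark = np.array([1.00005, 1.99990, 3.00020])

numerics_pass_fail = np.allclose(shark, golden, rtol=1e-4, atol=1e-4)
print(numerics_pass_fail)  # True: each difference is within atol + rtol * |golden|
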
@@ -344,7 +361,13 @@ def run_benchmark(
     if os.path.exists(csv_path):
         needs_header = False
     with open(csv_path, "a") as csvfile:
-        fieldnames = ["model", "avg_latency", "avg_iter_per_sec", "speedup_over_eager", "numerics"]
+        fieldnames = [
+            "model",
+            "avg_latency",
+            "avg_iter_per_sec",
+            "speedup_over_eager",
+            "numerics",
+        ]
         data = [
             {
                 "model": model_id,
@@ -422,7 +445,7 @@ def run_main(model_id, args, tb_dir, tb_args):
     from turbine_models.custom_models.torchbench.cmd_opts import args, unknown
     import json
 
-    torchbench_models_dict = json.load(args.model_list_json
+    torchbench_models_dict = json.load(args.model_list_json)
     for list in args.model_lists:
         torchbench_models_dict = json.load(list)
     with open(args.models_json, "r") as f:
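
The fieldnames hunk above defines the columns of the benchmark results CSV. A hedged sketch of how rows with those columns could be appended follows; the DictWriter and needs_header handling are assumptions for illustration, not necessarily what run_benchmark does.

# Hedged sketch: append one result row per model to a CSV with the columns
# introduced in the diff. The csv_path value and the example row are invented;
# the DictWriter usage is an assumption, not the repository's exact code.
import csv
import os

csv_path = "torchbench_results.csv"  # hypothetical output path
fieldnames = [
    "model",
    "avg_latency",
    "avg_iter_per_sec",
    "speedup_over_eager",
    "numerics",
]

needs_header = not os.path.exists(csv_path)
with open(csv_path, "a", newline="") as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    if needs_header:
        writer.writeheader()
    writer.writerow(
        {
            "model": "example_model",
            "avg_latency": 12.3,
            "avg_iter_per_sec": 81.3,
            "speedup_over_eager": 1.4,
            "numerics": True,
        }
    )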

models/turbine_models/custom_models/torchbench/utils.py (3 changes: 1 addition & 2 deletions)
@@ -24,8 +24,7 @@
         "--iree-hip-waves-per-eu=2",
         "--iree-execution-model=async-external",
     ],
-    "preprocess_default": [
-    ]
+    "preprocess_default": [],
 }
 GFX11_flags = {
     "all": [
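
utils.py groups IREE compile flags into per-target dictionaries keyed by flag group (for example "all" and the now-empty "preprocess_default"). A hypothetical sketch of flattening such a dict into a single flag list; the helper and the example dict below are illustrations only, not the module's actual API:

# Hypothetical helper: flatten a per-target flags dict like those in utils.py
# into one list of compiler flags. The example dict contents are invented.
def collect_flags(flag_dict, groups=("all", "preprocess_default")):
    flags = []
    for group in groups:
        flags.extend(flag_dict.get(group, []))
    return flags


example_flags = {
    "all": ["--iree-execution-model=async-external"],
    "preprocess_default": [],
}

print(collect_flags(example_flags))  # ['--iree-execution-model=async-external']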
