diff --git a/test/benchmarks/run_tests.sh b/test/benchmarks/run_tests.sh
index 075c71deea28..eef90b613478 100755
--- a/test/benchmarks/run_tests.sh
+++ b/test/benchmarks/run_tests.sh
@@ -39,7 +39,9 @@ function run_make_tests {
 }
 
 function run_python_tests {
+  python3 "$CDIR/test_experiment_runner.py"
   python3 "$CDIR/test_benchmark_experiment.py"
+  python3 "$CDIR/test_benchmark_model.py"
 }
 
 function run_tests {
diff --git a/test/benchmarks/test_benchmark_model.py b/test/benchmarks/test_benchmark_model.py
new file mode 100644
index 000000000000..9946384be9fe
--- /dev/null
+++ b/test/benchmarks/test_benchmark_model.py
@@ -0,0 +1,18 @@
+import unittest
+
+from benchmark_model import BenchmarkModel
+
+
+class BenchmarkModelTest(unittest.TestCase):
+
+  def test_to_dict(self):
+    bm = BenchmarkModel("torchbench or other", "super_deep_model",
+                        "placeholder")
+    actual = bm.to_dict()
+    self.assertEqual(2, len(actual))
+    self.assertEqual("torchbench or other", actual["suite_name"])
+    self.assertEqual("super_deep_model", actual["model_name"])
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/test/benchmarks/test_experiment_runner.py b/test/benchmarks/test_experiment_runner.py
new file mode 100644
index 000000000000..ad0c6d156c9f
--- /dev/null
+++ b/test/benchmarks/test_experiment_runner.py
@@ -0,0 +1,32 @@
+import unittest
+
+import subprocess
+
+import experiment_runner
+
+EXPERIMENT_RUNNER_PY = experiment_runner.__file__
+
+
+class ExperimentRunnerTest(unittest.TestCase):
+
+  def test_alexnet_dry_run(self):
+    child = subprocess.run([
+        "python", EXPERIMENT_RUNNER_PY, "--dynamo=openxla", "--dynamo=inductor",
+        "--xla=PJRT", "--xla=None", "--test=eval", "--test=train",
+        "--suite-name=torchbench", "--accelerator=cpu", "--filter=^alexnet$",
+        "--dry-run"
+    ],
+                           capture_output=True,
+                           text=True)
+    expected_in_stderr = [
+        "Number of selected experiment configs: 2",
+        "Number of selected model configs: 1",
+        "'--experiment-config={\"accelerator\": \"cpu\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"test\": \"eval\"}', '--model-config={\"model_name\": \"alexnet\"}'",
+        "'--experiment-config={\"accelerator\": \"cpu\", \"xla\": \"PJRT\", \"xla_flags\": null, \"dynamo\": \"openxla\", \"test\": \"train\"}', '--model-config={\"model_name\": \"alexnet\"}'",
+    ]
+    for expected in expected_in_stderr:
+      self.assertTrue(expected in child.stderr)
+
+
+if __name__ == '__main__':
+  unittest.main()
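
Note for reviewers: test_to_dict pins down a small serialization contract for BenchmarkModel without touching any real benchmark suite. A minimal sketch consistent with those assertions follows; it is not the actual benchmark_model.py implementation, and the name of the third constructor parameter (passed as "placeholder" in the test) is an assumption here. The only facts taken from the test are that the constructor accepts three positional arguments and that to_dict() returns exactly the two identifying fields.

# Minimal sketch of the contract test_to_dict exercises; not the actual
# benchmark_model.py implementation. The parameter name benchmark_experiment
# is assumed, not taken from the source.
class BenchmarkModel:

  def __init__(self, suite_name, model_name, benchmark_experiment):
    self.suite_name = suite_name
    self.model_name = model_name
    self.benchmark_experiment = benchmark_experiment

  def to_dict(self):
    # Exactly the two identifying fields, matching len(actual) == 2
    # in the test; the experiment handle is deliberately not serialized.
    return {"suite_name": self.suite_name, "model_name": self.model_name}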