from __future__ import annotations

from typing import List, NamedTuple, Callable, TypeVar
import time


class BenchmarkContext:
    """Timing context passed to each benchmark run.

    Calling start() again resets the timer, which lets a benchmark exclude
    setup work from the measured time.
    """

    def __init__(self) -> None:
        self.start()

    def start(self) -> None:
        self.start_time = time.time()

    def elapsed_time(self) -> float:
        return time.time() - self.start_time


class BenchmarkInfo(NamedTuple):
    name: str
    module: str
    perform: Callable[[BenchmarkContext], object]
    # Argument is path to mypy repo we are benchmarking
    prepare: Callable[[str | None], None] | None
    compiled_only: bool
    min_iterations: int | None
    strip_outlier_runs: bool
    stable_hash_seed: bool


# All benchmarks registered via the decorators below
benchmarks: List[BenchmarkInfo] = []

T = TypeVar("T")


def benchmark(
    *,
    prepare: Callable[[str | None], None] | None = None,
    compiled_only: bool = False,
    min_iterations: int | None = None,
    strip_outlier_runs: bool = True,
    stable_hash_seed: bool = False) -> Callable[[Callable[[], T]], Callable[[], T]]:
    """Define a benchmark.

    Args:
        prepare: If given, called once before running the benchmark to set up external
            state. This does not run in the same process as the actual benchmark, so it's
            mostly useful for setting up file system state, data files, etc.
        compiled_only: If True, this benchmark only runs in compiled mode (no
            interpreted mode).
        min_iterations: If given, minimum number of iterations to run for this benchmark.
        strip_outlier_runs: If True (default), aggressively try to strip outlier runs.
            Otherwise, no (or few) outlier runs will be removed.
        stable_hash_seed: If True, use a predictable hash seed in CPython (it still
            varies between runs, but it's not random).
    """
    def outer_wrapper(func: Callable[[], T]) -> Callable[[], T]:
        name = func_name(func)

        # Adapt the no-argument benchmark to the context-taking interface (ctx is unused).
        def wrapper(ctx: BenchmarkContext) -> T:
            return func()

        benchmark = BenchmarkInfo(
            name,
            func.__module__,
            wrapper,
            prepare,
            compiled_only,
            min_iterations,
            strip_outlier_runs,
            stable_hash_seed,
        )
        benchmarks.append(benchmark)
        return func

    return outer_wrapper
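

# Illustrative usage of the decorator above (a hypothetical example, not part of
# this module; "sum_evens" is an assumed name):
#
#     @benchmark()
#     def sum_evens() -> int:
#         return sum(i for i in range(1000000) if i % 2 == 0)
#
# The decorator registers the function in `benchmarks` and returns it unchanged,
# so it can still be called directly.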


# TODO: Merge with "benchmark"
def benchmark_with_context(
    func: Callable[[BenchmarkContext], T]) -> Callable[[BenchmarkContext], T]:
    """Define a benchmark whose function receives a BenchmarkContext."""
    name = func.__name__
    if name.startswith('__mypyc_'):
        name = name.replace('__mypyc_', '')
        name = name.replace('_decorator_helper__', '')
    benchmark = BenchmarkInfo(name, func.__module__, func, None, False, None, True, False)
    benchmarks.append(benchmark)
    return func
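

# Illustrative usage of benchmark_with_context (a hypothetical example, not part
# of this module): restarting the timer via ctx.start() excludes setup work from
# the measured time.
#
#     @benchmark_with_context
#     def sort_large_list(ctx: BenchmarkContext) -> None:
#         data = list(range(1000000, 0, -1))  # setup, excluded from timing
#         ctx.start()
#         data.sort()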


def run_once(benchmark_name: str) -> float:
    """Run a registered benchmark once and return the elapsed time in seconds."""
    for benchmark in benchmarks:
        if benchmark.name == benchmark_name:
            context = BenchmarkContext()
            benchmark.perform(context)
            return context.elapsed_time()
    assert False, "unknown benchmark: %r" % benchmark_name
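

# For example (with a hypothetical benchmark name registered elsewhere):
#
#     elapsed = run_once("sum_evens")
#     print(f"sum_evens ran in {elapsed:.3f}s")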


def func_name(func: Callable[..., object]) -> str:
    name = func.__name__
    if name.startswith('__mypyc_'):
        name = name.replace('__mypyc_', '')
        name = name.replace('_decorator_helper__', '')
    return name