calc_score.py
"""Compare benchmark results against a reference run and fail below a threshold score."""
import argparse
import json

from report_benchmark import read_benchmark

# Benchmark categories that enter the score calculation.
result_columns = [
    'determine_len',
    'iterate',
    'iterate_single_pass',
    'search_lean_filter',
    'search_rich_filter',
    'select_by_id',
]


def main(args):
    # Optionally restrict both data sets with a JSON-encoded filter.
    filter_ = json.loads(args.filter) if args.filter else None
    df = read_benchmark(args.filename, filter_)
    df_cmp = read_benchmark(args.filename_cmp, filter_)

    # Reduce to the per-category minimum (fastest) timing for the 'signac' tool.
    benchmark = df[df.tool == 'signac'][result_columns].min()
    compare = df_cmp[df_cmp.tool == 'signac'][result_columns].min()

    # Calculate scores, where a score larger than 1 means the benchmark
    # is faster than the comparison.
    scores = compare / benchmark
    print(scores)
    print(scores.min())
    if scores.min() < args.pass_above:
        raise RuntimeError(
            "The measured score ({}) is below the required minimal "
            "score ({}).".format(scores.min(), args.pass_above))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'filename', default='benchmark.txt', nargs='?',
        help="The collection that contains the benchmark data.")
    parser.add_argument(
        'filename_cmp', default='compare.txt', nargs='?',
        help="The collection that contains the comparison data.")
    parser.add_argument(
        '-f', '--filter', type=str,
        help="Select a subset of the data.")
    parser.add_argument(
        '--pass-above',
        type=float,
        default=0.90,
        help="Specify a minimal score that we need to pass.")
    args = parser.parse_args()
    main(args)
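
For context, the score computed in main() is the element-wise ratio of the two per-category timing minima. The column selection and element-wise division suggest that read_benchmark() returns a pandas DataFrame, although that is an assumption here. A minimal sketch of the same arithmetic with made-up category names and timings (purely illustrative, not taken from any real benchmark data):

import pandas as pd

# Hypothetical per-category minimum timings in seconds (illustrative only;
# the real values come from read_benchmark()).
benchmark = pd.Series({'iterate': 0.5, 'select_by_id': 0.2})
compare = pd.Series({'iterate': 1.0, 'select_by_id': 0.1})

# Element-wise ratio, as in main(): a score above 1 means the benchmarked
# run was faster than the comparison run for that category.
scores = compare / benchmark   # iterate -> 2.0, select_by_id -> 0.5
print(scores.min())            # 0.5, below the default --pass-above of 0.90

With the default settings, a run like this would raise the RuntimeError, since the lowest score (0.5) falls below the required minimum of 0.90. A typical invocation of the script itself would be: python calc_score.py benchmark.txt compare.txt --pass-above 0.90, using the default file names from the argument parser.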