metrics.py
def calculate_metrics(y_true: set, y_pred: set):
    """Count true positives, false negatives, and false positives
    between a gold set and a predicted set."""
    tp = 0
    fp = 0
    fn = 0
    for elem in y_true:
        if elem in y_pred:
            tp += 1
        else:
            fn += 1
    # Anything predicted that is not in the gold set is a false positive.
    fp = len(y_pred - y_true)
    return tp, fn, fp


def precision(tp, fp):
    # Convention: precision is 1 when nothing was predicted.
    if tp + fp == 0:
        return 1
    else:
        return tp / (tp + fp)


def recall(tp, fn):
    # Convention: recall is 1 when there was nothing to find.
    if tp + fn == 0:
        return 1
    else:
        return tp / (tp + fn)


def fscore(p, r):
    # Harmonic mean of precision and recall.
    return (2 * p * r) / (p + r)


def metrics(tp, tn, fp, fn):
    """
    In NER, true negatives are not used, so accuracy is not computed either.
    """
    p = precision(tp, fp)
    r = recall(tp, fn)
    if p == 0 and r == 0:
        # Guard against division by zero in fscore.
        f = 0
    else:
        f = fscore(p, r)
    return p, r, f


def show_eval(tp, fn, fp):
    p, r, f = metrics(tp, 0, fp, fn)
    print("Precision: {:.2f}".format(p))
    print("Recall: {:.2f}".format(r))
    print("F1 Score: {:.2f}".format(f))
    print("-" * 20)
    return p, r, f


def eval_results(results, log=True):
    """Micro-average the metrics over (y_true, y_pred) set pairs and
    return the pairs that contained at least one error."""
    bad = []
    total_tp, total_fn, total_fp = 0, 0, 0
    for y_true, y_pred in results:
        tp, fn, fp = calculate_metrics(y_true, y_pred)
        total_tp += tp
        total_fn += fn
        total_fp += fp
        if log and (fn > 0 or fp > 0):
            bad.append([sorted(list(y_true)), sorted(list(y_pred))])
    show_eval(total_tp, total_fn, total_fp)
    return bad
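

# --- Usage sketch (illustrative, not part of the original file) ---
# A minimal demo of eval_results on invented (y_true, y_pred) set pairs,
# e.g. sets of entity strings per sentence. The sample data below is
# hypothetical and exists only to show the expected input shape.
if __name__ == "__main__":
    sample_results = [
        ({"Paris", "France"}, {"Paris"}),      # 1 tp, 1 fn (missed "France")
        ({"Berlin"}, {"Berlin", "Germany"}),   # 1 tp, 1 fp (spurious "Germany")
        ({"Tokyo"}, {"Tokyo"}),                # 1 tp, exact match
    ]
    # Prints micro-averaged precision, recall, and F1 (0.75 each here)
    # and returns the two pairs that contained an error.
    errors = eval_results(sample_results)
    print("Pairs with errors:", errors)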