-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmetrics.py
105 lines (94 loc) · 3.78 KB
/
metrics.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
import numpy as np
def f1_score_single(seg, gt):
    """
    Mean F1 score between a segmented image and a ground-truth image.

    For every cluster in `seg`, the ground-truth region that overlaps it the
    most is taken as its match; precision and recall are computed against that
    region, and the resulting F1 scores are averaged over all clusters.

    Parameters
    ----------
    seg : np.ndarray
        Segmented (label) image.
    gt : np.ndarray
        Ground-truth (label) image, same shape as `seg`.

    Returns
    -------
    float
        Average F1 score over all clusters found in `seg`.
    """
    clusters = np.unique(seg)
    F = 0
    for i in range(clusters.shape[0]):
        # 'indices' is tuple of index arrays, one per dimension.
        # e.g ([x1 x2 ..], [y1 y2 ..])
        indices = np.where(seg == clusters[i])
        partitions, pcounts = np.unique(gt[indices], return_counts=True)
        # Precision is the ratio of correctly predicted positive observations
        # to the total predicted positive observations: best-overlap count
        # over the cluster's total size.
        # BUG FIX: the denominator was partitions.shape[0] (number of
        # distinct ground-truth regions touched), which can yield
        # precision > 1; the cluster size is the sum of the overlap counts.
        precision = np.max(pcounts) / np.sum(pcounts)
        # Recall is the ratio of correctly predicted positive observations to
        # all observations in the actual class (the matched region's total
        # size in the whole ground-truth image).
        max_value_index = np.argmax(pcounts)  # index of the most occurred value.
        max_value = partitions[max_value_index]  # the most occurred value.
        max_value_occurrences = gt[gt == max_value].shape[0]
        recall = (np.max(pcounts)) / max_value_occurrences
        F += (2 * precision * recall) / (precision + recall)
    F /= clusters.shape[0]
    return F
def conditional_entropy(seg, gt):
    """
    Conditional entropy H(gt | seg) between two label images.

    For each cluster of `seg`, the entropy of the ground-truth labels inside
    that cluster is computed and weighted by the cluster's fraction of the
    total number of points. Lower is better; 0 means every cluster is pure.

    Parameters
    ----------
    seg : np.ndarray
        Segmented (label) image.
    gt : np.ndarray
        Ground-truth (label) image, same shape as `seg`.

    Returns
    -------
    float
        Weighted conditional entropy (in bits).
    """
    clusters = np.unique(seg).tolist()
    gt_clusters = np.unique(gt)
    total = seg.size  # total number of points in the image
    H = 0
    # for every cluster in seg
    for cluster in clusters:
        Hi = 0
        indices = np.where(seg == cluster)
        partitions = gt[indices]
        ni = indices[0].shape[0]  # number of points in this cluster
        # for every cluster in gt
        for gt_cluster in gt_clusters:
            nij = partitions[partitions == gt_cluster].shape[0]
            if nij / ni != 0:
                Hi += (nij / ni) * np.log2(nij / ni)
        Hi *= -1
        # BUG FIX: the weight was len(indices)*Hi/len(seg) — but
        # len(indices) is the number of array dimensions (the np.where
        # tuple length) and len(seg) is only the row count of a 2-D image.
        # The correct weight is cluster size over total points.
        H += ni * Hi / total
    return H
def f1_score_all(images_segmented, gt, verbose=False):
    """
    Average F1 score of every segmented image against its ground-truth set.

    images_segmented : segmented images (N, d)
    gt : ground truth images (M, d, 2) -- presumably one entry per image,
         each holding M human annotations; verify against caller.

    Returns an (N,) array whose entry i is the mean F1 score of image i
    over its M ground-truth annotations.
    """
    if verbose:
        print(
            "f1_score_all: images_segmented.shape = {}, gt.shape = {}"
            .format(images_segmented.shape, gt.shape)
        )
    scores = []
    for idx, segmented in enumerate(images_segmented):
        per_human = []
        for m in range(gt[idx].shape[0]):  # for each human in ground truth data
            score = f1_score_single(segmented, gt[idx][m][:, :, 0])
            if verbose:
                # Report the measure M times for each image idx
                print("i = {}, m = {}, f1_score = {}".format(idx, m, score))
            per_human.append(score)
        # Keep the average of the M annotations for this image
        scores.append(np.average(per_human))
    return np.array(scores)
def conditional_entropy_all(images_segmented, gt, verbose=False):
    """
    Conditional entropy of every segmented image against its ground truths.

    Returns an (N,) array whose entry i is the mean conditional entropy of
    segmented image i over its M ground-truth annotations.
    """
    results = []
    for idx, segmented in enumerate(images_segmented):
        per_annotation = []
        for j in range(gt[idx].shape[0]):
            value = conditional_entropy(segmented, gt[idx][j][:, :, 0])
            if verbose:
                # Report the measure M times for each image
                print("i = {}, m = {}, entropy = {}".format(idx, j, value))
            per_annotation.append(value)
        # Keep the average of the M annotations for this image
        results.append(np.average(per_annotation))
    return np.array(results)