This repository has been archived by the owner on Jul 1, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 275
/
Copy pathmeters_accuracy_meter_test.py
195 lines (156 loc) · 7.54 KB
/
meters_accuracy_meter_test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from classy_vision import meters
from classy_vision.meters import AccuracyMeter
from test.generic.meter_test_utils import ClassificationMeterTest
class TestAccuracyMeter(ClassificationMeterTest):
    """Unit tests for AccuracyMeter.

    Covers registry-based construction, single- and multi-batch updates,
    one-hot and multilabel targets, rejection of malformed inputs, state
    get/set round-trips, and distributed (multi-process) aggregation.
    Scores encode rank: within a row, a larger value means a higher-ranked
    class prediction.
    """

    def test_accuracy_meter_registry(self):
        """The meter registry builds an AccuracyMeter from its config name."""
        built = meters.build_meter({"name": "accuracy", "topk": [1, 2]})
        self.assertIsInstance(built, AccuracyMeter)

    def test_single_meter_update_and_reset(self):
        """
        This test verifies that the meter works as expected on a single
        update + reset + same single update.
        """
        meter = AccuracyMeter(topk=[1, 2])
        # 3 samples x 3 classes; scores drawn from {1, 2, 3}, 3 is highest.
        scores = torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]])
        # Ground-truth class index per sample: 0, 1, 2 respectively.
        labels = torch.tensor([0, 1, 2])
        # Only sample 0 is a top-1 hit; samples 0 and 2 are top-2 hits.
        expected = {"top_1": 1 / 3.0, "top_2": 2 / 3.0}
        self.meter_update_and_reset_test(meter, scores, labels, expected)

    def test_double_meter_update_and_reset(self):
        """Two successive batches accumulate into a single combined value."""
        meter = AccuracyMeter(topk=[1, 2])
        # Two batches, each 3 samples x 3 classes; scores in {1, 2, 3},
        # 3 is the highest score.
        batch_scores = [
            torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]]),
            torch.tensor([[3, 2, 1], [1, 3, 2], [1, 3, 2]]),
        ]
        # Same ground truth (class 0, 1, 2) for both batches.
        batch_labels = [torch.tensor([0, 1, 2]), torch.tensor([0, 1, 2])]
        # Batch 1: top-1 = 1/3, top-2 = 2/3.
        # Batch 2: top-1 = 2/3, top-2 = 3/3.
        expected = {"top_1": 3 / 6.0, "top_2": 5 / 6.0}
        self.meter_update_and_reset_test(meter, batch_scores, batch_labels, expected)

    def test_single_meter_update_and_reset_onehot(self):
        """
        This test verifies that the meter works as expected on a single
        update + reset + same single update with onehot target.
        """
        meter = AccuracyMeter(topk=[1, 2])
        # 3 samples x 3 classes; scores in {1, 2, 3}, 3 is the highest.
        scores = torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]])
        # Same ground truth as the index-target test, expressed one-hot:
        # class 0 for sample 0, class 1 for sample 1, class 2 for sample 2.
        labels = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        # Only sample 0 is a top-1 hit; samples 0 and 2 are top-2 hits.
        expected = {"top_1": 1 / 3.0, "top_2": 2 / 3.0}
        self.meter_update_and_reset_test(meter, scores, labels, expected)

    def test_single_meter_update_and_reset_multilabel(self):
        """
        This test verifies that the meter works as expected on a single
        update + reset + same single update with multilabel target.
        """
        meter = AccuracyMeter(topk=[1, 2])
        # 7 samples x 3 classes; scores in {1, 2, 3}, 3 is the highest.
        scores = torch.tensor(
            [
                [3, 2, 1],
                [3, 1, 2],
                [1, 3, 2],
                [1, 2, 3],
                [2, 1, 3],
                [2, 3, 1],
                [1, 3, 2],
            ]
        )
        # Multilabel targets: each row may flag several correct classes.
        labels = torch.tensor(
            [
                [1, 1, 0],
                [0, 0, 1],
                [1, 0, 0],
                [0, 0, 1],
                [0, 1, 1],
                [1, 1, 1],
                [1, 0, 1],
            ]
        )
        # Samples 1, 4, 5, 6 (1-indexed) have their top-scoring class
        # labeled; samples 2 and 7 additionally have at least one labeled
        # class inside the top 2.
        expected = {"top_1": 4 / 7.0, "top_2": 6 / 7.0}
        self.meter_update_and_reset_test(meter, scores, labels, expected)

    def test_meter_invalid_model_output(self):
        """A 3-D model output (expected 2-D) must be rejected."""
        meter = AccuracyMeter(topk=[1, 2])
        bad_scores = torch.tensor(
            [[[3, 2, 1], [1, 2, 3]], [[-1, -3, -4], [-10, -90, -100]]]
        )
        labels = torch.tensor([0, 1, 2])
        self.meter_invalid_meter_input_test(meter, bad_scores, labels)

    def test_meter_invalid_target(self):
        """A 3-D target (expected 1-D indices or 2-D one-hot) must be rejected."""
        meter = AccuracyMeter(topk=[1, 2])
        scores = torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]])
        bad_labels = torch.tensor([[[0, 1, 2], [0, 1, 2]]])
        self.meter_invalid_meter_input_test(meter, scores, bad_labels)

    def test_meter_invalid_topk(self):
        """A topk value exceeding the number of classes must be rejected."""
        meter = AccuracyMeter(topk=[1, 5])
        scores = torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]])
        labels = torch.tensor([0, 1, 2])
        self.meter_invalid_meter_input_test(meter, scores, labels)

    def test_meter_get_set_classy_state_test(self):
        """State transferred from meter 1 into meter 0 must reproduce
        meter 1's value.

        Meter 0 is updated with the first batch, meter 1 with the second;
        after copying meter 1's state into meter 0, both should report the
        expected value of the second update.
        """
        meter_pair = [AccuracyMeter(topk=[1, 2]), AccuracyMeter(topk=[1, 2])]
        # 3 samples x 3 classes per batch; scores in {1, 2, 3}, 3 highest.
        batch_scores = [
            torch.tensor([[1, 2, 3], [1, 2, 3], [2, 3, 1]]),
            torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]]),
        ]
        # Ground truth is class 0, 1, 2 for both batches.
        batch_labels = [torch.tensor([0, 1, 2]), torch.tensor([0, 1, 2])]
        # Expected value comes from the second update only.
        expected = {"top_1": 1 / 3.0, "top_2": 2 / 3.0}
        self.meter_get_set_classy_state_test(
            meter_pair, batch_scores, batch_labels, expected
        )

    def test_meter_distributed(self):
        """Two meters on two processes must aggregate to one global value."""
        meter_pair = [AccuracyMeter(topk=[1, 2]), AccuracyMeter(topk=[1, 2])]
        # Updates alternate between the two processes; 3 samples x 3
        # classes each, scores in {1, 2, 3} with 3 the highest.
        batch_scores = [
            torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]]),  # Meter 0
            torch.tensor([[3, 2, 1], [1, 3, 2], [1, 3, 2]]),  # Meter 1
            torch.tensor([[3, 2, 1], [3, 1, 2], [1, 3, 2]]),  # Meter 0
            torch.tensor([[3, 2, 1], [1, 3, 2], [1, 3, 2]]),  # Meter 1
        ]
        # Ground truth is class 0, 1, 2 for every batch.
        batch_labels = [
            torch.tensor([0, 1, 2]),  # Meter 0
            torch.tensor([0, 1, 2]),  # Meter 1
            torch.tensor([0, 1, 2]),  # Meter 0
            torch.tensor([0, 1, 2]),  # Meter 1
        ]
        # The first round of updates yields 3 top-1 and 5 top-2 hits out
        # of 6 samples; the second round adds the same counts again.
        expected_per_round = [
            {"top_1": 3 / 6.0, "top_2": 5 / 6.0},  # after one update each
            {"top_1": 6 / 12.0, "top_2": 10 / 12.0},  # after two updates each
        ]
        self.meter_distributed_test(
            meter_pair, batch_scores, batch_labels, expected_per_round
        )