18
18
from monai .transforms import ClipIntensityPercentiles
19
19
from monai .transforms .utils import soft_clip
20
20
from monai .transforms .utils_pytorch_numpy_unification import clip , percentile
21
+ from monai .utils .type_conversion import convert_to_tensor
21
22
from tests .utils import TEST_NDARRAYS , NumpyImageTestCase2D , NumpyImageTestCase3D , assert_allclose
22
23
23
24
25
# NOTE(review): module-level name starts with `test_` and takes positional args, so
# pytest's collector may try to run it as a test — consider renaming (and its call
# sites) to e.g. `hard_clip_reference`. Kept as-is here to preserve the interface.
def test_hard_clip_func(im, lower, upper):
    """Reference implementation of hard percentile clipping.

    Args:
        im: input image (array-like); converted to a tensor before computing.
        lower: lower percentile in [0, 100], or ``None`` for no lower bound.
        upper: upper percentile in [0, 100], or ``None`` for no upper bound.

    Returns:
        The tensor clipped to the values at the requested percentiles.
    """
    im_t = convert_to_tensor(im)
    if lower is None and upper is None:
        # nothing to clip against — clip(x, None, None) would raise in the backend
        return im_t
    if lower is None:
        upper = percentile(im_t, upper)
    elif upper is None:
        lower = percentile(im_t, lower)
    else:
        lower, upper = percentile(im_t, (lower, upper))
    return clip(im_t, lower, upper)
34
+
35
+
36
def test_soft_clip_func(im, lower, upper):
    """Reference implementation of soft (smooth) percentile clipping.

    Resolves `lower`/`upper` percentiles on a tensor copy of `im` (a ``None``
    bound is passed through to ``soft_clip`` unresolved), then applies
    ``soft_clip`` with a sharpness factor of 1.0 in float32.
    """
    data = convert_to_tensor(im)
    if lower is None:
        upper = percentile(data, upper)
    elif upper is None:
        lower = percentile(data, lower)
    else:
        lower, upper = percentile(data, (lower, upper))
    return soft_clip(data, minv=lower, maxv=upper, sharpness_factor=1.0, dtype=torch.float32)
45
+
46
+
24
47
class TestClipIntensityPercentiles2D (NumpyImageTestCase2D ):
25
48
26
49
@parameterized .expand ([[p ] for p in TEST_NDARRAYS ])
27
50
def test_hard_clipping_two_sided (self , p ):
28
51
hard_clipper = ClipIntensityPercentiles (upper = 95 , lower = 5 )
29
52
im = p (self .imt )
30
53
result = hard_clipper (im )
31
- lower , upper = percentile (im , (5 , 95 ))
32
- expected = clip (im , lower , upper )
54
+ expected = test_hard_clip_func (im , 5 , 95 )
33
55
assert_allclose (result , p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
34
56
35
57
@parameterized .expand ([[p ] for p in TEST_NDARRAYS ])
36
58
def test_hard_clipping_one_sided_high (self , p ):
37
59
hard_clipper = ClipIntensityPercentiles (upper = 95 , lower = None )
38
60
im = p (self .imt )
39
61
result = hard_clipper (im )
40
- lower , upper = percentile (im , (0 , 95 ))
41
- expected = clip (im , lower , upper )
62
+ expected = test_hard_clip_func (im , 0 , 95 )
42
63
assert_allclose (result , p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
43
64
44
65
@parameterized .expand ([[p ] for p in TEST_NDARRAYS ])
45
66
def test_hard_clipping_one_sided_low (self , p ):
46
67
hard_clipper = ClipIntensityPercentiles (upper = None , lower = 5 )
47
68
im = p (self .imt )
48
69
result = hard_clipper (im )
49
- lower , upper = percentile (im , (5 , 100 ))
50
- expected = clip (im , lower , upper )
70
+ expected = test_hard_clip_func (im , 5 , 100 )
51
71
assert_allclose (result , p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
52
72
53
73
@parameterized .expand ([[p ] for p in TEST_NDARRAYS ])
54
74
def test_soft_clipping_two_sided (self , p ):
55
75
soft_clipper = ClipIntensityPercentiles (upper = 95 , lower = 5 , sharpness_factor = 1.0 )
56
76
im = p (self .imt )
57
77
result = soft_clipper (im )
58
- lower , upper = percentile (im , (5 , 95 ))
59
- expected = soft_clip (im , sharpness_factor = 1.0 , minv = lower , maxv = upper , dtype = torch .float32 )
60
- # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable accross torch and numpy
78
+ expected = test_soft_clip_func (im , 5 , 95 )
79
+ # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable accross torch and numpy
61
80
assert_allclose (result , p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
62
81
63
82
@parameterized .expand ([[p ] for p in TEST_NDARRAYS ])
64
83
def test_soft_clipping_one_sided_high (self , p ):
65
84
soft_clipper = ClipIntensityPercentiles (upper = 95 , lower = None , sharpness_factor = 1.0 )
66
85
im = p (self .imt )
67
86
result = soft_clipper (im )
68
- upper = percentile (im , 95 )
69
- expected = soft_clip (im , sharpness_factor = 1.0 , minv = None , maxv = upper , dtype = torch .float32 )
70
- # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable accross torch and numpy
87
+ expected = test_soft_clip_func (im , None , 95 )
88
+ # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable accross torch and numpy
71
89
assert_allclose (result , p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
72
90
73
91
@parameterized .expand ([[p ] for p in TEST_NDARRAYS ])
74
92
def test_soft_clipping_one_sided_low (self , p ):
75
93
soft_clipper = ClipIntensityPercentiles (upper = None , lower = 5 , sharpness_factor = 1.0 )
76
94
im = p (self .imt )
77
95
result = soft_clipper (im )
78
- lower = percentile (im , 5 )
79
- expected = soft_clip (im , sharpness_factor = 1.0 , minv = lower , maxv = None , dtype = torch .float32 )
80
- # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable accross torch and numpy
96
+ expected = test_soft_clip_func (im , 5 , None )
97
+ # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable accross torch and numpy
81
98
assert_allclose (result , p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
82
99
83
100
@parameterized .expand ([[p ] for p in TEST_NDARRAYS ])
84
101
def test_channel_wise (self , p ):
85
102
clipper = ClipIntensityPercentiles (upper = 95 , lower = 5 , channel_wise = True )
86
103
im = p (self .imt )
87
104
result = clipper (im )
88
- for i , c in enumerate (im ):
105
+ im_t = convert_to_tensor (self .imt )
106
+ for i , c in enumerate (im_t ):
89
107
lower , upper = percentile (c , (5 , 95 ))
90
108
expected = clip (c , lower , upper )
91
109
assert_allclose (result [i ], p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
@@ -118,35 +136,31 @@ def test_hard_clipping_two_sided(self, p):
118
136
hard_clipper = ClipIntensityPercentiles (upper = 95 , lower = 5 )
119
137
im = p (self .imt )
120
138
result = hard_clipper (im )
121
- lower , upper = percentile (im , (5 , 95 ))
122
- expected = clip (im , lower , upper )
139
+ expected = test_hard_clip_func (im , 5 , 95 )
123
140
assert_allclose (result , p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
124
141
125
142
@parameterized .expand ([[p ] for p in TEST_NDARRAYS ])
126
143
def test_hard_clipping_one_sided_high (self , p ):
127
144
hard_clipper = ClipIntensityPercentiles (upper = 95 , lower = None )
128
145
im = p (self .imt )
129
146
result = hard_clipper (im )
130
- lower , upper = percentile (im , (0 , 95 ))
131
- expected = clip (im , lower , upper )
147
+ expected = test_hard_clip_func (im , 0 , 95 )
132
148
assert_allclose (result , p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
133
149
134
150
@parameterized .expand ([[p ] for p in TEST_NDARRAYS ])
135
151
def test_hard_clipping_one_sided_low (self , p ):
136
152
hard_clipper = ClipIntensityPercentiles (upper = None , lower = 5 )
137
153
im = p (self .imt )
138
154
result = hard_clipper (im )
139
- lower , upper = percentile (im , (5 , 100 ))
140
- expected = clip (im , lower , upper )
155
+ expected = test_hard_clip_func (im , 5 , 100 )
141
156
assert_allclose (result , p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
142
157
143
158
@parameterized .expand ([[p ] for p in TEST_NDARRAYS ])
144
159
def test_soft_clipping_two_sided (self , p ):
145
160
soft_clipper = ClipIntensityPercentiles (upper = 95 , lower = 5 , sharpness_factor = 1.0 )
146
161
im = p (self .imt )
147
162
result = soft_clipper (im )
148
- lower , upper = percentile (im , (5 , 95 ))
149
- expected = soft_clip (im , sharpness_factor = 1.0 , minv = lower , maxv = upper , dtype = torch .float32 )
163
+ expected = test_soft_clip_func (im , 5 , 95 )
150
164
# the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable accross torch and numpy
151
165
assert_allclose (result , p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
152
166
@@ -155,27 +169,26 @@ def test_soft_clipping_one_sided_high(self, p):
155
169
soft_clipper = ClipIntensityPercentiles (upper = 95 , lower = None , sharpness_factor = 1.0 )
156
170
im = p (self .imt )
157
171
result = soft_clipper (im )
158
- upper = percentile (im , 95 )
159
- expected = soft_clip (im , sharpness_factor = 1.0 , minv = None , maxv = upper , dtype = torch .float32 )
160
- # the rtol is set to 5e-5 because the logaddexp function used in softplus is not stable accross torch and numpy
172
+ expected = test_soft_clip_func (im , None , 95 )
173
+ # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable accross torch and numpy
161
174
assert_allclose (result , p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
162
175
163
176
@parameterized .expand ([[p ] for p in TEST_NDARRAYS ])
164
177
def test_soft_clipping_one_sided_low (self , p ):
165
178
soft_clipper = ClipIntensityPercentiles (upper = None , lower = 5 , sharpness_factor = 1.0 )
166
179
im = p (self .imt )
167
180
result = soft_clipper (im )
168
- lower = percentile (im , 5 )
169
- expected = soft_clip (im , sharpness_factor = 1.0 , minv = lower , maxv = None , dtype = torch .float32 )
170
- # the rtol is set to 1e-6 because the logaddexp function used in softplus is not stable accross torch and numpy
181
+ expected = test_soft_clip_func (im , 5 , None )
182
+ # the rtol is set to 1e-4 because the logaddexp function used in softplus is not stable accross torch and numpy
171
183
assert_allclose (result , p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
172
184
173
185
@parameterized .expand ([[p ] for p in TEST_NDARRAYS ])
174
186
def test_channel_wise (self , p ):
175
187
clipper = ClipIntensityPercentiles (upper = 95 , lower = 5 , channel_wise = True )
176
188
im = p (self .imt )
177
189
result = clipper (im )
178
- for i , c in enumerate (im ):
190
+ im_t = convert_to_tensor (self .imt )
191
+ for i , c in enumerate (im_t ):
179
192
lower , upper = percentile (c , (5 , 95 ))
180
193
expected = clip (c , lower , upper )
181
194
assert_allclose (result [i ], p (expected ), type_test = "tensor" , rtol = 1e-4 , atol = 0 )
0 commit comments