import math
- from abc import ABCMeta, abstractmethod
+ from abc import ABC, abstractmethod
from pathlib import Path
from typing import Union

import numpy as np
import rasterio
- import torch
from rasterio.windows import Window


# Implementation of paper:
# https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0229839#pone.0229839.ref007

- class Kernel(torch.nn.Module, metaclass=ABCMeta):
-     def __init__(
-         self, size: int = 512, device: torch.device.type = torch.device("cpu")
-     ):
+ class Kernel(ABC):
+     def __init__(self, size: int = 512):
        super().__init__()
        self.size = size
-         self.wi = self._init_wi(size, device)
-         self.wj = self.wi.clone()
+         self.wi = self._init_wi(size)
+         self.wj = self.wi.copy()

    @staticmethod
    @abstractmethod
-     def _init_wi(size: int, device: torch.device.type) -> torch.Tensor:
+     def _init_wi(size: int) -> np.ndarray:
        raise NotImplementedError

    def get_kernel(
-         self,
-         top: bool = False,
-         bottom: bool = False,
-         left: bool = False,
-         right: bool = False,
-     ) -> torch.Tensor:
-         wi, wj = self.wi.clone(), self.wj.clone()
+         self,
+         top: bool = False,
+         bottom: bool = False,
+         left: bool = False,
+         right: bool = False,
+     ) -> "np.ndarray":
+         wi, wj = self.wi.copy(), self.wj.copy()

        if top:
            wi[: self.size // 2] = 1
        if bottom:
-             wi[self.size // 2 :] = 1
+             wi[self.size // 2 :] = 1

        if left:
            wj[: self.size // 2] = 1
        if right:
-             wj[self.size // 2 :] = 1
+             wj[self.size // 2 :] = 1

-         return wi.unsqueeze(1) @ wj.unsqueeze(0)
+         return np.expand_dims(wi, 1) @ np.expand_dims(wj, 0)

    def forward(
-         self,
-         x: torch.Tensor,
-         top: bool = False,
-         bottom: bool = False,
-         left: bool = False,
-         right: bool = False,
-     ) -> torch.Tensor:
+         self,
+         x: "np.ndarray",
+         top: bool = False,
+         bottom: bool = False,
+         left: bool = False,
+         right: bool = False,
+     ) -> np.ndarray:
        kernel = self.get_kernel(top=top, bottom=bottom, left=left, right=right)
-         return torch.mul(x, kernel)
+         return np.multiply(x, kernel)

class HannKernel(Kernel):
    @staticmethod
-     def _init_wi(size: int, device: torch.device.type) -> torch.Tensor:
-         i = torch.arange(0, size, device=device)
-         return (1 - ((2 * np.pi * i) / (size - 1)).cos()) / 2
+     def _init_wi(size: int) -> np.ndarray:
+         i = np.arange(0, size)
+         return (1 - np.cos((2 * np.pi * i) / (size - 1))) / 2


class BartlettHannKernel(Kernel):
    @staticmethod
-     def _init_wi(size: int, device: torch.device.type) -> torch.Tensor:
+     def _init_wi(size: int) -> np.ndarray:
        # Follows original paper:
        # Ha YH, Pearce JA. A new window and comparison to standard windows.
        # IEEE Transactions on Acoustics, Speech, and Signal Processing.
        # 1989;37(2):298–301.
-         i = torch.arange(0, size, device=device)
+         i = np.arange(0, size)
        return (
-             0.62
-             - 0.48 * (i / size - 1 / 2).abs()
-             + 0.38 * (2 * np.pi * (i / size - 1 / 2).abs()).cos()
+             0.62
+             - 0.48 * np.abs(i / size - 1 / 2)
+             + 0.38 * np.cos(2 * np.pi * np.abs(i / size - 1 / 2))
        )


class TriangularKernel(Kernel):
    @staticmethod
-     def _init_wi(size: int, device: torch.device.type) -> torch.Tensor:
-         i = torch.arange(0, size, device=device)
-         return 1 - (2 * i / size - 1).abs()
+     def _init_wi(size: int) -> np.ndarray:
+         i = np.arange(0, size)
+         return 1 - np.abs(2 * i / size - 1)


class BlackmanKernel(Kernel):
    @staticmethod
-     def _init_wi(size: int, device: torch.device.type) -> torch.Tensor:
-         i = torch.arange(0, size, device=device)
+     def _init_wi(size: int) -> np.ndarray:
+         i = np.arange(0, size)
        return (
-             0.42
-             - 0.5 * (2 * np.pi * i / size).cos()
-             + 0.08 * (4 * np.pi * i / size).cos()
+             0.42
+             - 0.5 * np.cos(2 * np.pi * i / size)
+             + 0.08 * np.cos(4 * np.pi * i / size)
        )
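Worth noting (my own sanity check, not part of the diff): Hann weights offset by half a window add up to roughly one across the overlap, which appears to be what lets the memory register below simply accumulate the weighted logits of 50%-overlapping tiles before the final softmax.

# Sanity-check sketch: 1-D Hann weights offset by size // 2 sum to ~1 where tiles overlap.
size = 512
wi = HannKernel(size).wi
assert np.allclose(wi[: size // 2] + wi[size // 2 :], 1.0, atol=1e-2)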

- class TorchMemoryRegister(object):
+ class NumpyMemoryRegister(object):
    def __init__(
-         self,
-         image_path: Union[str, Path],
-         reg_depth: int,
-         window_size: int,
-         device: torch.device.type,
+         self,
+         image_path: Union[str, Path],
+         reg_depth: int,
+         window_size: int,
    ):
        super().__init__()
        self.image_path = Path(image_path)
        self.n = reg_depth
        self.ws = window_size
        self.hws = window_size // 2
-         self.device = device

        # Copy metadata from img
        with rasterio.open(str(image_path), "r") as src:
            src_width = src.width

        self.height = self.ws
        self.width = (math.ceil(src_width / self.ws) * self.ws) + self.hws
-         self.register = torch.zeros(
-             (self.n, self.height, self.width), device=self.device
-         )
+         self.register = np.zeros((self.n, self.height, self.width))
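A note on the register's shape (my reading of the code above, stated as an assumption): it holds a single window-high strip of running logits whose width is the source width rounded up to a whole number of windows, plus half a window of slack for the final overlapped tile. For a hypothetical src_width of 1000 px and window_size of 512, that gives a register of reg_depth x 512 x 1280, since ceil(1000 / 512) * 512 + 256 = 1280.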

    @property
    def _zero_chip(self):
-         return torch.zeros(
-             (self.n, self.hws, self.hws), dtype=torch.float, device=self.device
-         )
+         return np.zeros((self.n, self.hws, self.hws), dtype=float)
-     def step(self, new_logits: torch.Tensor, img_window: Window):
+     def step(self, new_logits: "np.ndarray", img_window: Window):
        # 1. Read data from the registry to update with the new logits
        # |a|b| |
        # |c|d| |
-         with torch.no_grad():
-             logits_abcd = self.register[
-                 :, :, img_window.col_off : img_window.col_off + self.ws
-             ].clone()
-             logits_abcd += new_logits
+         logits_abcd = self.register[:, :,
+             img_window.col_off : img_window.col_off + self.ws].copy()
+         logits_abcd += new_logits

        # Update the registry and pop information-complete data
        # |c|b| | + pop a
        # |0|d| |
        logits_a = logits_abcd[:, : self.hws, : self.hws]
-         logits_c = logits_abcd[:, self.hws :, : self.hws]
-         logits_c0 = torch.concat([logits_c, self._zero_chip], dim=1)
-         logits_bd = logits_abcd[:, :, self.hws :]
+         logits_c = logits_abcd[:, self.hws :, : self.hws]
+         logits_c0 = np.concatenate([logits_c, self._zero_chip], axis=1)
+         logits_bd = logits_abcd[:, :, self.hws :]

        # write c0
        self.register[
-             :, :, img_window.col_off : img_window.col_off + self.hws
+             :, :, img_window.col_off : img_window.col_off + self.hws
        ] = logits_c0

        # write bd
        col_off_bd = img_window.col_off + self.hws
-         self.register[:, :, col_off_bd : col_off_bd + self.hws] = logits_bd
+         self.register[:, :, col_off_bd : col_off_bd + self.hws] = logits_bd

        # Return the information-complete predictions
        preds_win = Window(
@@ -165,6 +154,9 @@ def step(self, new_logits: torch.Tensor, img_window: Window):
            height=min(self.hws, img_window.height),
            width=min(self.hws, img_window.width),
        )
-         preds = logits_a[:, : img_window.height, : img_window.width].softmax(axis=0)
+         preds = logits_a[:, : img_window.height, : img_window.width]
+
+         # Numpy softmax on axis 0
+         preds = np.exp(preds) / np.sum(np.exp(preds), axis=0)

        return preds, preds_win
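
For context, a hedged sketch of how the register and a window kernel might be wired into a single left-to-right inference sweep. Apart from NumpyMemoryRegister, HannKernel, and Window, everything here is a hypothetical stand-in: the toy GeoTIFF, the random "model" output, and the sweep pattern are assumptions rather than part of this diff.

# Hedged sketch, not part of the commit.
ws, n_classes = 512, 3

# Hypothetical toy raster so the register has metadata to read.
with rasterio.open(
    "toy.tif", "w", driver="GTiff", height=ws, width=2 * ws, count=1, dtype="float32"
) as dst:
    dst.write(np.zeros((1, ws, 2 * ws), dtype="float32"))

register = NumpyMemoryRegister("toy.tif", reg_depth=n_classes, window_size=ws)
kernel = HannKernel(size=ws)

# Hypothetical 50%-overlapping sweep across one row of tiles.
for col_off in range(0, 2 * ws - ws // 2, ws // 2):
    window = Window(col_off=col_off, row_off=0, width=ws, height=ws)
    logits = np.random.rand(n_classes, ws, ws)  # stand-in for model output
    weighted = kernel.forward(
        logits,
        top=True,       # single-row example, so no vertical neighbours
        bottom=True,
        left=(col_off == 0),
        right=(col_off + ws >= 2 * ws),
    )
    preds, preds_win = register.step(weighted, window)
    # preds is the softmaxed, information-complete half-window; preds_win says where it lands.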