
Commit 4b1eeca

Remove dead code.
1 parent ecb4b83 commit 4b1eeca

File tree

1 file changed (+1, -23 lines)


HIPT_4K/hipt_4k.py

Lines changed: 1 addition & 23 deletions
@@ -5,7 +5,6 @@
 import sys
 
 # LinAlg / Stats / Plotting Dependencies
-import h5py
 import matplotlib.pyplot as plt
 import numpy as np
 import pandas as pd
@@ -16,16 +15,13 @@
 # Torch Dependencies
 import torch
 import torch.multiprocessing
-import torchvision
 from torchvision import transforms
 from einops import rearrange, repeat
 torch.multiprocessing.set_sharing_strategy('file_system')
 
 # Local Dependencies
-import vision_transformer as vits
-import vision_transformer4k as vits4k
 from hipt_heatmap_utils import *
-from hipt_model_utils import get_vit256, get_vit4k, tensorbatch2im, eval_transforms, roll_batch2img
+from hipt_model_utils import get_vit256, get_vit4k, tensorbatch2im, eval_transforms
 
 
 class HIPT_4K(torch.nn.Module):
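
Note: the deleted imports (h5py, torchvision, vits, vits4k, roll_batch2img) are not referenced anywhere in this file; the ViT backbones come from get_vit256/get_vit4k and preprocessing from eval_transforms. For context, a minimal usage sketch of the slimmed-down module; the constructor keyword arguments and file name below are assumptions for illustration, not part of this diff.

import torch
from PIL import Image

from hipt_4k import HIPT_4K
from hipt_model_utils import eval_transforms

# Assumed constructor arguments; checkpoint paths fall back to the repo defaults.
model = HIPT_4K(device256=torch.device('cpu'), device4k=torch.device('cpu'))
model.eval()

region = Image.open('region.png')                # assumed 4096 x 4096 RGB region
x = eval_transforms()(region).unsqueeze(dim=0)   # [1, 3, 4096, 4096]
with torch.no_grad():
    out = model.forward(x)                       # region-level representation
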
@@ -133,7 +129,6 @@ def _get_region_attention_scores(self, region, scale=1):
         - attention_256 (torch.Tensor): [256, 256/scale, 256/scale, 3] torch.Tensor sequence of attention maps for 256-sized patches.
         - attention_4k (torch.Tensor): [1, 4096/scale, 4096/scale, 3] torch.Tensor sequence of attention maps for 4k-sized regions.
         """
-        eval_t = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
         x = eval_transforms()(region).unsqueeze(dim=0)
 
         batch_256, w_256, h_256 = self.prepare_img_tensor(x)
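
Note: the deleted eval_t was never used; the region is already preprocessed by eval_transforms() on the next line. Assuming eval_transforms() in hipt_model_utils builds the same pipeline as the deleted literal (an assumption based on that line, not verified here), it reduces to:

from torchvision import transforms

def eval_transforms():
    # ToTensor scales pixels to [0, 1]; Normalize with mean = std = 0.5 maps them
    # to roughly [-1, 1], mirroring the transforms.Compose literal deleted above.
    mean, std = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
    return transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
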
@@ -150,7 +145,6 @@ def _get_region_attention_scores(self, region, scale=1):
 
         features_grid256 = features_cls256.reshape(w_256, h_256, 384).transpose(0,1).transpose(0,2).unsqueeze(dim=0)
         features_grid256 = features_grid256.to(self.device4k, non_blocking=True)
-        features_cls4k = self.model4k.forward(features_grid256).detach().cpu()
 
         attention_4k = self.model4k.get_last_selfattention(features_grid256)
         nh = attention_4k.shape[1] # number of head
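
Note: the deleted forward() call computed a 4k-level embedding that was never read; only the last self-attention is needed in this method. A sketch of how such an attention tensor is typically turned into per-head CLS attention maps, assuming a DINO-style [1, heads, tokens, tokens] layout with the CLS token at index 0 (assumptions for illustration):

import torch

def cls_attention_maps(attn: torch.Tensor, grid_w: int, grid_h: int) -> torch.Tensor:
    # attn: [1, nh, tokens, tokens]; token 0 is assumed to be the CLS token.
    nh = attn.shape[1]                           # number of heads
    cls_attn = attn[0, :, 0, 1:]                 # CLS -> patch attention, [nh, grid_w * grid_h]
    return cls_attn.reshape(nh, grid_w, grid_h)  # one attention map per head
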
@@ -185,13 +179,6 @@ def get_region_attention_heatmaps(self, x, offset=128, scale=4, alpha=0.5, cmap
         region = Image.fromarray(tensorbatch2im(x)[0])
         w, h = region.size
 
-        region2 = add_margin(region.crop((128,128,w,h)),
-                             top=0, left=0, bottom=128, right=128, color=(255,255,255))
-        region3 = add_margin(region.crop((128*2,128*2,w,h)),
-                             top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
-        region4 = add_margin(region.crop((128*3,128*3,w,h)),
-                             top=0, left=0, bottom=128*4, right=128*4, color=(255,255,255))
-
         b256_1, a256_1, a4k_1 = self._get_region_attention_scores(region, scale)
         b256_2, a256_2, a4k_2 = self._get_region_attention_scores(region, scale)
         b256_3, a256_3, a4k_3 = self._get_region_attention_scores(region, scale)
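
Note: the deleted region2/region3/region4 were offset crops padded back to full size with add_margin, but nothing downstream read them; all three _get_region_attention_scores calls take the original region. add_margin itself lives in the heatmap utilities and is not shown in this diff; a sketch of how such a helper typically pads a PIL image (the body below is an assumption, not the repo's definition):

from PIL import Image

def add_margin(img, top, left, bottom, right, color=(255, 255, 255)):
    # Paste the image onto a larger canvas, leaving the requested margins.
    w, h = img.size
    canvas = Image.new(img.mode, (w + left + right, h + top + bottom), color)
    canvas.paste(img, (left, top))
    return canvas
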
@@ -224,14 +211,6 @@ def get_region_attention_heatmaps(self, x, offset=128, scale=4, alpha=0.5, cmap
             img_inverse[mask256 == 0.95] = 0
             Image.fromarray(region256_hm+img_inverse).save(os.path.join(output_dir, '%s_256th[%d].png' % (fname, i)))
 
-        if False:
-            for j in range(6):
-                score4k_1 = concat_scores4k(a4k_1[j], size=(h_s,w_s))
-                score4k = score4k_1 / 100
-                color_block4k = (cmap(score4k)*255)[:,:,:3].astype(np.uint8)
-                region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
-                Image.fromarray(region4k_hm).save(os.path.join(output_dir, '%s_4k[%s].png' % (fname, j)))
-
         hm4k, hm256, hm4k_256 = [], [], []
         for j in range(6):
             score4k_1 = concat_scores4k(a4k_1[j], size=(h_s,w_s))
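
Note: the deleted branch was guarded by if False: and duplicated what the surviving loop already does: map scores through the colormap and alpha-blend them onto the region. That overlay pattern in isolation, as a sketch with generic numpy inputs (the colormap choice and array conventions below are assumptions):

import cv2
import numpy as np
import matplotlib.pyplot as plt

def overlay_heatmap(score: np.ndarray, base: np.ndarray, alpha: float = 0.5) -> np.ndarray:
    # score: float array in [0, 1]; base: uint8 RGB image with the same height/width.
    cmap = plt.get_cmap('coolwarm')
    color_block = (cmap(score) * 255)[:, :, :3].astype(np.uint8)    # drop the RGBA alpha channel
    return cv2.addWeighted(color_block, alpha, base, 1 - alpha, 0)  # weighted blend, uint8 output
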
@@ -296,7 +275,6 @@ def get_region_attention_heatmaps(self, x, offset=128, scale=4, alpha=0.5, cmap
         overlay256[offset_2:h_s, offset_2:w_s] += 100
         score256 = (score256_1+new_score256_2)/overlay256
 
-        factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
         score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
         color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
         region4k_256_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
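
Note: the deleted factorize lambda (a min-max rescaling) now survives only in the trailing comment; the kept line fuses the two scales with an overlap-weighted average instead. Both, side by side, as a sketch over generic numpy arrays:

import numpy as np

def minmax(data: np.ndarray) -> np.ndarray:
    # The deleted factorize lambda: rescale values to [0, 1].
    return (data - np.min(data)) / (np.max(data) - np.min(data))

def fuse_scores(score4k, overlay4k, score256, overlay256):
    # What the retained line computes: average the 4k- and 256-level scores,
    # weighted by how many overlapping passes contributed at each pixel.
    return (score4k * overlay4k + score256 * overlay256) / (overlay4k + overlay256)
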
