-
Notifications
You must be signed in to change notification settings - Fork 0
/
util.py
59 lines (48 loc) · 2.38 KB
/
util.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import re
import torch
import shutil
import logging
import torchscan
import numpy as np
from os.path import join
from sklearn.decomposition import PCA
import datasets_ws
def get_flops(model, input_shape=(480, 640)):
    """Return the FLOPs as a string, such as '22.33 GFLOPs'"""
    assert len(input_shape) == 2, f"input_shape should have len==2, but it's {input_shape}"
    height, width = input_shape
    # torchscan traces a forward pass over a single 3-channel image of the
    # given spatial size and reports per-module statistics.
    crawl_result = torchscan.crawl_module(model, (3, height, width))
    report = torchscan.utils.format_info(crawl_result)
    # Pull the human-readable FLOPs figure out of the formatted report.
    matches = re.findall("Floating Point Operations on forward: (.*)\n", report)
    return matches[0]
def save_checkpoint(args, state, is_best, filename):
    """Serialize `state` to `<args.save_dir>/<filename>`.

    When `is_best` is truthy, the freshly written checkpoint is also
    duplicated as `best_model.pth` inside the same directory.
    """
    checkpoint_path = join(args.save_dir, filename)
    torch.save(state, checkpoint_path)
    if not is_best:
        return
    shutil.copyfile(checkpoint_path, join(args.save_dir, "best_model.pth"))
def resume_train(args, model, optimizer=None, strict=False):
    """Load model, optimizer, and other training parameters"""
    logging.debug(f"Loading checkpoint: {args.resume}")
    checkpoint = torch.load(args.resume)
    # Bookkeeping stored alongside the weights in the checkpoint dict.
    start_epoch_num = checkpoint["epoch_num"]
    best_r1pr5 = checkpoint["best_r1pr5"]
    not_improved_num = checkpoint["not_improved_num"]
    model.load_state_dict(checkpoint["model_state_dict"], strict=strict)
    if optimizer:
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    logging.debug(f"Loaded checkpoint: start_epoch_num = {start_epoch_num}, " \
                  f"current_best_R@1+R@5 = {best_r1pr5:.1f}")
    # Resuming from last_model.pth implies a sibling best_model.pth exists;
    # carry it over so the new run's save_dir is self-contained.
    if args.resume.endswith("last_model.pth"):  # Copy best model to current save_dir
        shutil.copy(args.resume.replace("last_model.pth", "best_model.pth"), args.save_dir)
    return model, optimizer, best_r1pr5, start_epoch_num, not_improved_num
def compute_pca(args, model, pca_dataset_folder, full_features_dim):
    """Fit a PCA over features extracted from a PCADataset.

    Runs `model` (in eval mode, no gradients) over at most 2**14 images
    drawn from `pca_dataset_folder`, collects their `full_features_dim`-D
    descriptors, and returns a fitted sklearn PCA with `args.pca_dim`
    components.
    """
    model = model.eval()
    pca_ds = datasets_ws.PCADataset(args, args.datasets_folder, pca_dataset_folder)
    dl = torch.utils.data.DataLoader(pca_ds, args.infer_batch_size, shuffle=True)
    # Cap the sample count at 2**14 descriptors to bound memory and fit time.
    num_features = min(len(pca_ds), 2 ** 14)
    pca_features = np.empty([num_features, full_features_dim])
    with torch.no_grad():
        for i, images in enumerate(dl):
            start = i * args.infer_batch_size
            if start >= num_features:
                break
            features = model(images).cpu().numpy()
            # BUGFIX: when num_features is not a multiple of the batch size,
            # the last kept batch would extend past the buffer; numpy clamps
            # the destination slice, causing a shape-mismatch ValueError.
            # Truncate the batch to the remaining capacity instead.
            features = features[: num_features - start]
            pca_features[start : start + len(features)] = features
    pca = PCA(args.pca_dim)
    pca.fit(pca_features)
    return pca