-
Notifications
You must be signed in to change notification settings - Fork 5
/
pnp_sci_video_grayscale_benchmark.py
243 lines (188 loc) · 9.72 KB
/
pnp_sci_video_grayscale_benchmark.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
#!/usr/bin/env python
# coding: utf-8
# ## GAP-TV for Video Compressive Sensing
# ### GAP-TV
# > X. Yuan, "Generalized alternating projection based total variation minimization for compressive sensing," in *IEEE International Conference on Image Processing (ICIP)*, 2016, pp. 2539-2543.
# ### Code credit
# [Xin Yuan](https://www.bell-labs.com/usr/x.yuan "Dr. Xin Yuan, Bell Labs"), [Bell Labs](https://www.bell-labs.com/), xyuan@bell-labs.com, created Aug 7, 2018.
# [Yang Liu](https://liuyang12.github.io "Yang Liu, Tsinghua University"), [Tsinghua University](http://www.tsinghua.edu.cn/publish/thu2018en/index.html), y-liu16@mails.tsinghua.edu.cn, updated Jan 20, 2019.
# In[1]:
import os
import time
import math
import h5py
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from statistics import mean
from pnp_sci import admmdenoise_cacti
from utils import (A_, At_)
# In[2]:
# [0] environment configuration
datasetdir = './dataset/cacti/grayscale_benchmark'  # directory holding the CACTI .mat datasets
resultsdir = './results'                            # directory where results are written
# benchmark datasets paired with the number of frames to reconstruct
# (a negative entry means "all measurement frames" — see the nframe check below)
alldatname = ['kobe32', 'traffic48', 'runner40', 'drop40', 'crash32', 'aerial32']
allnframes = [-1, -1, 1, 1, -1, -1]
for datname, nframe in zip(alldatname, allnframes):
    # Run the full PnP-SCI benchmark (GAP-TV, GAP-FFDNet, GAP-FastDVDnet)
    # on one grayscale CACTI dataset and save all reconstructions to .mat.
    matfile = datasetdir + '/' + datname + '_cacti.mat'  # path of the .mat data file

    # [1] load data.
    # MATLAB '-v7.2' and lower files load via scipy.io.loadmat; '-v7.3'
    # files are HDF5 and must be read with h5py (arrays come back
    # column-major, hence the transpose).
    # FIX(review): the original detected the MAT version through SciPy's
    # private scipy.io.matlab.mio._open_file / miobase.get_matfile_version,
    # which no longer exist in modern SciPy. loadmat raising
    # NotImplementedError is the supported way to detect a v7.3 file.
    try:
        file = sio.loadmat(matfile)  # for '-v7.2' and lower version of .mat file (MATLAB)
        meas = np.float32(file['meas'])
        mask = np.float32(file['mask'])
        orig = np.float32(file['orig'])
    except NotImplementedError:  # MATLAB .mat v7.3 (HDF5)
        file = h5py.File(matfile, 'r')  # for '-v7.3' .mat file (MATLAB)
        meas = np.float32(file['meas']).transpose()
        mask = np.float32(file['mask']).transpose()
        orig = np.float32(file['orig']).transpose()
    print(meas.shape, mask.shape, orig.shape)

    iframe = 0
    if nframe < 0:  # negative sentinel: reconstruct every measurement frame
        nframe = meas.shape[2]
    MAXB = 255.  # maximum pixel value of the 8-bit measurements

    # common parameters and pre-calculation for PnP:
    # forward model and its transpose as function handles
    A = lambda x: A_(x, mask)    # forward model function handle
    At = lambda y: At_(y, mask)  # transpose of forward model
    mask_sum = np.sum(mask, axis=2)
    mask_sum[mask_sum == 0] = 1  # guard against division by zero downstream

    # In[4]:
    # [2.1] GAP-TV
    # X. Yuan, "Generalized alternating projection based total variation
    # minimization for compressive sensing," ICIP 2016, pp. 2539-2543.
    projmeth = 'gap'     # projection method
    _lambda = 1          # regularization factor
    accelerate = True    # enable accelerated version of GAP
    denoiser = 'tv'      # total variation (TV)
    iter_max = 40        # maximum number of iterations
    tv_weight = 0.3      # TV denoising weight (larger for smoother but slower)
    tv_iter_max = 5      # TV denoising maximum number of iterations each
    vgaptv, tgaptv, psnr_gaptv, ssim_gaptv, psnrall_gaptv = admmdenoise_cacti(
        meas, mask, A, At,
        projmeth=projmeth, v0=None, orig=orig,
        iframe=iframe, nframe=nframe,
        MAXB=MAXB, maskdirection='plain',
        _lambda=_lambda, accelerate=accelerate,
        denoiser=denoiser, iter_max=iter_max,
        tv_weight=tv_weight,
        tv_iter_max=tv_iter_max)
    print('{}-{} PSNR {:2.2f} dB, SSIM {:.4f}, running time {:.1f} seconds.'.format(
        projmeth.upper(), denoiser.upper(), mean(psnr_gaptv), mean(ssim_gaptv), tgaptv))

    # In[6]:
    # [2.2] GAP-FFDNet (FFDNet-based frame-wise video denoising)
    import torch
    from packages.ffdnet.models import FFDNet

    def _strip_dataparallel_prefix(state_dict):
        # Checkpoints saved from an nn.DataParallel model prefix every
        # parameter key with 'module.'; strip it so the bare module loads.
        return {k[len('module.'):] if k.startswith('module.') else k: v
                for k, v in state_dict.items()}

    projmeth = 'gap'      # projection method
    _lambda = 1           # regularization factor
    accelerate = True     # enable accelerated version of GAP
    denoiser = 'ffdnet'   # FFDNet frame-wise denoiser
    noise_estimate = False  # disable noise estimation for GAP
    sigma = [50/255, 25/255, 12/255, 6/255]  # pre-set noise standard deviation schedule
    iter_max = [10, 10, 10, 10]              # maximum iterations per noise level
    useGPU = True  # use GPU

    # pre-load the model for FFDNet grayscale image denoising
    in_ch = 1
    model_fn = 'packages/ffdnet/models/net_gray.pth'
    net = FFDNet(num_input_channels=in_ch)
    # Load saved weights
    if useGPU:
        state_dict = torch.load(model_fn)
        device_ids = [0]
        model = torch.nn.DataParallel(net, device_ids=device_ids).cuda()
    else:
        state_dict = torch.load(model_fn, map_location='cpu')
        # CPU mode: remove the DataParallel wrapper from the checkpoint keys.
        # FIX(review): the original called remove_dataparallel_wrapper(),
        # which was never defined or imported and raised NameError here.
        state_dict = _strip_dataparallel_prefix(state_dict)
        model = net
    model.load_state_dict(state_dict)
    model.eval()  # evaluation mode

    vgapffdnet, tgapffdnet, psnr_gapffdnet, ssim_gapffdnet, psnrall_gapffdnet = admmdenoise_cacti(
        meas, mask, A, At,
        projmeth=projmeth, v0=None, orig=orig,
        iframe=iframe, nframe=nframe,
        MAXB=MAXB, maskdirection='plain',
        _lambda=_lambda, accelerate=accelerate,
        denoiser=denoiser, model=model,
        iter_max=iter_max, sigma=sigma)
    print('{}-{} PSNR {:2.2f} dB, SSIM {:.4f}, running time {:.1f} seconds.'.format(
        projmeth.upper(), denoiser.upper(), mean(psnr_gapffdnet), mean(ssim_gapffdnet), tgapffdnet))

    # In[7]:
    # [2.3] GAP-FastDVDnet (video non-local denoising network)
    import torch
    from packages.fastdvdnet.models import FastDVDnet

    projmeth = 'gap'          # projection method
    _lambda = 1               # regularization factor
    accelerate = True         # enable accelerated version of GAP
    denoiser = 'fastdvdnet'   # video non-local network
    noise_estimate = False    # disable noise estimation for GAP
    sigma = [100/255, 50/255, 25/255, 12/255]  # pre-set noise standard deviation schedule
    iter_max = [20, 20, 20, 20]                # maximum iterations per noise level
    useGPU = True  # use GPU

    # pre-load the model for FastDVDnet video denoising
    NUM_IN_FR_EXT = 5  # temporal size of patch
    model = FastDVDnet(num_input_frames=NUM_IN_FR_EXT, num_color_channels=1)
    # Load saved weights
    state_temp_dict = torch.load('./packages/fastdvdnet/model_gray.pth')
    if useGPU:
        device_ids = [0]
        model = model.cuda()
    model.load_state_dict(state_temp_dict)
    # Sets the model in evaluation mode (e.g. it removes BN)
    model.eval()

    vgapfastdvdnet, tgapfastdvdnet, psnr_gapfastdvdnet, ssim_gapfastdvdnet, psnrall_gapfastdvdnet = admmdenoise_cacti(
        meas, mask, A, At,
        projmeth=projmeth, v0=None, orig=orig,
        iframe=iframe, nframe=nframe,
        MAXB=MAXB, maskdirection='plain',
        _lambda=_lambda, accelerate=accelerate,
        denoiser=denoiser, model=model,
        iter_max=iter_max, sigma=sigma)
    print('{}-{} PSNR {:2.2f} dB, SSIM {:.4f}, running time {:.1f} seconds.'.format(
        projmeth.upper(), denoiser.upper(), mean(psnr_gapfastdvdnet), mean(ssim_gapfastdvdnet), tgapfastdvdnet))

    # In[8]:
    # [3] save all reconstructions for this dataset into a single .mat file
    # (file name uses the last FastDVDnet sigma, scaled back to 0-255)
    nmask = mask.shape[2]
    savedmatdir = resultsdir + '/savedmat/cacti/'
    if not os.path.exists(savedmatdir):
        os.makedirs(savedmatdir)
    sio.savemat('{}gap{}_{}_{:d}_sigma{:d}.mat'.format(
        savedmatdir, denoiser.lower(), datname, nmask, int(sigma[-1]*MAXB)),
        {'vgaptv': vgaptv,
         'psnr_gaptv': psnr_gaptv,
         'ssim_gaptv': ssim_gaptv,
         'psnrall_tv': psnrall_gaptv,
         'tgaptv': tgaptv,
         'vgapffdnet': vgapffdnet,
         'psnr_gapffdnet': psnr_gapffdnet,
         'ssim_gapffdnet': ssim_gapffdnet,
         'psnrall_ffdnet': psnrall_gapffdnet,
         'tgapffdnet': tgapffdnet,
         'vgapfastdvdnet': vgapfastdvdnet,
         'psnr_gapfastdvdnet': psnr_gapfastdvdnet,
         'ssim_gapfastdvdnet': ssim_gapfastdvdnet,
         'psnrall_fastdvdnet': psnrall_gapfastdvdnet,
         'tgapfastdvdnet': tgapfastdvdnet})