#! /usr/bin/python
# -*- coding: utf8 -*-
import tensorflow as tf
import tensorlayer as tl
import numpy as np
from tensorlayer.layers import *


def UNet(t_image, is_train=False, reuse=False, scope="UNet"):
    """Residual UNet: three stride-2 downsampling stages and three stride-2
    upsampling stages with additive skip connections; outputs a single-channel
    sigmoid map."""
w_init1 = tf.random_normal_initializer(stddev=0.02)
w_init2 = tf.random_normal_initializer(stddev=0.01)
w_init3 = tf.random_normal_initializer(stddev=0.005)
w_init4 = tf.random_normal_initializer(stddev=0.002)
b_init = None # tf.constant_initializer(value=0.0)
g_init = tf.random_normal_initializer(1., 0.02)
hrg = t_image.get_shape()[1]
wrg = t_image.get_shape()[2]
with tf.variable_scope(scope, reuse=reuse) as vs:
tl.layers.set_name_reuse(reuse)
n = InputLayer(t_image, name='in')
# n_init = InputLayer(t_image, name='in2')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init1, name='f0/c')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='f0/b')
        f0 = n  # full-resolution skip feature
        # encoder stage 1: downsample to H/2 x W/2
        n = Conv2d(n, 64, (3, 3), (2, 2), act=None, padding='SAME', W_init=w_init2, name='d1/c1')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d1/b1')
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init2, name='d1/c2')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d1/b2')
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init2, name='d1/c3')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='d1/b3')
        f1_2 = n  # H/2 skip feature
        # encoder stage 2: downsample to H/4 x W/4
        n = Conv2d(n, 256, (3, 3), (2, 2), act=None, padding='SAME', W_init=w_init3, name='d2/c1')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d2/b1')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init3, name='d2/c2')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d2/b2')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init3, name='d2/c3')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d2/b3')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init3, name='d2/c4')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='d2/b4')
        f2_3 = n  # H/4 skip feature
        # encoder stage 3 (bottleneck): downsample to H/8 x W/8
        n = Conv2d(n, 512, (3, 3), (2, 2), act=None, padding='SAME', W_init=w_init4, name='d3/c1')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d3/b1')
n = Conv2d(n, 512, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init4, name='d3/c2')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d3/b2')
n = Conv2d(n, 512, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init4, name='d3/c3')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d3/b3')
n = Conv2d(n, 512, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init4, name='d3/c4')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d3/b4')
        # decoder: upsample to H/4 x W/4 and fuse the f2_3 skip
        n = DeConv2d(n, 256, (3, 3), (hrg//4, wrg//4), (2, 2), act=None, padding='SAME', W_init=w_init3, name='u3/d')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='u3/b')
n = ElementwiseLayer([n, f2_3], tf.add, name='s4')
# n = InputLayer(tf.nn.relu(n.outputs), name='relu4')
n.outputs = tf.nn.relu(n.outputs)
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init3, name='u3/c1')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u3/b1')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init3, name='u3/c2')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u3/b2')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init3, name='u3/c3')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u3/b3')
        # decoder: upsample to H/2 x W/2 and fuse the f1_2 skip
        n = DeConv2d(n, 128, (3, 3), (hrg//2, wrg//2), (2, 2), act=None, padding='SAME', W_init=w_init2, name='u2/d')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='u2/b')
n = ElementwiseLayer([n, f1_2], tf.add, name='s3')
# n = InputLayer(tf.nn.relu(n.outputs), name='relu3')
n.outputs = tf.nn.relu(n.outputs)
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init2, name='u2/c1')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u2/b1')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init2, name='u2/c2')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u2/b2')
        # decoder: upsample to H x W and fuse the f0 skip
        n = DeConv2d(n, 64, (3, 3), (hrg, wrg), (2, 2), act=None, padding='SAME', W_init=w_init1, name='u1/d')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='u1/b')
n = ElementwiseLayer([n, f0], tf.add, name='s2')
n.outputs = tf.nn.relu(n.outputs)
#n = InputLayer(tf.nn.relu(n.outputs), name='relu2')
n = Conv2d(n, 15, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init1, name='u1/c1')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u1/b1')
n = Conv2d(n, 1, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init1, name='u1/c2')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='u1/b2')
# n = ElementwiseLayer([n, n_init], tf.add, name='s1')
        # NOTE: n2 aliases n, so both returned handles share the same
        # sigmoid-activated output tensor.
        n2 = n
        n.outputs = tf.nn.sigmoid(n.outputs)
        # n2 = InputLayer(tf.nn.sigmoid(n.outputs), name='sigmoid')
    return n, n2
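
# Usage sketch (illustrative only, not from the original file; the placeholder
# shape is an assumption). Both returned handles point to the same layer with
# the sigmoid already applied:
#
#   t_image = tf.placeholder(tf.float32, [1, 256, 256, 3])
#   net, _ = UNet(t_image, is_train=False)
#   # net.outputs: [1, 256, 256, 1] tensor with values in (0, 1)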


def UNet_(t_image, is_train=False, reuse=False, scope="UNet"):
    """Same architecture as UNet, but returns only the sigmoid-activated network."""
w_init1 = tf.random_normal_initializer(stddev=0.02)
w_init2 = tf.random_normal_initializer(stddev=0.01)
w_init3 = tf.random_normal_initializer(stddev=0.005)
w_init4 = tf.random_normal_initializer(stddev=0.002)
b_init = None # tf.constant_initializer(value=0.0)
g_init = tf.random_normal_initializer(1., 0.02)
hrg = t_image.get_shape()[1]
wrg = t_image.get_shape()[2]
with tf.variable_scope(scope, reuse=reuse) as vs:
tl.layers.set_name_reuse(reuse)
n = InputLayer(t_image, name='in')
# n_init = InputLayer(t_image, name='in2')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init1, name='f0/c')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='f0/b')
f0 = n
n = Conv2d(n, 64, (3, 3), (2, 2), act=None, padding='SAME', W_init=w_init2, name='d1/c1')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d1/b1')
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init2, name='d1/c2')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d1/b2')
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init2, name='d1/c3')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='d1/b3')
f1_2 = n
n = Conv2d(n, 256, (3, 3), (2, 2), act=None, padding='SAME', W_init=w_init3, name='d2/c1')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d2/b1')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init3, name='d2/c2')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d2/b2')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init3, name='d2/c3')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d2/b3')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init3, name='d2/c4')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='d2/b4')
f2_3 = n
n = Conv2d(n, 512, (3, 3), (2, 2), act=None, padding='SAME', W_init=w_init4, name='d3/c1')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d3/b1')
n = Conv2d(n, 512, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init4, name='d3/c2')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d3/b2')
n = Conv2d(n, 512, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init4, name='d3/c3')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d3/b3')
n = Conv2d(n, 512, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init4, name='d3/c4')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='d3/b4')
n = DeConv2d(n, 256, (3, 3), (hrg//4, wrg//4), (2, 2), act=None, padding='SAME', W_init=w_init3, name='u3/d')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='u3/b')
n = ElementwiseLayer([n, f2_3], tf.add, name='s4')
# n = InputLayer(tf.nn.relu(n.outputs), name='relu4')
n.outputs = tf.nn.relu(n.outputs)
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init3, name='u3/c1')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u3/b1')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init3, name='u3/c2')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u3/b2')
n = Conv2d(n, 256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init3, name='u3/c3')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u3/b3')
n = DeConv2d(n, 128, (3, 3), (hrg//2, wrg//2), (2, 2), act=None, padding='SAME', W_init=w_init2, name='u2/d')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='u2/b')
n = ElementwiseLayer([n, f1_2], tf.add, name='s3')
# n = InputLayer(tf.nn.relu(n.outputs), name='relu3')
n.outputs = tf.nn.relu(n.outputs)
n = Conv2d(n, 128, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init2, name='u2/c1')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u2/b1')
n = Conv2d(n, 64, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init2, name='u2/c2')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u2/b2')
n = DeConv2d(n, 64, (3, 3), (hrg, wrg), (2, 2), act=None, padding='SAME', W_init=w_init1, name='u1/d')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='u1/b')
n = ElementwiseLayer([n, f0], tf.add, name='s2')
n.outputs = tf.nn.relu(n.outputs)
#n = InputLayer(tf.nn.relu(n.outputs), name='relu2')
n = Conv2d(n, 15, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init1, name='u1/c1')
n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u1/b1')
n = Conv2d(n, 1, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init1, name='u1/c2')
n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='u1/b2')
# n = ElementwiseLayer([n, n_init], tf.add, name='s1')
        # NOTE: n2 aliases n, so the sigmoid below mutates the shared output tensor.
        n2 = n
        n2.outputs = tf.nn.sigmoid(n.outputs)
        # n2 = InputLayer(tf.nn.sigmoid(n.outputs), name='sigmoid')
    return n


def VGG19_pretrained(t_image, reuse=False, scope="VGG"):
    """
    Build the VGG 19 model.

    Parameters
    -----------
    t_image : RGB image placeholder [batch, height, width, 3], values scaled to [0, 1]
    """
    hrg = t_image.get_shape()[1]
    wrg = t_image.get_shape()[2]
    VGG_MEAN = [103.939, 116.779, 123.68]
    print("build model started")
    rgb_scaled = t_image * 255.0
    # Convert RGB to BGR (the ImageNet means above are in BGR order)
    if tf.__version__ <= '0.11':
        red, green, blue = tf.split(3, 3, rgb_scaled)
    else:  # TF 1.0+
        red, green, blue = tf.split(rgb_scaled, 3, 3)
if tf.__version__ <= '0.11':
bgr = tf.concat(3, [
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
])
else:
bgr = tf.concat(
[
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
], axis=3)
with tf.variable_scope(scope, reuse=reuse) as vs:
# input layer
net_in = InputLayer(bgr, name='input')
# conv1
network = Conv2d(net_in, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',name='conv1_1')
network = Conv2d(network, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_2')
f0 = network
#n.outputs= tf.nn.relu(n.outputs)
network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')
# conv2
network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_1')
network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_2')
f0_1 = network
#n.outputs = tf.nn.relu(n.outputs)
network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')
# conv3
network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_1')
network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_2')
network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_3')
network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_4')
f1_2 = network
#n.outputs = tf.nn.relu(n.outputs)
network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool3')
# conv4
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_1')
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_2')
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_3')
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_4')
f2_3 = network
#n.outputs=tf.nn.relu(n.outputs)
network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool4')
# conv5
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_1')
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_2')
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_3')
n = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_4')
    return n, f0, f0_1, f1_2, f2_3, hrg, wrg
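
# Weight-loading sketch (an assumption, not defined in this file): the layer
# names above follow the standard TensorLayer VGG19 example, so a pretrained
# `vgg19.npy` (path and key layout assumed) could be assigned roughly like:
#
#   npz = np.load('vgg19.npy', encoding='latin1', allow_pickle=True).item()
#   params = []
#   for key in sorted(npz.keys()):
#       if 'conv' in key:            # skip the fully connected fc6/fc7/fc8 keys
#           params.extend(npz[key])  # each value is a [weights, biases] pair
#   tl.files.assign_params(sess, params, vgg_net)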


def VGG19_finetuning(t_image, reuse=False, scope="VGG"):
    """
    Build the VGG 19 model (identical to VGG19_pretrained; kept as a separate
    entry point for the fine-tuning path).

    Parameters
    -----------
    t_image : RGB image placeholder [batch, height, width, 3], values scaled to [0, 1]
    """
    hrg = t_image.get_shape()[1]
    wrg = t_image.get_shape()[2]
    VGG_MEAN = [103.939, 116.779, 123.68]
    print("build model started")
    rgb_scaled = t_image * 255.0
    # Convert RGB to BGR (the ImageNet means above are in BGR order)
    if tf.__version__ <= '0.11':
        red, green, blue = tf.split(3, 3, rgb_scaled)
    else:  # TF 1.0+
        red, green, blue = tf.split(rgb_scaled, 3, 3)
if tf.__version__ <= '0.11':
bgr = tf.concat(3, [
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
])
else:
bgr = tf.concat(
[
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
], axis=3)
with tf.variable_scope(scope, reuse=reuse) as vs:
# input layer
net_in = InputLayer(bgr, name='input')
# conv1
network = Conv2d(net_in, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',name='conv1_1')
network = Conv2d(network, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv1_2')
f0 = network
#n.outputs= tf.nn.relu(n.outputs)
network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')
# conv2
network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_1')
network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv2_2')
f0_1 = network
#n.outputs = tf.nn.relu(n.outputs)
network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')
# conv3
network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_1')
network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_2')
network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_3')
network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv3_4')
f1_2 = network
#n.outputs = tf.nn.relu(n.outputs)
network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool3')
# conv4
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_1')
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_2')
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_3')
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv4_4')
f2_3 = network
#n.outputs=tf.nn.relu(n.outputs)
network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool4')
# conv5
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_1')
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_2')
network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_3')
n = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='conv5_4')
    return n, f0, f0_1, f1_2, f2_3, hrg, wrg


def Decoder_Network_classification(n, f0, f1_2, f2_3, f3_4, hrg, wrg, reuse=False, scope="UNet"):
    """Decoder head: upsamples VGG19 conv5 features back to full resolution,
    fusing the encoder skips (f3_4, f2_3, f1_2, f0) through 1x1 projections,
    and emits a 3-channel map at each scale (n_m3, n_m2, n_m1, n) for
    multi-scale supervision."""
    w_init1 = tf.contrib.layers.xavier_initializer()
    w_init2 = tf.contrib.layers.xavier_initializer()
    w_init3 = tf.contrib.layers.xavier_initializer()
    w_init4 = tf.contrib.layers.xavier_initializer()
    with tf.variable_scope(scope, reuse=reuse) as vs:
        # NOTE: an earlier revision used w_init3 for this stage by mistake; w_init4 is intended.
        # upsample conv5 features to H/8 x W/8 and fuse the (projected) f3_4 skip
        n = DeConv2d(n, 512, (3, 3), (hrg // 8, wrg // 8), (2, 2), act=None, padding='SAME', W_init=w_init4,
                     name='u4/d')
# n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='u3/b')
# n.outputs = tf.nn.relu(n.outputs)
f3_4 = Conv2d(f3_4, 512, (1, 1), (1, 1), act=None, padding='SAME', W_init=w_init4, name='f3_4/c1')
n = ElementwiseLayer([n, f3_4], tf.add, name='s5')
# n = InputLayer(tf.nn.relu(n.outputs), name='relu4')
n.outputs = tf.nn.relu(n.outputs)
n = Conv2d(n, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init4, name='u34/c1')
# n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u3/b1')
# n.outputs = tf.nn.relu(n.outputs)
n = Conv2d(n, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init4, name='u4/c2')
# n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u3/b2')
# n.outputs = tf.nn.relu(n.outputs)
n = Conv2d(n, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init4, name='u4/c3')
# n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u3/b3')
# n.outputs = tf.nn.relu(n.outputs)
        n_m3 = Conv2d(n, 3, (1, 1), (1, 1), act=None, padding='SAME', W_init=w_init4, name='u4/loss3')  # 1/8-scale side output
        # upsample to H/4 x W/4 and fuse the (projected) f2_3 skip
        n = DeConv2d(n, 256, (3, 3), (hrg // 4, wrg // 4), (2, 2), act=None, padding='SAME', W_init=w_init3,
                     name='u3/d')
# n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='u3/b')
f2_3 = Conv2d(f2_3, 256, (1, 1), (1, 1), act=None, padding='SAME', W_init=w_init3, name='f2_3/c1')
n = ElementwiseLayer([n, f2_3], tf.add, name='s4')
# n = InputLayer(tf.nn.relu(n.outputs), name='relu4')
n.outputs = tf.nn.relu(n.outputs)
n = Conv2d(n, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init3, name='u3/c1')
# n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u3/b1')
# n.outputs = tf.nn.relu(n.outputs)
n = Conv2d(n, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init3, name='u3/c2')
# n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u3/b2')
# n.outputs = tf.nn.relu(n.outputs)
n = Conv2d(n, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init3, name='u3/c3')
# n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u3/b3')
# n.outputs = tf.nn.relu(n.outputs)
        n_m2 = Conv2d(n, 3, (1, 1), (1, 1), act=None, padding='SAME', W_init=w_init3, name='u3/loss2')  # 1/4-scale side output
        # upsample to H/2 x W/2 and fuse the (projected) f1_2 skip
        n = DeConv2d(n, 128, (3, 3), (hrg // 2, wrg // 2), (2, 2), act=None, padding='SAME', W_init=w_init2,
                     name='u2/d')
# n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='u2/b')
# n.outputs = tf.nn.relu(n.outputs)
f1_2 = Conv2d(f1_2, 128, (1, 1), (1, 1), act=None, padding='SAME', W_init=w_init2, name='f1_2/c1')
n = ElementwiseLayer([n, f1_2], tf.add, name='s3')
# n = InputLayer(tf.nn.relu(n.outputs), name='relu3')
n.outputs = tf.nn.relu(n.outputs)
n = Conv2d(n, 128, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init2, name='u2/c1')
# n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u2/b1')
# n.outputs = tf.nn.relu(n.outputs)
n = Conv2d(n, 128, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init2, name='u2/c2')
# n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u2/b2')
# n.outputs = tf.nn.relu(n.outputs)
        n_m1 = Conv2d(n, 3, (1, 1), (1, 1), act=None, padding='SAME', W_init=w_init2, name='u2/loss1')  # 1/2-scale side output
        # upsample to H x W and fuse the (projected) f0 skip
        n = DeConv2d(n, 64, (3, 3), (hrg, wrg), (2, 2), act=None, padding='SAME', W_init=w_init1, name='u1/d')
# n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='u1/b')
# n.outputs = tf.nn.relu(n.outputs)
f0 = Conv2d(f0, 64, (1, 1), (1, 1), act=None, padding='SAME', W_init=w_init1, name='f0/c1')
n = ElementwiseLayer([n, f0], tf.add, name='s2')
n.outputs = tf.nn.relu(n.outputs)
# n = InputLayer(tf.nn.relu(n.outputs), name='relu2')
n = Conv2d(n, 64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init1, name='u1/c1')
# n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u1/b1')
# n.outputs = tf.nn.relu(n.outputs)
n = Conv2d(n, 64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init1, name='u1/c2')
# n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u1/b1')
# n.outputs = tf.nn.relu(n.outputs)
#n = Conv2d(n, 64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init1, name='u1/c3')
# n = BatchNormLayer(n, act=tf.nn.relu, is_train=is_train, gamma_init=g_init, name='u1/b1')
# n.outputs = tf.nn.relu(n.outputs)
        n = Conv2d(n, 3, (1, 1), (1, 1), act=None, padding='SAME', W_init=w_init1, name='u1/c5')  # full-resolution 3-channel output
# n = BatchNormLayer(n, is_train=is_train, gamma_init=g_init, name='u1/b2')
# n.outputs = tf.nn.relu(n.outputs)
# n = ElementwiseLayer([n, n_init], tf.add, name='s1')
        # n.outputs = tf.nn.sigmoid(n.outputs)  # intentionally left disabled: the decoder returns raw logits
#n2 = n
# n2 = InputLayer(tf.nn.sigmoid(n.outputs), name='sigmoid')
return n, n_m1, n_m2, n_m3
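

# Wiring sketch (a minimal sketch, not part of the original file): composing
# the VGG19 encoder with the decoder above under TF 1.x / TensorLayer 1.x.
# The 256x256 input shape is an assumption. Note the argument shift: the
# decoder's (f0, f1_2, f2_3, f3_4) take the encoder's (f0, f0_1, f1_2, f2_3).
if __name__ == '__main__':
    t = tf.placeholder(tf.float32, [1, 256, 256, 3], name='t_image')
    feats, f0, f0_1, f1_2, f2_3, hrg, wrg = VGG19_pretrained(t)
    out, m1, m2, m3 = Decoder_Network_classification(feats, f0, f0_1, f1_2, f2_3, hrg, wrg)
    print(out.outputs)  # full-resolution 3-channel logits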