-
Notifications
You must be signed in to change notification settings - Fork 2
/
model_vgg19.py
80 lines (61 loc) · 3.53 KB
/
model_vgg19.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import tensorflow as tf
import tensorflow.contrib.slim as slim
import config
class Model_Vgg19:
    """VGG-style fully-convolutional classifier for 32x32x3 images (10 classes).

    Builds two weight-sharing graphs under variable scope "Vgg19": a training
    graph (dropout on) and an inference graph (dropout off), plus prediction,
    accuracy, loss, and a Momentum train op with exponentially decayed
    learning rate.

    NOTE(review): the conv stack repeats are 2-2-3-3-3, which is the VGG-16
    configuration rather than VGG-19 (2-2-4-4-4) — confirm intended depth.
    """

    def __init__(self, is_train=True):
        # Placeholders: NHWC images and one-hot labels over 10 classes.
        self.input_image = tf.placeholder(tf.float32, [None, 32, 32, 3])
        self.images = tf.reshape(self.input_image, [-1, 32, 32, 3])
        self.input_label = tf.placeholder(tf.float32, [None, 10])
        # Kept for interface compatibility; only used via argmax below.
        self.labels = tf.cast(self.input_label, tf.int32)
        # NOTE(review): global_step is conventionally an integer variable;
        # float works with exponential_decay/minimize but is unusual.
        self.global_step = tf.Variable(0.0, trainable=False, dtype=tf.float32)
        self.num_sample = config.num_sample
        self.batch_size = config.batch_size
        self.learning_rate = config.learning_rate
        # L2 weight-decay coefficient fed to slim.l2_regularizer in build().
        self.weight_decay = 0.0005

        with tf.variable_scope("Vgg19") as scope:
            self.train_digits = self.build(True)
            scope.reuse_variables()  # inference graph shares all weights
            self.pred_digits = self.build(False)

        self.prediction = tf.argmax(self.pred_digits, 1)
        self.correct_prediction = tf.equal(self.prediction, tf.argmax(self.labels, 1))
        self.train_accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, "float"))

        # Cross-entropy is registered in the slim LOSSES collection.
        # BUG FIX: the original set self.loss to the cross-entropy alone, so
        # the L2 regularization losses collected from weights_regularizer
        # were never added and weight_decay had no effect on training.
        # get_total_loss() sums cross-entropy + regularization terms.
        slim.losses.softmax_cross_entropy(self.train_digits, self.labels)
        self.loss = slim.losses.get_total_loss(add_regularization_losses=True)

        # Step-wise (staircase) decay every `epoch_decay` epochs.
        self.lr = tf.train.exponential_decay(
            self.learning_rate, self.global_step,
            (self.num_sample // self.batch_size) * config.epoch_decay,
            config.learning_decay, staircase=True)
        self.train_op = tf.train.MomentumOptimizer(self.lr, config.momentum).minimize(
            self.loss, global_step=self.global_step)

    def build(self, is_train=True):
        """Construct the conv stack and return [batch, 10] logits.

        Args:
            is_train: when True, enables dropout in the 1x1-conv head.

        Returns:
            A [batch, 10] float tensor of unnormalized class logits.
        """
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            activation_fn=tf.nn.relu,
                            weights_regularizer=slim.l2_regularizer(self.weight_decay),
                            biases_initializer=tf.zeros_initializer):
            # Spatial size halves after each pool: 32 -> 16 -> 8 -> 4 -> 2 -> 1.
            net = slim.repeat(self.images, 2, slim.conv2d, 64, [3, 3], padding='SAME', scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')       # 16x16x64
            net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], padding='SAME', scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')       # 8x8x128
            net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], padding='SAME', scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')       # 4x4x256
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], padding='SAME', scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')       # 2x2x512
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], padding='SAME', scope='conv5')
            net = slim.max_pool2d(net, [2, 2], scope='pool5')       # 1x1x512
            # Fully-connected head implemented as 1x1 convolutions (the
            # feature map is already 1x1 spatially after pool5).
            net = slim.conv2d(net, 1024, [1, 1], padding='SAME', scope='fc6')
            net = slim.dropout(net, 0.5, is_training=is_train, scope='dropout6')
            net = slim.conv2d(net, 1024, [1, 1], padding='SAME', scope='fc7')
            net = slim.dropout(net, 0.5, is_training=is_train, scope='dropout7')
            digits = slim.conv2d(net, 10, [1, 1], padding='SAME', scope='fc8')
            # Collapse the 1x1 spatial dims: [batch, 1, 1, 10] -> [batch, 10].
            digits = tf.reshape(digits, shape=[-1, 10])
            return digits