# coding=utf-8
import tensorflow as tf

# Temporal convolution parameters
tCONV_KENEL_NUM = 39
tCONV_KENEL_SIZE = 400
tCONV_STRIDE = 160
tCONV_POOLING_SIZE = 3
tCONV_POOLING_STRIDE = 1
NUM_CHANNELS = 1
# Frequency convolution parameters
fCONV_KENEL_NUM = 1
fCONV_KENEL_SIZE = 8
fCONV_STRIDE = 1
fCONV_POOLING_SIZE = 3
fCONV_POOLING_STRIDE = 1
# LSTM layer parameters
LSTM_NUM_UNITS = 280
KEEP_PROB = 0.5
NUM_LAYERS = 2
BATCH_SIZE = 32
NUM_STEPS = 1
# Number of classes
OUTPUT_NODE = 18
def forward(x, train, regularizer):
    # block 1
    conv1_1 = conv_op(x, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1)
    conv1_2 = conv_op(conv1_1, name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1)
    pool1 = mpool_op(conv1_2, name="pool1", kh=2, kw=2, dw=2, dh=2)
    # block 2 -- outputs 56x56x128
    conv2_1 = conv_op(pool1, name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1)
    conv2_2 = conv_op(conv2_1, name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1)
    pool2 = mpool_op(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)
    # block 3 -- outputs 28x28x256
    conv3_1 = conv_op(pool2, name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1)
    conv3_2 = conv_op(conv3_1, name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1)
    conv3_3 = conv_op(conv3_2, name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1)
    pool3 = mpool_op(conv3_3, name="pool3", kh=2, kw=2, dh=2, dw=2)
    # block 4 -- outputs 14x14x512
    conv4_1 = conv_op(pool3, name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1)
    conv4_2 = conv_op(conv4_1, name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1)
    conv4_3 = conv_op(conv4_2, name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1)
    pool4 = mpool_op(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)
    # block 5 -- outputs 7x7x512
    conv5_1 = conv_op(pool4, name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1)
    conv5_2 = conv_op(conv5_1, name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1)
    conv5_3 = conv_op(conv5_2, name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1)
    pool5 = mpool_op(conv5_3, name="pool5", kh=2, kw=2, dw=2, dh=2)
    # flatten
    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")
    # fully connected
    fc6 = fc_op(resh1, name="fc6", n_out=4096)
    fc6_drop = tf.nn.dropout(fc6, KEEP_PROB, name="fc6_drop")
    fc7 = fc_op(fc6_drop, name="fc7", n_out=4096)
    fc7_drop = tf.nn.dropout(fc7, KEEP_PROB, name="fc7_drop")
    logits = fc_op(fc7_drop, name="fc8", n_out=2)
    return logits
# Define the convolution operation
def conv_op(input_op, name, kh, kw, n_out, dh, dw):
    input_op = tf.convert_to_tensor(input_op)
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + "w",
                                 shape=[kh, kw, n_in, n_out],
                                 dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name='b')
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        return activation
# Define the fully connected operation
def fc_op(input_op, name, n_out):
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(scope + 'w',
                                 shape=[n_in, n_out],
                                 dtype=tf.float32,
                                 initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(tf.constant(0.1, shape=[n_out], dtype=tf.float32), name='b')
        # tf.nn.relu_layer multiplies input_op by kernel, adds the biases,
        # and applies a ReLU nonlinearity to produce the activation
        activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        return activation
# Define the max-pooling layer
def mpool_op(input_op, name, kh, kw, dh, dw):
    return tf.nn.max_pool(input_op,
                          ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding='SAME',
                          name=name)
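

# --- Minimal usage sketch (not part of the original file) ---
# A hedged example of wiring forward() into a graph. It assumes TensorFlow 1.x
# (tf.contrib is required by the initializers above) and a 224x224
# single-channel input; both the input shape and the training setup below are
# illustrative assumptions, not something this file specifies. Note that fc8
# above produces 2 logits as written (OUTPUT_NODE = 18 is unused here), so the
# label placeholder is shaped to match the returned logits.
if __name__ == "__main__":
    x = tf.placeholder(tf.float32, [None, 224, 224, NUM_CHANNELS], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, 2], name="y-input")
    # forward() ignores its train and regularizer arguments in this file.
    logits = forward(x, train=True, regularizer=None)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)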