# Author: Jacob Dawson
#
# In this file, we will specify the architecture of our neural network
# classifier. I'm thinking that, at least to begin with, we'll be using a
# pretty standard convolutional neural network
import tensorflow as tf
from tensorflow import keras
from constants import *
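
# NOTE: constants.py is assumed (via the wildcard import above) to define
# image_size, num_channels, and output_options, all of which are used below.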

# conv, batchnorm, and activation
def layer(input, filters, size, stride, apply_batchnorm=True):
    out = keras.layers.Conv2D(filters, kernel_size=size, strides=stride, padding='same')(input)
    if apply_batchnorm:
        out = keras.layers.BatchNormalization()(out)
    return keras.layers.Activation('selu')(out)

# halve the spatial size of the image and project down to `filters` channels
# (a 1x1 conv handles the channel projection, AveragePooling2D the halving)
def downsample(input, filters, apply_batchnorm=True):
    out = keras.layers.Conv2D(filters, kernel_size=1, strides=1, padding='same')(input)
    if apply_batchnorm:
        out = keras.layers.BatchNormalization()(out)
    out = keras.layers.Activation('selu')(out)
    return keras.layers.AveragePooling2D()(out)

# link the above layers into a dense block!
def denseBlock(input, filters, size, apply_batchnorm=True):
    # how many layers do we want per dense block? three, for now
    # (l4 and l5 are left commented out below)
    l1 = layer(input, filters=filters, size=size, stride=1, apply_batchnorm=apply_batchnorm)
    l2 = layer(keras.layers.Concatenate()([input, l1]), filters=filters, size=size, stride=1, apply_batchnorm=apply_batchnorm)
    l3 = layer(keras.layers.Concatenate()([input, l1, l2]), filters=filters, size=size, stride=1, apply_batchnorm=apply_batchnorm)
    #l4 = layer(keras.layers.Concatenate()([input, l1, l2, l3]), filters=filters, size=size, stride=1, apply_batchnorm=apply_batchnorm)
    #l5 = layer(keras.layers.Concatenate()([input, l1, l2, l3, l4]), filters=filters, size=size, stride=1, apply_batchnorm=apply_batchnorm)
    return downsample(l3, filters=filters, apply_batchnorm=apply_batchnorm)
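
# A quick sanity check of the dense wiring (a minimal sketch; the 32x32x3
# input shape is an illustrative assumption, not a value from constants.py).
# Each successive `layer` sees the block input plus every earlier layer's
# output via Concatenate, and `downsample` then halves the spatial dimensions:
#
#   x = keras.layers.Input(shape=(32, 32, 3))
#   y = denseBlock(x, filters=8, size=3)
#   print(y.shape)  # (None, 16, 16, 8)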

# conv, activation, optional batchnorm and dropout, then downsampling
def simpleBlock(input, filters, size, stride, apply_batchnorm=True, apply_dropout=True):
    out = keras.layers.Conv2D(filters, kernel_size=size, strides=stride, padding='same')(input)
    out = keras.layers.Activation('selu')(out)
    if apply_batchnorm:
        out = keras.layers.BatchNormalization()(out)
    if apply_dropout:
        out = keras.layers.Dropout(0.25)(out)
    return keras.layers.AveragePooling2D()(out)

def convNet():
    input = keras.layers.Input(shape=(image_size, image_size, num_channels), dtype=tf.float16)

    # dense net solution:
    out = denseBlock(input, filters=8, size=3, apply_batchnorm=False)
    out = denseBlock(out, filters=16, size=3, apply_batchnorm=True)
    out = denseBlock(out, filters=32, size=3, apply_batchnorm=True)
    out = denseBlock(out, filters=64, size=3, apply_batchnorm=True)
    out = denseBlock(out, filters=128, size=3, apply_batchnorm=True)

    # simpler solution:
    '''
    out = simpleBlock(input, 8, 3, 1, apply_batchnorm=False, apply_dropout=True)
    out = simpleBlock(out, 16, 3, 1, apply_batchnorm=True, apply_dropout=True)
    out = simpleBlock(out, 32, 3, 1, apply_batchnorm=True, apply_dropout=True)
    out = simpleBlock(out, 64, 3, 1, apply_batchnorm=True, apply_dropout=True)
    out = simpleBlock(out, 128, 3, 1, apply_batchnorm=True, apply_dropout=False)
    '''

    out = keras.layers.GlobalAveragePooling2D()(out)
    # sigmoid suits independent (multi-label) outputs; softmax would be the
    # choice if the output_options classes were mutually exclusive
    out = keras.layers.Dense(output_options, activation='sigmoid')(out)
    return keras.Model(inputs=input, outputs=out, name='classifier')

if __name__ == '__main__':
    network = convNet()
    network.summary()
    # this requires graphviz, the bane of my existence
    #keras.utils.plot_model(network, to_file='network_plot.png', show_shapes=True, show_layer_names=False, show_layer_activations=True, expand_nested=True)
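
    # A minimal training sketch, left commented out (an assumption, not this
    # repo's actual training loop): binary crossentropy pairs with the sigmoid
    # head above, and train_images/train_labels are hypothetical arrays of
    # shape (n, image_size, image_size, num_channels) and (n, output_options).
    #
    #network.compile(
    #    optimizer=keras.optimizers.Adam(learning_rate=1e-3),
    #    loss='binary_crossentropy',
    #    metrics=['accuracy'],
    #)
    #network.fit(train_images, train_labels, batch_size=32, epochs=10)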