# spatial_train_model.py
# forked from wushidonguc/two-stream-action-recognition-keras
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.optimizers import SGD


# CNN model for the spatial stream
def get_model(data, weights='imagenet'):
    # create the base pre-trained model
    base_model = InceptionV3(weights=weights, include_top=False)

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)

    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)

    # and a logistic layer with one output per action class
    predictions = Dense(len(data.classes), activation='softmax')(x)

    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)
    return model
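
# A minimal usage sketch (assumption: `data` is the project's dataset helper
# and exposes a `classes` list with one entry per action class):
#
#   model = get_model(data)   # InceptionV3 backbone + freshly initialized head
#   model.summary()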


def freeze_all_but_top(parallel_model, model):
    """Used to train just the top layers of the model."""
    # first: train only the top layers (which were randomly initialized),
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in model.layers[:-2]:
        layer.trainable = False

    # compile the model (should be done *after* setting layers to non-trainable)
    parallel_model.compile(optimizer='rmsprop',
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])
    return parallel_model, model
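
# The (parallel_model, model) pair follows Keras' multi_gpu_model pattern:
# the `trainable` flags live on the single-GPU template `model`, while the
# replicated `parallel_model` is what actually gets compiled and fit.
# A hedged sketch (the GPU count and generator name are assumptions):
#
#   from keras.utils import multi_gpu_model
#   template = get_model(data)
#   parallel = multi_gpu_model(template, gpus=2)
#   parallel, template = freeze_all_but_top(parallel, template)
#   parallel.fit_generator(train_generator, steps_per_epoch=100, epochs=10)
#
# On a single GPU, pass the same model object as both arguments.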


def freeze_all_but_mid_and_top(parallel_model, model):
    """After the dense top layers have converged, fine-tune deeper into the network."""
    # we chose to train the top 2 inception blocks, i.e. we freeze
    # the first 172 layers and unfreeze the rest
    for layer in model.layers[:172]:
        layer.trainable = False
    for layer in model.layers[172:]:
        layer.trainable = True

    # we need to recompile the model for these modifications to take effect;
    # we use SGD with a low learning rate so the pre-trained weights are not destroyed
    parallel_model.compile(
        optimizer=SGD(lr=0.0001, momentum=0.9),
        loss='categorical_crossentropy',
        metrics=['accuracy', 'top_k_categorical_accuracy'])
    return parallel_model, model
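

if __name__ == '__main__':
    # Minimal single-GPU sketch of the two-phase schedule. The directory
    # layout, image size, batch size and epoch counts are illustrative
    # assumptions, not values from the original project.
    from keras.preprocessing.image import ImageDataGenerator

    train_generator = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
        'data/train', target_size=(299, 299), batch_size=32,
        class_mode='categorical')

    class SpatialData(object):
        """Stand-in for the project's data helper; only `.classes` is used."""
        classes = list(train_generator.class_indices)

    model = get_model(SpatialData())

    # phase 1: train only the randomly initialized head
    model, _ = freeze_all_but_top(model, model)
    model.fit_generator(train_generator, steps_per_epoch=100, epochs=2)

    # phase 2: unfreeze the top two inception blocks and fine-tune
    model, _ = freeze_all_but_mid_and_top(model, model)
    model.fit_generator(train_generator, steps_per_epoch=100, epochs=10)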