from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras import mixed_precision
from models.model_zoo.pidnet.pidnet import PIDNet
from models.model_builder import ModelBuilder
from utils.load_semantic_datasets import SemanticGenerator
from utils.semantic_loss import SemanticLoss, BoundaryLoss, AuxiliaryLoss
from utils.metrics import MIoU, CityMIoU
import os
import tensorflow as tf
import tensorflow_addons as tfa
class ModelConfiguration(SemanticGenerator):
def __init__(self, args: object, mirrored_strategy: object = None):
"""
Args:
args (argparse): Training options (argparse).
mirrored_strategy (tf.distribute): tf.distribute.MirroredStrategy() with Session.
"""
self.args = args
self.mirrored_strategy = mirrored_strategy
self.__set_args()
super().__init__(data_dir=self.DATASET_DIR, image_size=self.IMAGE_SIZE,
batch_size=self.BATCH_SIZE, dataset_name=self.DATASET_NAME)
def configuration_dataset(self):
"""
Configure the dataset. Train and validation dataset is inherited from the parent class and used.
"""
# Wrapping tf.data generator
self.train_data = self.get_trainData(train_data=self.train_data)
self.valid_data = self.get_validData(valid_data=self.valid_data)
# Calculate training and validation steps
self.steps_per_epoch = self.number_train // self.BATCH_SIZE
self.validation_steps = self.number_valid // self.BATCH_SIZE
        # Distribute the tf.data pipelines when multi-GPU training is enabled
if self.DISTRIBUTION_MODE:
self.train_data = self.mirrored_strategy.experimental_distribute_dataset(self.train_data)
self.valid_data = self.mirrored_strategy.experimental_distribute_dataset(self.valid_data)
def __set_args(self):
"""
Configure the options received from argparse.
"""
# Set training variables from argparse's arguments
self.MODEL_PREFIX = self.args.model_prefix
self.WEIGHT_DECAY = self.args.weight_decay
self.NUM_CLASSES = self.args.num_classes
self.OPTIMIZER_TYPE = self.args.optimizer
self.BATCH_SIZE = self.args.batch_size
self.EPOCHS = self.args.epoch
self.INIT_LR = self.args.lr
self.INPUT_NORM_TYPE = self.args.image_norm_type
self.NETWORK_NAME = self.args.network_name
self.SAVE_MODEL_NAME = self.args.model_name + '_' + self.args.model_prefix
self.DATASET_DIR = self.args.dataset_dir
self.DATASET_NAME = self.args.dataset_name
self.LOSS_TYPE = self.args.loss_type
self.CHECKPOINT_DIR = self.args.checkpoint_dir
self.TENSORBOARD_DIR = self.args.tensorboard_dir
self.IMAGE_SIZE = self.args.image_size
self.USE_WEIGHT_DECAY = self.args.use_weightDecay
self.MIXED_PRECISION = self.args.mixed_precision
self.DISTRIBUTION_MODE = self.args.multi_gpu
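        # NOTE: doubling the global batch size below assumes a two-replica MirroredStrategy
        # (one per-GPU batch per replica); adjust the factor if a different GPU count is used.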
if self.DISTRIBUTION_MODE:
self.BATCH_SIZE *= 2
os.makedirs(self.DATASET_DIR, exist_ok=True)
os.makedirs(self.CHECKPOINT_DIR, exist_ok=True)
os.makedirs(self.CHECKPOINT_DIR + self.args.model_name, exist_ok=True)
def __set_callbacks(self):
"""
Set the keras callback of model.fit.
For some metric callbacks, the name of the custom metric may be different and may not be valid,
so you must specify the name of the custom metric.
"""
# Set training keras callbacks
checkpoint_val_loss = ModelCheckpoint(self.CHECKPOINT_DIR + self.args.model_name + '/_' + self.SAVE_MODEL_NAME + '_best_loss.h5',
monitor='val_loss', save_best_only=True, save_weights_only=True, verbose=1)
checkpoint_val_iou = ModelCheckpoint(self.CHECKPOINT_DIR + self.args.model_name + '/_' + self.SAVE_MODEL_NAME + '_best_iou.h5',
monitor='val_' + self.miou_name, save_best_only=True, save_weights_only=True,
verbose=1, mode='max')
tensorboard = tf.keras.callbacks.TensorBoard(
log_dir=self.TENSORBOARD_DIR + 'semantic/' + self.MODEL_PREFIX, write_graph=True, write_images=True)
# lrDecay = tf.keras.optimizers.schedules.PolynomialDecay(initial_learning_rate=self.INIT_LR,
# decay_steps=self.EPOCHS,
# end_learning_rate=self.INIT_LR * 0.01, power=0.9)
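        # The schedule below is driven by LearningRateScheduler, which calls it with the epoch
        # index, so decay_steps is expressed in epochs rather than optimizer steps.
        # CosineDecay's alpha is a fraction of the initial learning rate, not an absolute LR.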
        lrDecay = tf.keras.optimizers.schedules.CosineDecay(initial_learning_rate=self.INIT_LR, decay_steps=self.EPOCHS, alpha=0.001)
lr_scheduler = tf.keras.callbacks.LearningRateScheduler(lrDecay, verbose=1)
        # If you need additional callbacks, add them here.
self.callback = [checkpoint_val_iou,
checkpoint_val_loss, tensorboard, lr_scheduler]
def __set_optimizer(self):
"""
Configure the optimizer for backpropagation calculations.
"""
        if self.OPTIMIZER_TYPE == 'sgd':
            self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.INIT_LR, momentum=0.9)
elif self.OPTIMIZER_TYPE == 'adam':
self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.INIT_LR)
elif self.OPTIMIZER_TYPE == 'radam':
self.optimizer = tfa.optimizers.RectifiedAdam(learning_rate=self.INIT_LR,
weight_decay=0.00001,
                                                          total_steps=int(self.number_train / self.BATCH_SIZE * self.EPOCHS),
warmup_proportion=0.1,
min_lr=0.0001)
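        else:
            # Defensive guard (illustrative addition): fail fast on an unrecognized optimizer
            # type instead of leaving self.optimizer unset and failing later with AttributeError.
            raise ValueError('Unsupported optimizer type: {}'.format(self.OPTIMIZER_TYPE))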
        if self.MIXED_PRECISION:
            # Enable the mixed-precision policy and wrap the optimizer with loss scaling
            # to avoid float16 gradient underflow.
            policy = mixed_precision.Policy('mixed_float16')
            mixed_precision.set_global_policy(policy)
            self.optimizer = mixed_precision.LossScaleOptimizer(self.optimizer)
def __configuration_model(self):
"""
Build a deep learning model.
"""
self.model = ModelBuilder(image_size=self.IMAGE_SIZE,
num_classes=self.NUM_CLASSES, use_weight_decay=self.USE_WEIGHT_DECAY, weight_decay=self.WEIGHT_DECAY)
self.model = self.model.build_model(model_name=self.NETWORK_NAME, training=True)
def __configuration_metric(self):
"""
Configure metrics for use in training and evaluation.
"""
self.metric_list = []
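        # __set_callbacks monitors 'val_' + self.miou_name for the best-IoU checkpoint,
        # so the name stored below must match the metric name Keras reports during training.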
if self.DATASET_NAME == 'cityscapes':
print('cityscapes dataset miou')
mIoU = CityMIoU(self.NUM_CLASSES+1)
self.miou_name = 'city_m_io_u'
else:
print('custom dataset miou')
mIoU = MIoU(self.NUM_CLASSES)
            self.miou_name = 'main_m_io_u'
        # You can add custom metrics here.
self.metric_list.append(mIoU)
def train(self):
"""
Compile all configuration settings required for training.
If the custom metric name is different in the __set_callbacks function,
the update may not be possible, so please check the name.
"""
self.configuration_dataset()
self.__set_optimizer()
self.__configuration_model()
self.__configuration_metric()
self.__set_callbacks()
main_loss = SemanticLoss(gamma=2.0, from_logits=True, use_multi_gpu=self.DISTRIBUTION_MODE,
global_batch_size=self.BATCH_SIZE, num_classes=self.NUM_CLASSES,
dataset_name=self.DATASET_NAME, loss_type=self.LOSS_TYPE)
boundary_loss = BoundaryLoss(from_logits=True, use_multi_gpu=self.DISTRIBUTION_MODE,
global_batch_size=self.BATCH_SIZE, num_classes=self.NUM_CLASSES)
        auxiliary_loss = AuxiliaryLoss(from_logits=True, use_multi_gpu=self.DISTRIBUTION_MODE,
                                       global_batch_size=self.BATCH_SIZE, num_classes=self.NUM_CLASSES)
        losses = {
            'main': main_loss,
            'boundary': boundary_loss,
            'aux': auxiliary_loss
        }
metrics = {
'main': self.metric_list[0]
}
self.model.compile(
optimizer=self.optimizer,
loss=losses,
metrics=metrics)
self.model.summary()
self.model.fit(self.train_data,
validation_data=self.valid_data,
steps_per_epoch=self.steps_per_epoch,
validation_steps=self.validation_steps,
epochs=self.EPOCHS,
callbacks=self.callback)
def saved_model(self):
"""
Convert it to a graph model (.pb) using the learned weights.
"""
self.model = PIDNet(input_shape=(*self.IMAGE_SIZE, 3), m=2, n=3, num_classes=self.NUM_CLASSES,
planes=32, ppm_planes=96, head_planes=128, augment=True, training=False).build()
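        # The arguments above (m=2, n=3, planes=32, ppm_planes=96, head_planes=128) match the
        # PIDNet-S (small) variant; the graph is rebuilt with training=False for inference export.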
self.model.load_weights(self.args.saved_model_path)
export_path = os.path.join(self.CHECKPOINT_DIR, 'export_path', '1')
os.makedirs(export_path, exist_ok=True)
self.export_path = export_path
self.model.summary()
tf.keras.models.save_model(
self.model,
self.export_path,
overwrite=True,
include_optimizer=False,
save_format=None,
signatures=None,
options=None
)
print("save model clear")