# -*- coding: utf-8 -*-
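#
# Training script for a semantically controlled LSTM (SC-LSTM) text generator
# with GAN-style discriminators, run on the E2E NLG challenge data
# (trainset.csv / devset.csv / test_e2e.csv). Rough flow: load the data, build
# the generator and discriminator models via vae_model, optionally pretrain
# the discriminators, freeze them, and then train the generator end to end.
#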
from __future__ import unicode_literals
import _pickle as cPickle
import getopt
import json
import logging
import os
import sys
import time
from math import ceil
from os.path import join

import numpy as np
# seed NumPy before importing anything from Keras, for reproducibility
np.random.seed(1337)
import keras.backend as K
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
from keras.optimizers import Adadelta

from custom_callbacks import StepCallback, TerminateOnNaN, LexOutputCallbackGAN
from data_loaders.data_loader_nlg_syntax import load_text_gen_data
from data_loaders.lex_features_utils import get_number_outputs
from sc_lstm_architecutre.sclstm_gan_architecture import vae_model


def set_trainability(model, trainable=False):
    """Freeze or unfreeze a model and all of its layers."""
    model.trainable = trainable
    for layer in model.layers:
        layer.trainable = trainable


def clip_weights(model):
    """Clip every weight of the model to [-0.5, 0.5] (WGAN-style weight clipping)."""
    for layer in model.layers:
        weights = layer.get_weights()
        weights = [np.clip(w, -0.5, 0.5) for w in weights]
        layer.set_weights(weights)


def sample_noise(noise_scale, batch_size, noise_dim):
    """Sample a Gaussian noise batch of shape (batch_size, noise_dim[0])."""
    return np.random.normal(scale=noise_scale, size=(batch_size, noise_dim[0]))


def gen_batch(X, batch_size):
    """Yield random row-index batches over X, sampled without replacement."""
    while True:
        idx = np.random.choice(X.shape[0], batch_size, replace=False)
        yield idx


def get_disc_batch(X_real_batch, generator_model, batch_counter, batch_size, noise_dim, noise_scale=0.5):
    # pass noise through the generator to produce a batch of generated samples
    noise_input = sample_noise(noise_scale, batch_size, noise_dim)
    X_disc_gen = generator_model.predict(noise_input, batch_size=batch_size)
    X_disc_real = X_real_batch[:batch_size]
    return X_disc_real, X_disc_gen
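

# Note: clip_weights, gen_batch and get_disc_batch (with its helper
# sample_noise) are not invoked from main() below; they appear to be leftovers
# from, or hooks for, a manual batch-by-batch GAN training loop.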


def last_one(a):
    """Map each row index of a 2D array to the column index of its last non-zero entry."""
    di = {}
    for i, j in zip(*np.where(a > 0)):
        if i in di:
            di[i] = max(di[i], j)
        else:
            di[i] = j
    return di
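

# Example: for
#   a = np.array([[0, 1, 1, 0],
#                 [1, 0, 0, 1]])
# last_one(a) returns {0: 2, 1: 3}.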


def main(args):
    try:
        opts, args = getopt.getopt(args, "c:s", ["config="])
    except getopt.GetoptError:
        print('usage: -c config.json')
        sys.exit(2)

    config_fname = None
    start_from_model = False
    for opt, arg in opts:
        if opt in ("-c", "--config"):
            config_fname = os.path.join('configurations', arg)
        elif opt == '-s':
            start_from_model = True
    if config_fname is None:
        print('usage: -c config.json')
        sys.exit(2)

    # when resuming from a saved model, append to the existing log
    if start_from_model:
        filemode = 'a'
    else:
        filemode = 'w'

    log_path = 'logging/vae_nlg_{}'.format(int(round(time.time() * 1000)))
    os.mkdir(log_path)
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO,
                        filename='{}/evolution.log'.format(log_path), filemode=filemode)

    with open(config_fname, 'r') as json_data:
        config_data = json.load(json_data)

    batch_size = config_data['batch_size']
    epochs = config_data['nb_epochs']
    discriminator_iterations = config_data['discriminator_iterations']
    tweets_path = config_data['tweets_path']
    vocab_path = config_data['vocab_path']
    with open(join(vocab_path, 'vocabulary.pkl'), 'rb') as pkl_file:
        vocab = cPickle.load(pkl_file)

    model_path = config_data['output_path']
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    # =================
    # Load all the Data
    # =================
    delimiter = ''
    noutputs = 9  # + get_number_outputs(config_data)

    logging.info('Load Training Data')
    train_input, train_output, train_weights, train_lex = load_text_gen_data(
        join(tweets_path, 'trainset.csv'),
        join(tweets_path, 'train_lex_features.csv'),
        join(tweets_path, 'train_lex_features_tree.json'),
        config_data,
        vocab,
        noutputs,
        word_based=False
    )
    logging.info('Load Validation Data')
    valid_input, valid_output, _, valid_lex = load_text_gen_data(
        join(tweets_path, 'devset.csv'),
        join(tweets_path, 'devset_lex_features.csv'),
        join(tweets_path, 'dev_lex_features_tree.json'),
        config_data,
        vocab,
        noutputs,
        word_based=False
    )
    logging.info('Load Output Validation Data')
    valid_dev_input, valid_dev_output, _, valid_dev_lex = load_text_gen_data(
        join(tweets_path, 'devset_reduced.csv'),
        join(tweets_path, 'devset_lex_features.csv'),
        join(tweets_path, 'dev_lex_features_tree.json'),
        config_data,
        vocab,
        noutputs,
        random_output=True,
        word_based=False,
        random_first_word=True
    )
    valid_dev_input3, valid_dev_output3, _, valid_dev_lex3 = load_text_gen_data(
        join(tweets_path, 'test_e2e.csv'),
        join(tweets_path, 'devset_lex_features.csv'),
        join(tweets_path, 'dev_lex_features_tree.json'),
        config_data,
        vocab,
        noutputs,
        random_output=True,
        word_based=False,
        random_first_word=True
    )
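
    # Note: the final test set (test_e2e.csv) is paired with the dev-set
    # lexical feature files above.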

    step = K.variable(1., name='step_variable')
    steps_per_epoch = ceil(train_output[0].shape[0] / batch_size)

    # ==========================
    # Define and load the models
    # ==========================
    train_model, test_model, discriminator_models = vae_model(config_data, vocab, step)

    with open(os.path.join(log_path, 'models.txt'), 'wt') as fh:
        fh.write('VAE Model Train\n')
        fh.write('---------------\n')
        train_model.summary(print_fn=lambda x: fh.write(x + '\n'))
        fh.write('VAE Model Test\n')
        fh.write('--------------\n')
        test_model.summary(print_fn=lambda x: fh.write(x + '\n'))
        fh.write('Discriminator Models\n')
        for discriminator in discriminator_models:
            fh.write('{}\n'.format(discriminator.name))
            fh.write('---------------------------\n')
            discriminator.summary(print_fn=lambda x: fh.write(x + '\n'))

    optimizer = Adadelta(lr=1, epsilon=1e-8, rho=0.95, decay=0.0001, clipnorm=10)
    # the training model computes its loss internally, so the Keras-level loss
    # is just the identity on the model output
    train_model.compile(optimizer=optimizer, loss=lambda y_true, y_pred: y_pred)

    disX_train = train_input[-1]   # the last input (input 8) is the discriminator training input
    disy_train = train_input[:-1]  # inputs 1-7 serve as the discriminator targets
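
    # Each discriminator is first trained on its own supervised task: the
    # 'nsent' discriminator receives one-hot targets marking the last sentence
    # position (built with last_one); all others use the corresponding model
    # inputs directly as targets.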
    if config_data.get('pretrain_dirscr', 1) == 1:
        for i, discriminator in enumerate(discriminator_models):
            # the discriminator outputs are class distributions, so use
            # categorical cross-entropy as the loss
            discriminator.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

            if discriminator.name == 'nsent':
                # build one-hot targets marking the last non-zero position per row
                last_ones_idx = last_one(disy_train[i])
                y_train = np.zeros_like(disy_train[i])
                for j in range(y_train.shape[0]):
                    lidx = last_ones_idx[j]
                    y_train[j][lidx] = 1.0

                last_ones_idx = last_one(valid_input[i])
                y_dev = np.zeros_like(valid_input[i])
                for j in range(y_dev.shape[0]):
                    lidx = last_ones_idx[j]
                    y_dev[j][lidx] = 1.0
            else:
                y_train = disy_train[i]
                y_dev = valid_input[i]

            # ========================
            # Pretrain Discriminators
            # ========================
            early_stopping = EarlyStopping(monitor='val_loss', patience=25, mode='min', verbose=1, min_delta=1e-6)
            model_checkpoint = ModelCheckpoint(join(model_path, 'discr_weights_{}.hdf5'.format(discriminator.name)),
                                               save_weights_only=True, save_best_only=True,
                                               monitor='val_loss', mode='min', verbose=1)
            logging.info('Pretrain the {} Discriminator'.format(discriminator.name))
            history = discriminator.fit(
                x=disX_train,
                y=y_train,
                validation_data=(valid_input[-1], y_dev),
                epochs=1,
                batch_size=1024,
                callbacks=[early_stopping, model_checkpoint]
            )
            losses = history.history['loss']
            val_losses = history.history['val_loss']
            val_accs = history.history['val_acc']
            # use 'epoch' here so the outer loop variable i is not shadowed
            for epoch, (loss, val_loss, val_acc) in enumerate(zip(losses, val_losses, val_accs)):
                logging.info('Epoch: {} Loss: {} Val Loss: {} Val Acc: {}'.format(epoch, loss, val_loss, val_acc))

    for i, discriminator in enumerate(discriminator_models):
        logging.info('Loading the {} Discriminator'.format(discriminator.name))
        model_weights = join(model_path, 'discr_weights_{}.hdf5'.format(discriminator.name))
        discriminator.load_weights(model_weights)
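
    # ====================================================================
    # Adversarial training: freeze all discriminators so that only the
    # generator weights are updated by train_model.fit; the discriminators
    # are presumably wired into train_model's internally computed loss.
    # ====================================================================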
    terminate_on_nan = TerminateOnNaN()
    model_checkpoint = ModelCheckpoint(join(model_path, 'weights.{epoch:02d}.hdf5'), period=15, save_weights_only=True)
    tensorboard = TensorBoard(log_dir='logging/tensorboard', histogram_freq=0, write_grads=True, write_images=True)
    step_callback = StepCallback(step, steps_per_epoch)
    lex_output = LexOutputCallbackGAN(test_model, valid_dev_input, valid_dev_lex, 1, vocab, delimiter, fname='{}/test_output'.format(log_path))
    #lex_output2 = LexOutputCallbackGAN(test_model, valid_dev_input2, valid_dev_lex2, 1, vocab, delimiter, fname='{}/test_output_random'.format(log_path))
    lex_output3 = LexOutputCallbackGAN(test_model, valid_dev_input3, valid_dev_lex3, 1, vocab, delimiter, fname='{}/final_test_output_random'.format(log_path))
    callbacks = [step_callback, tensorboard, lex_output, lex_output3, model_checkpoint, terminate_on_nan]

    for i, discriminator in enumerate(discriminator_models):
        set_trainability(discriminator, trainable=False)

    logging.info('Train the Model')
    train_model.fit(
        x=train_input,
        y=train_output,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(valid_input, valid_output),
        sample_weight=train_weights,
        callbacks=callbacks
    )


if __name__ == '__main__':
    main(sys.argv[1:])