bi-lstm.py
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding, BatchNormalization, Dropout
from keras.layers import LSTM, Bidirectional
from keras.datasets import imdb
from keras.callbacks import ModelCheckpoint
import numpy as np
max_features = 20000
maxlen = 80 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
print('Loading data...')
(x_train, y_train_tmp), (x_test, y_test_tmp) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('one-hot encoding')
# one-hot encode the 0/1 labels into an (n, 2) matrix using NumPy only
y_train = np.zeros((len(y_train_tmp), 2))
y_test = np.zeros((len(y_test_tmp), 2))
y_train[np.arange(len(y_train_tmp)), y_train_tmp] = 1
y_test[np.arange(len(y_test_tmp)), y_test_tmp] = 1
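# The same encoding can be written with Keras' own helper (a sketch; assumes
# to_categorical is available in this Keras version):
# from keras.utils import to_categorical
# y_train = to_categorical(y_train_tmp, num_classes=2)
# y_test = to_categorical(y_test_tmp, num_classes=2)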
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128))
model.add(BatchNormalization())  # optional: batch-normalize the embedding outputs
model.add(Bidirectional(LSTM(128, dropout=0.2,
                             recurrent_dropout=0.2,
                             return_sequences=True)))
model.add(Bidirectional(LSTM(32)))  # second Bi-LSTM layer; drop it for faster training
model.add(Dropout(0.5))
model.add(Dense(2, activation='sigmoid'))  # two output units, one per class, matching the one-hot labels
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
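# Optional: print the layer stack and parameter counts before training.
model.summary()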
# checkpoint
filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
print('Train...')
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=3,
          validation_data=(x_test, y_test),
          callbacks=callbacks_list,
          verbose=0)  # verbose=0 hides the progress bar; the checkpoint callback still reports saves
score, acc = model.evaluate(x_test, y_test,
                            batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
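# Minimal inference sketch (assumes the checkpoint callback above saved at least
# one full model; the filename below is illustrative, not the actual output name):
# from keras.models import load_model
# best = load_model('weights-improvement-03-0.84.hdf5')
# preds = best.predict(x_test[:5], batch_size=batch_size)
# print(np.argmax(preds, axis=1))  # predicted class per review: 0 = negative, 1 = positive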