siamese_attentive_conv.py
from keras import Input
from keras.models import Model
from keras.layers import Conv1D, GlobalMaxPooling1D, Activation, dot, Lambda
from attention_layer import AttentionLayer
import keras.backend as K


# Euclidean distance between two equally shaped tensors (from the Keras
# Siamese example); used as the model output so it can be trained with a
# contrastive loss.
def euclidean_distance(vects):
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True),
                            K.epsilon()))


def eucl_dist_output_shape(shapes):
    shape1, shape2 = shapes
    return (shape1[0], 1)


# Shared convolutional encoder: variable-length sequences of 300-d word
# vectors pass through a width-4 Conv1D with 300 filters.
encoder_input = Input(shape=(None, 300), dtype='float32')
conv_4 = Conv1D(300,
                4,
                padding='same',
                activation='relu',
                strides=1)(encoder_input)
shared = Model(encoder_input, conv_4)

# Both sides of the Siamese network reuse the same encoder weights.
input_1 = Input(shape=(None, 300), dtype='float32')
input_2 = Input(shape=(None, 300), dtype='float32')
out_1 = shared(input_1)
out_2 = shared(input_2)

# Soft alignment matrix between the two convolved sequences.
attention = AttentionLayer()([out_1, out_2])

# Attentive pooling of out_1 (column-wise): max-pool the attention matrix
# over its step axis, softmax-normalise the scores and use them as weights
# over out_1's time steps.
att_1 = GlobalMaxPooling1D()(attention)
att_1 = Activation('softmax')(att_1)
out_1 = dot([att_1, out_1], axes=1)

# Attentive pooling of out_2 (row-wise): transpose the attention matrix and
# repeat the same procedure.
attention_transposed = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(attention)
att_2 = GlobalMaxPooling1D()(attention_transposed)
att_2 = Activation('softmax')(att_2)
out_2 = dot([att_2, out_2], axes=1)

# Final output: Euclidean distance between the two attentively pooled
# sentence representations.
distance = Lambda(euclidean_distance,
                  output_shape=eucl_dist_output_shape)([out_1, out_2])
model = Model(inputs=[input_1, input_2], outputs=distance)
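

# ---------------------------------------------------------------------------
# Everything below this line is an illustrative sketch, not part of the
# original script.
#
# 1) The attention_layer module imported above is not included in this file.
#    A minimal pairwise match layer with the interface used here (two
#    (batch, steps, features) inputs -> one (batch, steps_1, steps_2) score
#    matrix) might look roughly like the class below. The bilinear form and
#    all names are assumptions, and the class is deliberately called
#    AttentionLayerSketch so it does not shadow the real import.
from keras.layers import Layer  # keras.engine.topology.Layer in older Keras
import numpy as np


class AttentionLayerSketch(Layer):
    def build(self, input_shape):
        dim = input_shape[0][-1]
        self.W = self.add_weight(name='att_W',
                                 shape=(dim, dim),
                                 initializer='glorot_uniform',
                                 trainable=True)
        super(AttentionLayerSketch, self).build(input_shape)

    def call(self, inputs):
        x1, x2 = inputs  # (batch, t1, d) and (batch, t2, d)
        # Bilinear match scores: tanh(x1 . W . x2^T) -> (batch, t1, t2)
        scores = K.batch_dot(K.dot(x1, self.W), x2, axes=[2, 2])
        return K.tanh(scores)

    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], input_shape[0][1], input_shape[1][1])


# 2) One common way to train a Siamese distance model like the one above is
#    the contrastive loss from the Keras Siamese example. The margin, the
#    optimiser and the dummy data shapes below are assumptions, not settings
#    taken from this repository.
def contrastive_loss(y_true, y_pred):
    margin = 1
    return K.mean(y_true * K.square(y_pred) +
                  (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))


model.compile(loss=contrastive_loss, optimizer='adam')

# Dummy batch of 32 sentence pairs, each 20 tokens of 300-d embeddings, with
# binary similarity labels (1 = similar pair, 0 = dissimilar pair).
x1 = np.random.rand(32, 20, 300).astype('float32')
x2 = np.random.rand(32, 20, 300).astype('float32')
y = np.random.randint(0, 2, size=(32, 1)).astype('float32')
model.fit([x1, x2], y, batch_size=16, epochs=1)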