hierarchical_attention_network.py
from keras import backend as K
from keras.models import Model
from keras import initializers
from keras.layers import Dense, Input, Layer, Attention
from keras.layers import Embedding, GRU, Bidirectional, TimeDistributed
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
class HanAttentionLayer(Layer):
    """Word/sentence-level attention layer for a Hierarchical Attention Network.

    Projects each timestep with uit = tanh(x W + b), scores it against a learned
    context vector u, and returns the attention-weighted sum of the inputs.
    """

    def __init__(self, attention_dim, **kwargs):
        self.init = initializers.RandomNormal()
        self.supports_masking = True
        self.attention_dim = attention_dim
        super(HanAttentionLayer, self).__init__(**kwargs)

    def get_config(self):
        config = super(HanAttentionLayer, self).get_config().copy()
        # Serialize only constructor arguments so from_config() can rebuild the layer.
        config.update({'attention_dim': self.attention_dim})
        return config

    def build(self, input_shape):
        assert len(input_shape) == 3
        self.W = K.variable(self.init((input_shape[-1], self.attention_dim)))
        self.b = K.variable(self.init((self.attention_dim,)))
        self.u = K.variable(self.init((self.attention_dim, 1)))
        self._trainable_weights = [self.W, self.b, self.u]
        super(HanAttentionLayer, self).build(input_shape)

    def compute_mask(self, inputs, mask=None):
        return mask

    def call(self, x, mask=None):
        # x: [batch_size, seq_len, input_dim]
        # uit = tanh(x W + b): [batch_size, seq_len, attention_dim]
        uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b))
        # Unnormalized attention scores against the context vector u: [batch_size, seq_len]
        ait = K.exp(K.squeeze(K.dot(uit, self.u), -1))
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting and zero out padded steps
            ait *= K.cast(mask, K.floatx())
        # Softmax normalization; epsilon guards against division by zero when all steps are masked
        ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        # Attention-weighted sum over the time axis: [batch_size, input_dim]
        weighted_input = x * K.expand_dims(ait)
        output = K.sum(weighted_input, axis=1)
        return output

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])
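

# --- Usage sketch (not part of the original file) ---------------------------
# A minimal example of wiring HanAttentionLayer into a two-level HAN:
# a word-level encoder pools word annotations into sentence vectors, and a
# sentence-level encoder pools sentence vectors into a document vector.
# All hyperparameters and the number of output classes below are hypothetical
# placeholders chosen for illustration, not values taken from this file.
if __name__ == '__main__':
    MAX_SENT_LENGTH = 100   # words per sentence (assumed)
    MAX_SENTS = 15          # sentences per document (assumed)
    VOCAB_SIZE = 20000      # vocabulary size (assumed)
    EMBEDDING_DIM = 100
    GRU_UNITS = 100
    ATTENTION_DIM = 100
    NUM_CLASSES = 5

    # Word-level encoder: embed words, contextualize them with a bidirectional
    # GRU, then pool the word annotations with word-level attention.
    sentence_input = Input(shape=(MAX_SENT_LENGTH,), dtype='int32')
    embedded = Embedding(VOCAB_SIZE, EMBEDDING_DIM)(sentence_input)
    word_annotations = Bidirectional(GRU(GRU_UNITS, return_sequences=True))(embedded)
    sentence_vector = HanAttentionLayer(ATTENTION_DIM)(word_annotations)
    sentence_encoder = Model(sentence_input, sentence_vector)

    # Sentence-level encoder: apply the word encoder to every sentence of a
    # document, then attend over the resulting sentence vectors.
    document_input = Input(shape=(MAX_SENTS, MAX_SENT_LENGTH), dtype='int32')
    sentence_sequence = TimeDistributed(sentence_encoder)(document_input)
    sentence_annotations = Bidirectional(GRU(GRU_UNITS, return_sequences=True))(sentence_sequence)
    document_vector = HanAttentionLayer(ATTENTION_DIM)(sentence_annotations)
    predictions = Dense(NUM_CLASSES, activation='softmax')(document_vector)

    model = Model(document_input, predictions)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()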