
python - Keras: how to display attention weights in an LSTM model


I built a text classification model using an LSTM with an attention layer. The model trains and performs well, but I cannot display the attention weight (the importance) of each word in a review (the input text). The code used for the model is:

from keras import backend as K
from keras import initializers, regularizers, constraints
from keras.engine.topology import Layer


def dot_product(x, kernel):
    """Dot product that behaves consistently on the Theano and TensorFlow backends."""
    if K.backend() == 'tensorflow':
        # TensorFlow's K.dot needs the kernel expanded to the right rank;
        # squeeze the extra axis back out afterwards.
        return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
    else:
        return K.dot(x, kernel)

class AttentionWithContext(Layer):
    """
    Attention operation, with a context/query vector, for temporal data.

    Follows "Hierarchical Attention Networks for Document Classification"
    by using a context vector to assist the attention.

    # Input shape
        3D tensor with shape: (samples, steps, features).
    # Output shape
        2D tensor with shape: (samples, features).

    How to use:
    Just put it on top of an RNN layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
    The dimensions are inferred based on the output shape of the RNN.

    Note: the layer has been tested with Keras 2.0.6

    Example:
        model.add(LSTM(64, return_sequences=True))
        model.add(AttentionWithContext())
        # next add a Dense layer (for classification/regression) or whatever
    """

    def __init__(self,
                 W_regularizer=None, u_regularizer=None, b_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 bias=True, **kwargs):

        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(AttentionWithContext, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 3

        self.W = self.add_weight((input_shape[-1], input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        if self.bias:
            self.b = self.add_weight((input_shape[-1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)

        self.u = self.add_weight((input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_u'.format(self.name),
                                 regularizer=self.u_regularizer,
                                 constraint=self.u_constraint)

        super(AttentionWithContext, self).build(input_shape)

    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None

    def call(self, x, mask=None):
        uit = dot_product(x, self.W)

        if self.bias:
            uit += self.b

        uit = K.tanh(uit)
        ait = dot_product(uit, self.u)

        a = K.exp(ait)

        # apply mask after the exp; will be re-normalized next
        if mask is not None:
            # cast the mask to floatX to avoid float64 upcasting in Theano
            a *= K.cast(mask, K.floatx())

        # in some cases, especially in the early stages of training, the sum may be
        # almost zero and this results in NaNs. A workaround is to add a very small
        # positive number ε to the sum.
        # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)

    def compute_output_shape(self, input_shape):
        return input_shape[0], input_shape[-1]


EMBEDDING_DIM = 100
max_seq_len = 118
batch_size = 256
num_epochs = 50

from keras.models import Model, Sequential
from keras.layers import Dense, Embedding, Input, Activation, TimeDistributed
from keras.layers import LSTM, Bidirectional, Dropout


def BidLstm():
    #inp = Input(shape=(118, 100))
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix],
    #              trainable=False)(inp)
    model1 = Sequential()
    model1.add(Dense(512, input_shape=(118, 100)))
    model1.add(Activation('relu'))
    #model1.add(Flatten())
    #model1.add(BatchNormalization(input_shape=(100,)))
    model1.add(Bidirectional(LSTM(100, activation="relu", return_sequences=True)))
    model1.add(Dropout(0.1))
    model1.add(TimeDistributed(Dense(200)))
    model1.add(AttentionWithContext())
    model1.add(Dropout(0.25))
    model1.add(Dense(4, activation="softmax"))
    model1.compile(loss='sparse_categorical_crossentropy', optimizer='adam',
                   metrics=['accuracy'])
    model1.summary()
    return model1
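
For reference, a minimal way to train this model might look like the following. X_train with shape (num_samples, 118, 100) and integer class labels y_train with shape (num_samples, 1) are assumptions here, not part of the original code:

# hypothetical training call; X_train / y_train are not defined in the question
model1 = BidLstm()
model1.fit(X_train, y_train,
           batch_size=batch_size,
           epochs=num_epochs,
           validation_split=0.1)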

Best answer

Please take a look at the GitHub repository here: https://github.com/FlorisHoogenboom/keras-han-for-docla

First, explicitly define the weight computation in the attention layer. Second, extract the output of the layer that feeds the attention layer together with the attention layer's trained weights, and multiply them to obtain the per-word attention weights. A rough sketch of this idea is shown below.
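
A minimal sketch of that approach, assuming the trained Sequential model model1 returned by BidLstm() above. The layer indices and the sample_batch array are assumptions for illustration; check model1.summary() to confirm which layer is AttentionWithContext and which layer feeds it:

import numpy as np
from keras import backend as K
from keras.models import Model

# Indices assumed from the order of model1.add() calls in BidLstm():
# layers[4] is TimeDistributed(Dense(200)), layers[5] is AttentionWithContext.
td_layer = model1.layers[4]
att_layer = model1.layers[5]

# Trained attention parameters, in the order they were added in build(): W, b, u
W, b, u = att_layer.get_weights()

# Sub-model exposing the input to the attention layer: shape (samples, 118, 200)
att_input_model = Model(inputs=model1.input, outputs=td_layer.output)
x = att_input_model.predict(sample_batch)   # sample_batch: (n, 118, 100), assumed

# Re-run the computation from AttentionWithContext.call() in NumPy
uit = np.tanh(np.dot(x, W) + b)              # (n, 118, 200)
ait = np.dot(uit, u)                         # (n, 118)
a = np.exp(ait)
a /= a.sum(axis=1, keepdims=True) + K.epsilon()

# a[i, t] is the attention weight of time step (word) t in sample i;
# pair it with the tokens of sample i to highlight the most important words.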

Regarding python - Keras: how to display attention weights in an LSTM model, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/52152054/
