I have been trying to customize an LSTM layer to further improve my model. However, with the custom LSTM in place, an error occurs at the TimeDistributed layer that follows it.
The error is:
Traceback (most recent call last):
  File "E:/PycharmProjects/dialogResearch/dialog/classifier.py", line 60, in <module>
    model = build_model(word_dict, args.max_len, args.max_sents, args.embedding_dim)
  File "E:\PycharmProjects\dialogResearch\dialog\model\keras_himodel.py", line 177, in build_model
    l_dense = TimeDistributed(Dense(200))(l_lstm)
  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\topology.py", line 592, in __call__
    self.build(input_shapes[0])
  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\layers\wrappers.py", line 162, in build
    assert len(input_shape) >= 3
AssertionError
The code of my custom LSTM is:
# Imports assumed for this snippet (Keras 2.x backend API)
from keras import backend as K
from keras import initializers
from keras.engine.topology import Layer


class CustomLSTM(Layer):
    def __init__(self, output_dim, return_sequences, **kwargs):
        self.init = initializers.get('normal')
        # self.input_spec = [InputSpec(ndim=3)]
        self.output_dim = output_dim
        self.return_sequences = return_sequences
        super(CustomLSTM, self).__init__(**kwargs)

    def build(self, input_shape):
        # Expects a 3-D input: (batch, timesteps, features).
        assert len(input_shape) == 3
        self.original_shape = input_shape
        # Input-to-hidden weights for the input, forget, output and update gates.
        self.Wi = self.add_weight('Wi', (input_shape[-1], self.output_dim), initializer=self.init, trainable=True)
        self.Wf = self.add_weight('Wf', (input_shape[-1], self.output_dim), initializer=self.init, trainable=True)
        self.Wo = self.add_weight('Wo', (input_shape[-1], self.output_dim), initializer=self.init, trainable=True)
        self.Wu = self.add_weight('Wu', (input_shape[-1], self.output_dim), initializer=self.init, trainable=True)
        # Hidden-to-hidden (recurrent) weights.
        self.Ui = self.add_weight('Ui', (self.output_dim, self.output_dim), initializer=self.init, trainable=True)
        self.Uf = self.add_weight('Uf', (self.output_dim, self.output_dim), initializer=self.init, trainable=True)
        self.Uo = self.add_weight('Uo', (self.output_dim, self.output_dim), initializer=self.init, trainable=True)
        self.Uu = self.add_weight('Uu', (self.output_dim, self.output_dim), initializer=self.init, trainable=True)
        # Biases.
        self.bi = self.add_weight('bi', (self.output_dim,), initializer=self.init, trainable=True)
        self.bf = self.add_weight('bf', (self.output_dim,), initializer=self.init, trainable=True)
        self.bo = self.add_weight('bo', (self.output_dim,), initializer=self.init, trainable=True)
        self.bu = self.add_weight('bu', (self.output_dim,), initializer=self.init, trainable=True)
        super(CustomLSTM, self).build(input_shape)

    def step_op(self, step_in, states):
        i = K.softmax(K.dot(step_in, self.Wi) + K.dot(states[0], self.Ui) + self.bi)
        f = K.softmax(K.dot(step_in, self.Wf) + K.dot(states[0], self.Uf) + self.bf)
        o = K.softmax(K.dot(step_in, self.Wo) + K.dot(states[0], self.Uo) + self.bo)
        u = K.tanh(K.dot(step_in, self.Wu) + K.dot(states[0], self.Uu) + self.bu)
        c = i * u + f * states[1]
        h = o * K.tanh(c)
        return h, [h, c]

    def call(self, x, mask=None):
        init_states = [K.zeros((K.shape(x)[0], self.output_dim)),
                       K.zeros((K.shape(x)[0], self.output_dim))]
        # K.rnn returns (last_output, outputs, new_states).
        outputs = K.rnn(self.step_op, x, init_states)
        if self.return_sequences:
            return outputs[1]
        else:
            return outputs[0]

    def compute_output_shape(self, input_shape):
        return input_shape[0], input_shape[-1]
The model is:
def build_model(words, max_len, max_sents, embedding_dim):
    # Sentence-level encoder: embedding -> custom LSTM -> dense -> attention.
    sentence_input = Input(shape=(max_len,), dtype='int32')
    embedding_layer = Embedding(len(words) + 1,
                                embedding_dim,
                                input_length=max_len,
                                trainable=True)
    embedded_sequences = embedding_layer(sentence_input)
    l_lstm = CustomLSTM(200, return_sequences=True)(embedded_sequences)
    print(l_lstm.get_shape())
    l_dense = TimeDistributed(Dense(200))(l_lstm)
    l_att = AttLayer()(l_dense)
    sentEncoder = Model(sentence_input, l_att)

    # Document-level encoder: apply the sentence encoder to every sentence.
    review_input = Input(shape=(max_sents, max_len), dtype='int32')
    review_encoder = TimeDistributed(sentEncoder)(review_input)
    l_lstm_sent = CustomLSTM(200, return_sequences=True)(review_encoder)
    l_dense_sent = TimeDistributed(Dense(200))(l_lstm_sent)
    l_att_sent = AttLayer()(l_dense_sent)
    preds = Dense(3, activation='softmax')(l_att_sent)

    model = Model(review_input, preds)
    optimizer = Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=[precision, recall, f1, 'acc'])
    return model
Thanks for your help.
Answer 0 (score: 0):
I think the error occurs because the shape returned by compute_output_shape is incorrect when return_sequences=True. I would try the following:
def compute_output_shape(self, input_shape):
    if self.return_sequences:
        # Report the full 3-D shape so that TimeDistributed sees at least 3 dimensions.
        return input_shape
    return (input_shape[0], input_shape[-1])
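Note that this patch reports the input feature size as the last dimension. If output_dim can differ from the input feature dimension, you could instead spell the shape out explicitly. This is only a sketch, assuming output_dim is the size of the hidden state h returned by step_op:

def compute_output_shape(self, input_shape):
    if self.return_sequences:
        # (batch, timesteps, output_dim): one hidden vector per timestep.
        return (input_shape[0], input_shape[1], self.output_dim)
    # (batch, output_dim): only the last hidden vector.
    return (input_shape[0], self.output_dim)

Either way, the reported output is 3-D again when return_sequences=True, which satisfies the assert len(input_shape) >= 3 check inside TimeDistributed's build.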