AttributeError: 'Tensor' object has no attribute '_keras_history' while implementing a co-attention layer

Asked: 2018-07-16 10:33:19

Tags: python keras nlp deep-learning artificial-intelligence

Hi everyone. I'm trying to build a custom co-attention layer for a matching task, and I've run into an error that has me very confused.

model = Model(inputs=[ans_input, ques_input], outputs=output)

When the line above runs, my program stops and throws this error:

AttributeError: 'Tensor' object has no attribute '_keras_history'

This means my model isn't a complete graph. I've tried many of the approaches I found on Stack Overflow and in other blog posts, but none of them worked. :(
I'll paste my model below. Thanks for your help :)

import time

import numpy as np

from keras import backend as K
from keras.layers import Embedding, Input, LSTM, TimeDistributed, Lambda
from keras.layers.core import *
from keras.layers.merge import concatenate
from keras.layers.pooling import GlobalMaxPooling1D
from keras.models import *
from keras.optimizers import *

from dialog.keras_lstm.k_call import *
from dialog.model.keras_himodel import ZeroMaskedEntries, logger


class Co_AttLayer(Layer):
    def __init__(self, **kwargs):
        # self.input_spec = [InputSpec(ndim=3)]
        super(Co_AttLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 2
        assert len(input_shape[0]) == len(input_shape[1])
        super(Co_AttLayer, self).build(input_shape)

    def cosine_sim(self, x):
        ans_ss = K.sum(K.square(x[0]), axis=2, keepdims=True)
        ans_norm = K.sqrt(K.maximum(ans_ss, K.epsilon()))

        ques_ss = K.sum(K.square(x[1]), axis=2, keepdims=True)
        ques_norm = K.sqrt(K.maximum(ques_ss, K.epsilon()))
        tr_ques_norm = K.permute_dimensions(ques_norm, (0, 2, 1))

        tr_ques = K.permute_dimensions(x[1], (0, 2, 1))
        ss = K.batch_dot(x[0], tr_ques, axes=[2, 1])

        den = K.batch_dot(ans_norm, tr_ques_norm, axes=[2, 1])
        return ss / den

    def call(self, x, mask=None):
        cosine = Lambda(self.cosine_sim)(x)
        coqWij = K.softmax(cosine)
        print(x[1].shape, coqWij.shape)
        ai = K.dot(coqWij, x[1])  # (N A Q) (N Q L)
        coaWij = K.softmax(K.permute_dimensions(cosine, (0, 2, 1)))
        qj = K.dot(coaWij, x[0])
        print(qj.shape, ai.shape)
        return concatenate([ai, qj], axis=2)

    def compute_output_shape(self, input_shape):
        return input_shape


def build_QAmatch_model(opts, vocab_size=0, maxlen=300, embedd_dim=50, init_mean_value=None):

    ans_input = Input(shape=(maxlen,), dtype='int32', name='ans_input')
    ques_input = Input(shape=(maxlen,), dtype='int32', name='ques_input')
    embedding = Embedding(output_dim=embedd_dim, input_dim=vocab_size, input_length=maxlen,
                          mask_zero=True, name='embedding')
    dropout = Dropout(opts.dropout, name='dropout')
    lstm = LSTM(opts.lstm_units, return_sequences=True, name='lstm')
    hidden_layer = Dense(units=opts.hidden_units, name='hidden_layer')
    output_layer = Dense(units=1, name='output_layer')
    zme = ZeroMaskedEntries(name='maskedout')
    ans_maskedout = zme(embedding(ans_input))
    ques_maskedout = zme(embedding(ques_input))
    ans_lstm = lstm(dropout(ans_maskedout))  # (A V)
    ques_lstm = lstm(dropout(ques_maskedout))  # (Q V)

    co_att = Co_AttLayer()([ans_lstm, ques_lstm])

    def slice(x, index):
        # take the index-th slice of the co-attention output along axis 2
        return x[:, :, index, :]

    ans_att = Lambda(slice, output_shape=(maxlen, embedd_dim), arguments={'index': 0})(co_att)
    ques_att = Lambda(slice, output_shape=(maxlen, embedd_dim), arguments={'index': 1})(co_att)
    merged_ques = concatenate([ques_lstm, ques_att, ques_maskedout], axis=2)
    merged_ans = concatenate([ans_lstm, ans_att, ans_maskedout], axis=2)
    ans_vec = GlobalMaxPooling1D(name='ans_pooling')(merged_ans)
    ques_vec = GlobalMaxPooling1D(name='ques_pooling')(merged_ques)
    ans_hid = hidden_layer(ans_vec)
    ques_hid = hidden_layer(ques_vec)
    merged_hid = concatenate([ans_hid, ques_hid], axis=-1)
    merged_all = concatenate([merged_hid, ans_hid + ques_hid, ans_hid - ques_hid, K.abs(ans_hid - ques_hid)], axis=-1)
    output = output_layer(merged_all)
    model = Model(inputs=[ans_input, ques_input], outputs=output)
    if init_mean_value:
        logger.info("Initialise output layer bias with log(y_mean/1-y_mean)")
        bias_value = (np.log(init_mean_value) - np.log(1 - init_mean_value)).astype(K.floatx())
        model.layers[-1].b.set_value(bias_value)

    if verbose:
        model.summary()

    start_time = time.time()
    model.compile(loss='mse', optimizer='rmsprop')
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f s" % total_time)

    return model

1 Answer:

Answer 0 (score: 0)

I can't reproduce your code, but I think the error happens here:

merged_all = concatenate([merged_hid, ans_hid + ques_hid, ans_hid - ques_hid,
                          K.abs(ans_hid - ques_hid)], axis=-1)

The backend operations +, -, and K.abs are not wrapped in Lambda layers, so the tensors they produce are not Keras tensors and lack certain attributes, such as _keras_history. You can wrap them like this:

l1 = Lambda(lambda x: x[0] + x[1])([ans_hid, ques_hid])         # element-wise sum
l2 = Lambda(lambda x: x[0] - x[1])([ans_hid, ques_hid])         # element-wise difference
l3 = Lambda(lambda x: K.abs(x[0] - x[1]))([ans_hid, ques_hid])  # absolute difference

merged_all = concatenate([merged_hid, l1, l2, l3], axis=-1)

Note: not tested.
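
As an alternative to the Lambda wrappers, recent Keras releases also provide built-in Add and Subtract merge layers that produce proper Keras tensors (Subtract appeared around Keras 2.0.7, so check your version); only the absolute value still needs a Lambda. A minimal sketch under that assumption:

from keras.layers import Add, Subtract

# Built-in merge layers record _keras_history automatically
sum_hid = Add()([ans_hid, ques_hid])
diff_hid = Subtract()([ans_hid, ques_hid])
# K.abs has no layer counterpart, so it still needs a Lambda wrapper
abs_diff = Lambda(lambda t: K.abs(t))(diff_hid)

merged_all = concatenate([merged_hid, sum_hid, diff_hid, abs_diff], axis=-1)

Either way, the rule of thumb is the same: every tensor passed to Model or concatenate must be the output of a Keras layer, never of a raw backend op.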