Unhashable Type TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key

Date: 2021-02-06 06:50:02

Tags: tensorflow keras lstm autoencoder

I am trying to implement a custom variational autoencoder. Below is the code to reproduce the issue.
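For context, the snippets below rely on the following imports and hyperparameters, which the post defines elsewhere; the values here are placeholder assumptions:

import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa  # for tfa.seq2seq.sequence_loss

# Placeholder values (assumptions; the original post defines these elsewhere)
vocab_tar_size = 8000     # target vocabulary size
embedding_dim = 256
units = 512               # LSTM units
max_length_output = 40    # max target sequence length
BATCH_SIZE = 64
steps_per_epoch = 100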

epsilon_std = 1.0

vx = tf.keras.layers.Input(batch_shape=(None, max_length_output), name='vae_enc_in')

vx_emb = tf.keras.layers.Embedding(
    vocab_tar_size,
    embedding_dim,
    input_length=max_length_output,
    name='vae_enc_emb'
)(vx)

vxbi = tf.keras.layers.Bidirectional(
    tf.keras.layers.LSTM(units, return_sequences=False, recurrent_dropout=0.2, name='vae_enc_lstm'), merge_mode='concat'
)(vx_emb)

vx_drop = tf.keras.layers.Dropout(0.2, name='vae_enc_drop')(vxbi)

vx_dense = tf.keras.layers.Dense(units, activation='linear', name='vae_enc_dense')(vx_drop)

vx_elu = tf.keras.layers.ELU(name='vae_enc_elu')(vx_dense)

vx_drop1 = tf.keras.layers.Dropout(0.2, name='vae_enc_drop2')(vx_elu)
z_mean = tf.keras.layers.Dense(20, name='vae_enc_dense2')(vx_drop1)
z_log_var = tf.keras.layers.Dense(20, name='vae_enc_dense3')(vx_drop1)

def sampling(args):
    z_mean, z_log_var = args
    epsilon = tf.random.normal(shape=(BATCH_SIZE, 20), mean=0.,
                               stddev=epsilon_std)
    # Reparameterization trick: z = mu + sigma * epsilon
    return z_mean + tf.math.exp(z_log_var / 2) * epsilon


z = tf.keras.layers.Lambda(sampling, output_shape=(20,), name='vae_lambda')([z_mean, z_log_var])
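Note that sampling hard-codes BATCH_SIZE, which ties the graph to a single batch size. A common batch-size-agnostic variant (a sketch, not part of the original post) derives the batch dimension from z_mean instead:

def sampling_dynamic(args):
    z_mean, z_log_var = args
    # Read the batch dimension at runtime instead of hard-coding BATCH_SIZE
    batch = tf.shape(z_mean)[0]
    epsilon = tf.random.normal(shape=(batch, 20), mean=0., stddev=epsilon_std)
    return z_mean + tf.math.exp(z_log_var / 2) * epsilon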

repeated_context = tf.keras.layers.RepeatVector(max_length_output, name='vae_repeat')
decoder_h = tf.keras.layers.LSTM(units, return_sequences=True, recurrent_dropout=0.2, name='vae_dec_lstm')
decoder_mean = tf.keras.layers.TimeDistributed(
    tf.keras.layers.Dense(vocab_tar_size, activation='linear', name='vae_dec_dense'),  # renamed: 'vae_dec_lstm' is already taken by the decoder LSTM above
    name='vae_dec_time_dist'
)

h_decoded = decoder_h(repeated_context(z))
x_decoded_mean = decoder_mean(h_decoded)

def zero_loss(y_true, y_pred):
    # Dummy loss: the real VAE loss is attached via add_loss() in VAELayer below,
    # but Keras still requires a loss function at compile time.
    print("ZERO LOSS")
    return tf.zeros_like(y_pred)

Then I create the custom VAE layer:

class VAELayer(tf.keras.layers.Layer):
    def __init__(self, batch_size, max_len, **kwargs):
        self.is_placeholder = True
        super(VAELayer, self).__init__(**kwargs)
        self.target_weights = tf.constant(np.ones((batch_size, max_len)), tf.float32)
    
    def vae_loss(self, x, x_decoded_mean):
        #xent_loss = K.sum(metrics.categorical_crossentropy(x, x_decoded_mean), axis=-1)
        labels = tf.cast(x, tf.int32)
        xent_loss = tf.math.reduce_sum(
                tfa.seq2seq.sequence_loss(
                    x_decoded_mean, 
                    labels, 
                    weights=self.target_weights,
                    average_across_timesteps=False,
                    average_across_batch=False
                ), 
                axis=-1
        )
        #softmax_loss_function=softmax_loss_f), axis=-1)#, for sampled softmax
        kl_loss = - 0.5 * tf.math.reduce_sum(1 + z_log_var - tf.math.square(z_mean) - tf.math.exp(z_log_var), axis=-1)
        return tf.math.reduce_mean(xent_loss + kl_loss)
    
    def call(self, inputs):
        x = inputs[0]
        x_decoded_mean = inputs[1]
        print(x.shape, x_decoded_mean.shape)
        loss = self.vae_loss(x, x_decoded_mean)
        print("Adding loss")
        self.add_loss(loss, inputs=inputs)
        print("Returning ones like")
        # Dummy output; the layer exists only to attach the VAE loss.
        return tf.ones_like(x)

The model compiles successfully, and I can even generate test output by calling it. But when I try to train, it raises the error: TypeError: Tensors are unhashable. (KerasTensor(type_spec=TensorSpec(shape=(), dtype=tf.float32, name=None), name='tf.math.reduce_sum_25/Sum:0', description="created by layer 'tf.math.reduce_sum_25'")) Instead, use tensor.ref() as the key.

Below is the code that compiles and fits the model:

loss_layer = VAELayer(BATCH_SIZE, max_length_output)([vx, x_decoded_mean])
vae = tf.keras.Model(vx, [loss_layer], name='VariationalAutoEncoderLayer')
opt = tf.keras.optimizers.Adam(learning_rate=0.01)  # or SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
vae.compile(optimizer=opt, loss=[zero_loss])
def vae_sentence_generator():
    # train_dataset yields (input, target) pairs; the autoencoder only needs the targets.
    for ip, tg in train_dataset:
        yield tg.numpy()

vae.fit(vae_sentence_generator(), steps_per_epoch=steps_per_epoch, epochs=10)
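For reference, this error commonly surfaces in TF 2.4+ when a layer's loss closes over KerasTensors created outside the layer, as vae_loss does with the outer-scope z_mean and z_log_var. One possible workaround, shown as a hedged sketch rather than a confirmed fix, is to pass those tensors into the layer explicitly so the loss only touches the layer's own inputs:

class VAELayerV2(tf.keras.layers.Layer):
    """Sketch of a possible workaround (assumption, untested): take z_mean and
    z_log_var as layer inputs instead of capturing them from the enclosing scope."""
    def __init__(self, batch_size, max_len, **kwargs):
        super().__init__(**kwargs)
        self.target_weights = tf.constant(np.ones((batch_size, max_len)), tf.float32)

    def call(self, inputs):
        x, x_decoded_mean, z_mean_in, z_log_var_in = inputs
        labels = tf.cast(x, tf.int32)
        xent_loss = tf.math.reduce_sum(
            tfa.seq2seq.sequence_loss(
                x_decoded_mean, labels,
                weights=self.target_weights,
                average_across_timesteps=False,
                average_across_batch=False),
            axis=-1)
        kl_loss = -0.5 * tf.math.reduce_sum(
            1 + z_log_var_in - tf.math.square(z_mean_in) - tf.math.exp(z_log_var_in),
            axis=-1)
        self.add_loss(tf.math.reduce_mean(xent_loss + kl_loss))
        # Dummy output, as in the original layer
        return tf.ones_like(x)

loss_layer = VAELayerV2(BATCH_SIZE, max_length_output)(
    [vx, x_decoded_mean, z_mean, z_log_var])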

0 Answers:

No answers yet.