"Layer is not connected, no input to return" error when trying to get intermediate-layer predictions with a custom TensorFlow callback

Asked: 2020-06-30 10:01:50

Tags: python tensorflow keras

I'm trying to access the predictions of an intermediate layer of my model during training, using a custom callback. The stripped-down version of the actual code below demonstrates the issue.

import tensorflow as tf
import numpy as np

class Model(tf.keras.Model):
    def __init__(self, input_shape=None, name="cus_model", **kwargs):
        super(Model, self).__init__(name=name, **kwargs)
        
    def build(self, input_shape):
        self.dense1 = tf.keras.layers.Dense(input_shape=input_shape, units=32)
        
    def call(self, input_tensor):
        return self.dense1(input_tensor)

class CustomCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        get_output = tf.keras.backend.function(
            inputs = self.model.layers[0].input,
            outputs = self.model.layers[0].output
        )
        print("Layer output: ",get_output.outputs)

X = np.ones((8,16))
y = np.sum(X, axis=1)

model = Model()
model.compile(optimizer='adam',loss='mean_squared_error', metrics='accuracy')
model.fit(X,y, epochs=8, callbacks=[CustomCallback()])

The callback is written following the suggestion in this answer. It fails with the following error:

<ipython-input-3-635fd53dbffc> in on_epoch_end(self, epoch, logs)
     12     def on_epoch_end(self, epoch, logs=None):
     13         get_output = tf.keras.backend.function(
---> 14             inputs = self.model.layers[0].input,
     15             outputs = self.model.layers[0].output
     16         )
...
AttributeError: Layer dense is not connected, no input to return.

What is causing this, and how can it be fixed?

3 Answers:

Answer 0 (score: 1)

It runs fine for me:

import tensorflow as tf
import numpy as np

X = np.ones((8,16))
y = np.sum(X, axis=1)


class CustomCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        get_output = tf.keras.backend.function(
            inputs = self.model.layers[0].input,
            outputs = self.model.layers[1].output # return output of first dense
        )
        print("\nLayer output: ", get_output(X))


inp = tf.keras.layers.Input((16,))
dense1 = tf.keras.layers.Dense(units=32)(inp)
dense2 = tf.keras.layers.Dense(units=20)(dense1)
model = tf.keras.Model(inp, dense2)

model.compile(optimizer='adam',loss='mean_squared_error', metrics='accuracy')
model.fit(X,y, epochs=8, callbacks=[CustomCallback()])
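
A side note beyond this answer: with a functional model, the same intermediate activations can be read without tf.keras.backend.function by wrapping the tensors of interest in a sub-model. A minimal sketch (the callback name and the reuse of X are my own additions, not part of the original answer):

class ProbeCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        # Functional models expose .input/.output, so a sub-model from the
        # original input to the first Dense layer's output is well defined.
        probe = tf.keras.Model(self.model.input, self.model.layers[1].output)
        print("\nLayer output: ", probe(X, training=False).numpy())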

Answer 1 (score: 1)

I couldn't get self.layers[0].input either, due to the same error, but maybe you can call the function defined in Model directly, like this:

class Model(tf.keras.Model):
    def __init__(self, input_shape=None, name="cus_model", **kwargs):
        super(Model, self).__init__(name=name, **kwargs)
        if not input_shape:
            input_shape = (10,)
        self.dense1 = tf.keras.layers.Dense(input_shape=input_shape, units=32)
        self.dev_dataset = np.ones((8,16))

    def call(self, input_tensor):
        return self.dense1(input_tensor)


class CustomCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
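        # NOTE: the return value is discarded as posted; in practice you
        # would print or store these intermediate predictions.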
        self.model.call(self.model.dev_dataset)


X = np.ones((8,16))
y = np.sum(X, axis=1)

model = Model()
model.compile(optimizer='adam',loss='mean_squared_error', metrics='accuracy')
model.fit(X,y, epochs=1, callbacks=[CustomCallback()])
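
Since the sub-layers of a subclassed model are ordinary attributes, a variant of this idea (my addition, not part of the original answer) is to call an individual layer directly from the callback once it has been built:

class LayerProbeCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        # dense1 and dev_dataset are attributes of the Model defined above,
        # so the layer can be invoked like any other callable.
        activations = self.model.dense1(self.model.dev_dataset)
        print("\ndense1 output shape: ", activations.shape)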

Answer 2 (score: 0)

I guess there is some difference between the functional API and subclassing tf.keras.Model. Rewriting Dense also seems to be necessary. At least it is working now. If anyone knows the reason, please explain.

import tensorflow as tf
import numpy as np

class Model(tf.keras.Model):
    def __init__(self, inputs=None, name="cus_model", **kwargs):
        super(Model, self).__init__(name=name, **kwargs)
        self.inputs = inputs
        self.output_dim = 8

    def build(self, input_shape):
        #tf.keras.layers.Dense(input_shape=input_shape, units=32)
        self.dense1 = self.add_weight(name='weight_vector', shape=(input_shape[1], self.output_dim),
                                      initializer='glorot_uniform', trainable=True,
                                      regularizer=None)

    def call(self, input_tensor):
        return tf.matmul(self.dense1, input_tensor)

class CustomCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        get_output = tf.keras.backend.function(
            inputs = self.model.layers[0].input,
            outputs = self.model.layers[0].output
        )
        print("Layer output: ",get_output.outputs)

X = np.ones((10,8,16))
y = np.sum(X, axis=1)

print(X.shape,y.shape)

inp = tf.keras.layers.Input((8, 16))
model = Model(inp)
# model.compile(optimizer='adam',loss='mean_squared_error', metrics='accuracy')
# model.fit(X,y, epochs=8, callbacks=[CustomCallback()])

optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=False)

for i in range(X.shape[0]):
    with tf.GradientTape() as tape:
        out = model(X[i, :])
        label = tf.cast(y[i], dtype=tf.float32)
        loss = loss_fn(label, out)
        grads = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
        print("step: ", i, "loss:", loss.numpy())