ValueError: 自定义损失没有为任何变量提供梯度(No gradients provided for any variable)

时间:2020-12-28 17:15:17

标签: machine-learning deep-learning

我有以下代码告诉我“Tensorflow 没有为任何变量提供梯度”

这里可以找到可以执行的代码:

Reproducible Code


import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Flatten, Dense, Dropout, Lambda
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.python.keras.utils.vis_utils import plot_model
from tensorflow.keras import backend as K

num_classes = 2
pseudo_lab_class = 1280

# Bug fix: `img_height` / `img_width` were used below but never defined,
# which raises a NameError before the model is even built. The input layer
# is declared as 224x224x3, so the rescaling layers must match that size.
img_height, img_width = 224, 224

input = Input(shape=(img_height, img_width, 3), name="base_input")

# Main classification branch: small CNN ending in a softmax over num_classes.
x = tf.keras.layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3))(input)
x = tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu')(x)
x = tf.keras.layers.MaxPooling2D()(x)
x = tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu')(x)
x = tf.keras.layers.MaxPooling2D()(x)
x = tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu')(x)
x = tf.keras.layers.MaxPooling2D()(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(128, activation='relu')(x)
y = tf.keras.layers.Dense(num_classes, activation='softmax')(x)

# Auxiliary head predicting pseudo-label classes from the same CNN features.
y2 = tf.keras.layers.Dense(pseudo_lab_class, activation='softmax')(x)

# Second branch: pretrained backbone on the same input.
# NOTE(review): the input is rescaled to [0, 1] and then passed through
# mobilenet_v2.preprocess_input (which expects raw [0, 255] pixels) —
# this double-scales the input; confirm whether both steps are intended.
x2 = tf.keras.layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3))(input)
x2 = tf.keras.applications.mobilenet_v2.preprocess_input(x2)

# `pretrainmodel` must be defined before this point (e.g. a frozen
# tf.keras.applications.MobileNetV2 base) — it is not defined in this snippet.
x2 = pretrainmodel(x2)
#x2 = aux_model(x2, training=False)

x2 = tf.keras.layers.Dropout(0.2)(x2)
y_pseudo_lab = tf.keras.layers.Dense(pseudo_lab_class)(x2)

# All three heads are concatenated into one output tensor so a single
# custom loss can slice them apart again: widths num_classes + 1280 + 1280.
concat = tf.keras.layers.Concatenate()([y, y2, y_pseudo_lab])

archi = Model(inputs=input, outputs=concat)
# `custom_loss` must be defined before compile() runs (see the loss
# factory defined below in this file).
archi.compile(loss=custom_loss(0.5), optimizer='adam', metrics=['accuracy'])


# `train_list_ds` / `val_ds` are expected to be tf.data datasets defined
# elsewhere — not shown in this snippet.
archi.fit(
  train_list_ds,
  validation_data=val_ds,
  epochs=3
)


1 个答案:

答案 0 :(得分:1)

关于梯度问题:原因是您在自定义损失函数中忘记了返回损失值(缺少 return 语句),因此 TensorFlow 无法对任何变量求梯度。

def custom_loss(lam):
  """Build a two-term loss for the concatenated three-head model output.

  Args:
    lam: weight applied to the second (pseudo-label consistency) term.

  Returns:
    A Keras-compatible loss function ``loss(y_true, y_pred)`` that returns
    ``l1 + lam * l2`` (a per-sample tensor Keras reduces to a scalar).
  """
  def loss(y_true, y_pred):
    # Split the concatenated prediction back into its three heads.
    # NOTE(review): the model concatenates num_classes (2) + 1280 + 1280
    # units, so these boundaries (1 / 1280) look inconsistent with the
    # architecture — confirm the intended head widths before training.
    y_pred_n_aux = y_pred[:, :1]
    y_pred_aux = y_pred[:, 1:1280]
    pseudo_labels = y_pred[:, 1280:]
    # Bug fix: `labels` and `logits` were swapped — per the TF API, the
    # ground truth goes in `labels` and the network output in `logits`.
    l1 = tf.nn.softmax_cross_entropy_with_logits(labels=y_true,
                                                 logits=y_pred_n_aux)
    # Consistency term: the auxiliary softmax head serves as soft labels
    # for the pseudo-label head's logits.
    l2 = tf.nn.softmax_cross_entropy_with_logits(labels=y_pred_aux,
                                                 logits=pseudo_labels)
    return l1 + lam * l2
  return loss