TensorFlow update causes a callback failure

Time: 2020-12-23 01:47:29

Tags: python tensorflow machine-learning keras deep-learning

The following code defines an LSTM model with a callback that stops training once the training-set accuracy reaches 90%.

It worked fine when I was using tensorflow-gpu=1.14.


import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout, Bidirectional, Masking, LSTM
from keras_self_attention import SeqSelfAttention

def duo_bi_LSTM_model(X_train, y_train, X_test, y_test, num_classes, loss,
                      batch_size=68, units=128, learning_rate=0.005, epochs=20,
                      dropout=0.2, recurrent_dropout=0.2, optimizer='Adam'):
    
    
    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs={}):
            if (logs.get('acc') > 0.90):
                print("\nReached 90% accuracy so cancelling training!")
                self.model.stop_training = True
                     
    callbacks = myCallback()

    
    adamopt = tf.keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    RMSopt = tf.keras.optimizers.RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-6)

    # opt_select was not shown in the post; a minimal version just picks
    # one of the optimizer instances above by name
    def opt_select(name):
        return {'Adam': adamopt, 'RMS': RMSopt}[name]
    
    model = tf.keras.models.Sequential()
    model.add(Masking(mask_value=0.0, input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(Bidirectional(
        LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout, return_sequences=True)))
    model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout)))
    model.add(Dense(num_classes, activation='softmax'))

    opt = opt_select(optimizer)
    model.compile(loss=loss,
                  optimizer=opt,
                  metrics=['accuracy'])

    history = model.fit(X_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_data=(X_test, y_test),
                        verbose=1,
                        callbacks=[callbacks])

    score, acc = model.evaluate(X_test, y_test,
                                batch_size=batch_size)

    # predictions on the test set, returned alongside the training history
    yhat = model.predict(X_test)

    return history, yhat


X_train = np.random.rand(700, 50, 34)
# one-hot labels, since the model uses softmax + categorical_crossentropy
# and num_classes is read from y_train.shape[1]
y_train = tf.keras.utils.to_categorical(np.random.choice([0, 1], 700))
X_test = np.random.rand(100, 50, 34)
y_test = tf.keras.utils.to_categorical(np.random.choice([0, 1], 100))

batch_size= 217
epochs = 600
dropout = 0.6
Rdropout = 0.7
learning_rate = 0.00001
optimizer = 'RMS'
loss = 'categorical_crossentropy'
num_classes = y_train.shape[1]

duo_bi_LSTM_his, yhat = duo_bi_LSTM_model(X_train, y_train, X_test, y_test,
                                          loss=loss, num_classes=num_classes,
                                          batch_size=batch_size, units=32,
                                          learning_rate=learning_rate, epochs=epochs,
                                          dropout=0.5, recurrent_dropout=Rdropout,
                                          optimizer=optimizer)

After updating TensorFlow to 2.2, I get the following error.

~/Speech/Feature_engineering/LXRmodels.py in on_epoch_end(self, epoch, logs)
    103     class myCallback(tf.keras.callbacks.Callback):
    104         def on_epoch_end(self, epoch, logs={}):
--> 105             if (logs.get('acc') > 0.90):
    106                 print("\nReached 90% accuracy so cancelling training!")
    107                 self.model.stop_training = True

TypeError: '>' not supported between instances of 'NoneType' and 'float'

Is this caused by the TensorFlow update? How should I change the code?

1 Answer:

Answer 0 (score: 0)

You need to replace

logs.get('acc')

with

logs.get('accuracy')

In TensorFlow 2.x, compiling with metrics=['accuracy'] logs the metric under the key 'accuracy' instead of 'acc', so logs.get('acc') returns None and comparing None with 0.90 raises the TypeError shown above.
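
For reference, here is a minimal sketch of the corrected callback. It assumes the model is compiled with metrics=['accuracy'] as in the question, and it falls back to the old 'acc' key so the same code also runs on TensorFlow 1.x:

class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # TF 2.x logs the training metric as 'accuracy'; TF 1.x used 'acc'
        acc = logs.get('accuracy', logs.get('acc'))
        if acc is not None and acc > 0.90:
            print("\nReached 90% accuracy so cancelling training!")
            self.model.stop_training = True

Guarding against None also keeps the callback from crashing if the metric key is missing for some other reason.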