It seems that Keras cannot correctly save the optimizer state in Google Colab. After saving and reloading the model, the reloaded model's loss is completely different from the original model's.
Although the Keras documentation says that model.save saves the optimizer's state, are there special cases in which it does not save the entire optimizer state?
keras == 2.2.5
backend == tensorflow
tensorflow == 1.14.0
Here is my code.
from sklearn.model_selection import train_test_split
x_train, x_valid, y_train, y_valid = train_test_split(X, Y, test_size=0.3, shuffle= True)
import keras.optimizers as ko
optimizer = ko.Nadam()
model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=500, validation_data=(x_valid, y_valid), batch_size=64)
#output
"""
Epoch 495/500
1750/1750 [==============================] - 16s 9ms/step - loss: 0.9285 - acc: 1.0000 - val_loss: 4.0664 - val_acc: 1.0000
Epoch 496/500
1750/1750 [==============================] - 16s 9ms/step - loss: 1.8576 - acc: 1.0000 - val_loss: 1.7988 - val_acc: 1.0000
Epoch 497/500
1750/1750 [==============================] - 16s 9ms/step - loss: 0.8660 - acc: 1.0000 - val_loss: 1.4999 - val_acc: 1.0000
Epoch 498/500
1750/1750 [==============================] - 16s 9ms/step - loss: 0.5575 - acc: 1.0000 - val_loss: 0.6627 - val_acc: 1.0000
Epoch 499/500
1750/1750 [==============================] - 16s 9ms/step - loss: 0.4687 - acc: 1.0000 - val_loss: 1.3086 - val_acc: 1.0000
Epoch 500/500
1750/1750 [==============================] - 16s 9ms/step - loss: 0.9433 - acc: 1.0000 - val_loss: 5.9769 - val_acc: 1.0000
"""
import pickle
with open('/gdrive/My Drive/lab/2019_9_4/history500.pkl', 'wb') as output:
    pickle.dump(history, output, pickle.HIGHEST_PROTOCOL)
model.save("/gdrive/My Drive/lab/2019_9_4/abdp.h5")
with open('/gdrive/My Drive/lab/2019_9_4/x_train.pkl', 'wb') as output:
    pickle.dump(x_train, output, pickle.HIGHEST_PROTOCOL)
with open('/gdrive/My Drive/lab/2019_9_4/x_valid.pkl', 'wb') as output:
    pickle.dump(x_valid, output, pickle.HIGHEST_PROTOCOL)
with open('/gdrive/My Drive/lab/2019_9_4/y_train.pkl', 'wb') as output:
    pickle.dump(y_train, output, pickle.HIGHEST_PROTOCOL)
with open('/gdrive/My Drive/lab/2019_9_4/y_valid.pkl', 'wb') as output:
    pickle.dump(y_valid, output, pickle.HIGHEST_PROTOCOL)
from keras.models import load_model
model = load_model("/gdrive/My Drive/lab/2019_9_4/abdp.h5")
with open('/gdrive/My Drive/lab/2019_9_4/x_train.pkl', 'rb') as f:
    x_train = pickle.load(f)
with open('/gdrive/My Drive/lab/2019_9_4/x_valid.pkl', 'rb') as f:
    x_valid = pickle.load(f)
with open('/gdrive/My Drive/lab/2019_9_4/y_train.pkl', 'rb') as f:
    y_train = pickle.load(f)
with open('/gdrive/My Drive/lab/2019_9_4/y_valid.pkl', 'rb') as f:
    y_valid = pickle.load(f)
history = model.fit(x_train, y_train, epochs=5, validation_data=(x_valid, y_valid), batch_size=64)
"""
Epoch 1/5
1750/1750 [==============================] - 32s 18ms/step - loss: 3671.9919 - acc: 1.0000 - val_loss: 27511.0797 - val_acc: 1.0000
Epoch 2/5
1750/1750 [==============================] - 29s 16ms/step - loss: 327.5412 - acc: 1.0000 - val_loss: 3598.9627 - val_acc: 1.0000
Epoch 3/5
1750/1750 [==============================] - 29s 16ms/step - loss: 216.6781 - acc: 1.0000 - val_loss: 1258.5962 - val_acc: 1.0000
Epoch 4/5
1750/1750 [==============================] - 29s 16ms/step - loss: 141.3966 - acc: 1.0000 - val_loss: 1526.8668 - val_acc: 1.0000
Epoch 5/5
1750/1750 [==============================] - 29s 16ms/step - loss: 114.3413 - acc: 1.0000 - val_loss: 1107.3428 - val_acc: 1.0000
"""