import os
import gc
import matplotlib.pyplot as plt
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint

def LstmSingle(name, train, vaild, test, idx, epoch=500, layer_size=32,
               time_stap=10, learning_rate=5e-3, batch_size=8):
    idx_text = ["news", "sns", "sq"]
    wgh_path = "./weight/" + name + "_" + idx_text[idx] + ".wgh"
    K.clear_session()

    # Two stacked LSTMs, two relu dense layers, dropout, then a sigmoid output
    model = Sequential()
    model.add(LSTM(layer_size, input_shape=(time_stap, 2), return_sequences=True))
    model.add(LSTM(layer_size))  # input shape is inferred from the layer above
    model.add(Dense(layer_size, activation='relu'))
    model.add(Dense(layer_size, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='mse', optimizer=Adam(learning_rate=learning_rate))

    history = None
    if not os.path.isfile(wgh_path):
        # Keep only the checkpoint with the best validation loss
        checkpoint = ModelCheckpoint(filepath=wgh_path, monitor='val_loss',
                                     verbose=0, save_best_only=True)
        history = model.fit(
            train["x"][idx],
            train["y"],
            validation_data=(vaild["x"][idx], vaild["y"]),
            epochs=epoch,
            batch_size=batch_size,
            verbose=0,
            callbacks=[checkpoint]
        )
        # garbage collect
        del checkpoint
        gc.collect()
    # end if

    model.load_weights(wgh_path)

    # Plot learning history
    if history is not None:
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title('Learning progress')
        plt.plot(history.history['loss'], color="black", label="loss")
        plt.plot(history.history['val_loss'], color="blue", label="val_loss")
        plt.legend()
        plt.show()
    # end if

    predict_train = model.predict(train["x"][idx])
    predict_vaild = model.predict(vaild["x"][idx])
    predict_test = model.predict(test["x"][idx])

    # garbage collect: reset the model before tearing down its session
    model.reset_states()
    K.clear_session()
    del model
    gc.collect()

    return {
        "train": predict_train,
        "vaild": predict_vaild,
        "test": predict_test
    }
# end def
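For reference, here is a minimal usage sketch. The data layout is an assumption inferred from how LstmSingle indexes its arguments: "x" holds one array per source (news, sns, sq), each shaped (samples, time_stap, 2), and "y" is a shared target vector.

import os
import numpy as np

# Hypothetical random data, only to illustrate the assumed shapes
def make_split(samples, time_stap=10):
    return {
        "x": [np.random.rand(samples, time_stap, 2) for _ in range(3)],
        "y": np.random.rand(samples),
    }

os.makedirs("./weight", exist_ok=True)  # LstmSingle writes checkpoints here
train, vaild, test = make_split(800), make_split(100), make_split(100)

# Train (or reuse cached weights) and predict for the "news" source (idx=0)
preds = LstmSingle("demo", train, vaild, test, idx=0, epoch=10)
print(preds["test"].shape)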
I trained the model above, but when I move on to training with the next set of data, the memory used by the previous run is still not released.
According to Task Manager, my model's memory usage peaks at 10.3 GB. While the model is fitting, memory usage keeps increasing, and it does not go back down after fitting finishes.
How can I fix this?
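A workaround that is often suggested for exactly this symptom (it is not part of the original post) is to run each fit in a short-lived child process: when the process exits, the operating system reclaims all of its memory, regardless of what TensorFlow keeps cached internally. A minimal sketch using the standard library's multiprocessing, assuming LstmSingle is defined at module level:

import multiprocessing as mp

def _worker(queue, name, train, vaild, test, idx):
    # Runs in a fresh interpreter; all memory TensorFlow allocates here
    # is returned to the OS when this process exits.
    queue.put(LstmSingle(name, train, vaild, test, idx))

def lstm_single_isolated(name, train, vaild, test, idx):
    ctx = mp.get_context("spawn")  # fresh process, no inherited TF state
    queue = ctx.Queue()
    proc = ctx.Process(target=_worker,
                       args=(queue, name, train, vaild, test, idx))
    proc.start()
    result = queue.get()  # read before join() to avoid a pipe deadlock
    proc.join()
    return result

With the "spawn" start method the child re-imports the module, so the call site should be guarded by if __name__ == "__main__":; the returned dict of NumPy arrays is pickled back through the queue.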