What is the difference between setting the number of epochs and calling fit repeatedly in a for loop?

Date: 2018-07-13 00:43:25

Tags: python tensorflow keras

# -*- coding: utf-8 -*-
import numpy as np

# seed-setting block left commented out on purpose (see the question below about run-to-run variability)
"""
from numpy.random import seed
seed(10)
from tensorflow import set_random_seed
import tensorflow as tf
set_random_seed(1)
import os
os.environ['PYTHONHASHSEED'] = '0'
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
from keras import backend as K
K.set_session(sess)
"""

from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, CuDNNGRU, CuDNNLSTM
from sklearn.preprocessing import MinMaxScaler

import matplotlib.pyplot as plt

# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back):
        dataX.append(dataset[i:(i+look_back), 0])
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)


dataset = np.cos(np.arange(1000)*(20*np.pi/1000))[:,None]
plt.plot(dataset)
plt.show()

look_back = 30
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)

# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]

trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)

# reshape input to the [samples, time steps, features] shape the LSTM layers expect
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))

batch_size = 1
model = Sequential()
model.add(CuDNNLSTM(16, batch_input_shape=(batch_size, look_back, 1),  stateful=True, return_sequences=True))
model.add(Dropout(0.3))
model.add(CuDNNLSTM(16, batch_input_shape=(batch_size, look_back, 1),  stateful=True))
model.add(Dropout(0.3))
model.add(Dense(16,activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
for i in range(5):
    print(i)
    model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=1, shuffle=False)
    model.reset_states()


trainScore = model.evaluate(trainX, trainY, batch_size=batch_size, verbose=0)
print('Train Score: ', trainScore)
testScore = model.evaluate(testX[:252], testY[:252], batch_size=batch_size, verbose=0)
print('Test Score: ', testScore)

# recursive multi-step forecast: roll the last training window forward,
# feeding each prediction back in as the newest value of the next input window
look_ahead = 250
trainPredict = [np.vstack([trainX[-1][1:], trainY[-1]])]
predictions = np.zeros((look_ahead,1))
for i in range(look_ahead):
    prediction = model.predict(np.array([trainPredict[-1]]), batch_size=batch_size)
    predictions[i] = prediction
    trainPredict.append(np.vstack([trainPredict[-1][1:],prediction]))


plt.figure(figsize=(12,5))
# plt.plot(np.arange(len(trainX)),np.squeeze(trainX))
# plt.plot(np.arange(200),scaler.inverse_transform(np.squeeze(trainPredict)[:,None][1:]))
# plt.plot(np.arange(200),scaler.inverse_transform(np.squeeze(testY)[:,None][:200]),'r')
plt.plot(np.arange(look_ahead),predictions,'r',label="prediction")
plt.plot(np.arange(look_ahead),dataset[train_size:(train_size+look_ahead)],label="test function")
plt.legend()
plt.show()

This code is based on the example here:

https://github.com/sachinruk/PyData_Keras_Talk/blob/master/cosine_LSTM.ipynb

Rather than setting the number of epochs, he uses a for loop. Can you just set the number of epochs in the fit call instead?

Secondly, without setting a seed, the results vary wildly between runs. I understand that you need to set a seed to get reproducible results, but should they really differ this much? In one case I get something that looks like a sine wave, sometimes I end up with a flat line, and sometimes I get a sine wave at the wrong frequency. Is this much variability normal?

Since in this case we are using the LSTM to predict multiple points into the future, I understand that we may run into compounding errors. I was wondering whether the way fit is being called could be causing this. I tried both approaches and they seem to produce similar results, and I'm a bit at a loss as to why that is.
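For reference, this is roughly what the single-fit-call variant I tried looks like; the LambdaCallback that mirrors the loop's reset_states() call is my own addition, not something taken from the linked notebook:

from keras.callbacks import LambdaCallback

# reset the stateful layers at the end of every epoch, mirroring what the for loop does
reset_cb = LambdaCallback(on_epoch_end=lambda epoch, logs: model.reset_states())

model.fit(trainX, trainY, epochs=5, batch_size=batch_size,
          verbose=1, shuffle=False, callbacks=[reset_cb])

Without the callback, as far as I can tell, the layer states carry over from one epoch into the next, which is the one thing the original for loop changes via reset_states().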

Here are examples of the various outputs I get:

https://imgur.com/a/esEaVf9

1 answer:

Answer 0 (score: 0):

There are some differences between running single epochs in a for loop and specifying multiple epochs in a single fit call.

For example, learning rate decay schedules typically act on / are updated at each epoch.
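A minimal sketch of how this difference shows up, assuming an epoch-based LearningRateScheduler is attached to the question's model (the schedule function here is only an illustration, not part of the original code):

from keras.callbacks import LearningRateScheduler

def schedule(epoch):
    # halve the learning rate every epoch (Keras passes the epoch index, starting at 0)
    return 0.001 * (0.5 ** epoch)

lr_cb = LearningRateScheduler(schedule)

# Variant A: one call with epochs=5 -- the scheduler sees epochs 0..4, so the rate decays.
model.fit(trainX, trainY, epochs=5, batch_size=1, shuffle=False, callbacks=[lr_cb])

# Variant B: five calls with epochs=1 -- each call restarts at epoch 0, so the rate never decays.
for i in range(5):
    model.fit(trainX, trainY, epochs=1, batch_size=1, shuffle=False, callbacks=[lr_cb])
    model.reset_states()

If you want to keep the for loop, fit also takes an initial_epoch argument (paired with epochs=i+1 in iteration i) so that the epoch counter seen by such callbacks keeps advancing across calls.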

The following post explains this in more detail:

https://datascience.stackexchange.com/questions/26112/decay-parameter-in-keras-optimizers?rq=1