I've been trying to get a multi-feature RNN working by feeding it not only the past features, but also the extra features that sit alongside the target output (i.e. the second feature at the very time step being predicted).
The problem I'm running into is that I don't know how to structure the inputs and outputs so that the input also carries information from the same instance as the output.
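To make the layout concrete, here is a toy illustration of what I mean (hand-written numbers, not my real data): the network should see the exogenous feature for the final step while the target value for that same step stays hidden.

import numpy as np

# Toy series: column 0 = target (milk), column 1 = exogenous feature (noise)
series = np.array([[0.1, 0.3],
                   [0.2, 0.0],
                   [0.3, 0.4],
                   [0.4, 0.7]])

x = series.copy()        # both features for all four steps
x[-1, 0] = np.nan        # the target value at the last step is unknown
y = series[:, :1]        # the target values, including the one to predict
# The RNN sees the noise for the final step but must fill in the milk value.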
This is what I have so far:
import pandas as pd
import numpy as np

mp = pd.read_csv('monthly-milk-production.csv')
mp.index = pd.to_datetime(mp.Month)
mp.drop('Month', axis = 1, inplace = True)

# Making an irrelevant 2nd feature
import random as rand
ran_arr = []
for i in range(len(mp)):
    ran_arr.append(rand.randint(0, 9))
mp['Noise'] = ran_arr

# Train test split (hold out the last 12 months)
mp_train = mp.head(len(mp) - 12).copy()  # .copy() avoids SettingWithCopyWarning later
The mp_train DataFrame looks like this:
   Milk Production     Noise
0         0.086538  0.333333
1         0.019231  0.000000
2         0.209135  0.444444
3         0.247596  0.777778
4         0.418269  0.444444
...
mp_test = mp.tail(12).copy()

# Normalizing: fit the scaler on the training data only
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(mp_train)  # fit_transform's return value was being discarded, so just fit
mp_train[['Milk Production', 'Noise']] = scaler.transform(mp_train)
mp_test[['Milk Production', 'Noise']] = scaler.transform(mp_test)
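For later, predictions can be mapped back to the original scale with the scaler's inverse_transform. A minimal sketch (the pred_scaled value here is a hypothetical placeholder, not real output):

# Sketch: undo the scaling on a predicted milk value.
# The scaler was fit on two columns, so pad the prediction with a dummy
# noise column before inverse_transform, then keep column 0.
pred_scaled = np.array([[0.5]])                        # placeholder prediction
padded = np.hstack([pred_scaled, np.zeros_like(pred_scaled)])
pred_original = scaler.inverse_transform(padded)[:, 0]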
# next_batch function
def next_batch(training_data, batch_size, steps):
    # NOTE: batch_size is currently unused; every call returns a batch of 1
    rand_start = np.random.randint(0, len(training_data) - steps)
    window = training_data[rand_start: rand_start + steps + 1]
    x = np.array(window[['Milk Production', 'Noise']]).reshape(steps + 1, 2)
    y = np.array(window['Milk Production']).reshape(1, steps + 1)
    lower = x[:-1, :].reshape(-1, steps, 2)
    # lower[0][-1][0] is the value I'm trying to predict, so I stuck a
    # placeholder in its place (assigning None into a float array yields nan)
    lower[0, -1, 0] = np.nan
    higher = y[:, :-1].reshape(-1, steps, 1)
    return lower, higher
a, b = next_batch(mp_train, 1, 12)
print(a)
print(b)
#Output
[[[0.25240385 0.33333333]
[0.16586538 0.22222222]
[0.375 0.44444444]
[0.40625 0.66666667]
[0.55048077 0.11111111]
[0.48798077 0.44444444]
[0.35817308 0. ]
[0.24038462 0.88888889]
[0.14903846 0.77777778]
[0.16346154 1. ]
[0.11778846 0.22222222]
[ nan 0.33333333]]]
[[[0.25240385]
[0.16586538]
[0.375 ]
[0.40625 ]
[0.55048077]
[0.48798077]
[0.35817308]
[0.24038462]
[0.14903846]
[0.16346154]
[0.11778846]
[0.19711538]]]
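One thing I'm aware of: the nan in the last input row will propagate straight through the network and turn the loss into nan. As a possible workaround (an untested sketch, not what I'm currently running), I could zero-fill the unknown value and add an indicator column so the network can tell a masked value apart from a real zero:

def next_batch_masked(training_data, steps):
    # Same windowing as next_batch, but NaN-free: the unknown milk value is
    # zero-filled and a third "is_masked" feature flags that position.
    rand_start = np.random.randint(0, len(training_data) - steps)
    window = training_data[rand_start: rand_start + steps + 1]
    x = np.array(window[['Milk Production', 'Noise']]).reshape(steps + 1, 2)
    y = np.array(window['Milk Production']).reshape(1, steps + 1)
    lower = x[:-1, :].reshape(-1, steps, 2)
    mask = np.zeros((1, steps, 1))
    mask[0, -1, 0] = 1.0           # flag the step whose milk value is hidden
    lower[0, -1, 0] = 0.0          # zero-fill instead of NaN
    lower = np.concatenate([lower, mask], axis = 2)  # shape (1, steps, 3)
    higher = y[:, :-1].reshape(-1, steps, 1)
    return lower, higher

(Using this version would mean num_inputs = 3 in the graph below.)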
I then plan to pass this to TensorFlow as follows:
import tensorflow as tf
num_inputs = 2
num_neurons = 100
num_outputs = 1
learning_rate = 0.03
num_itr = 1000
batch_size = 1
time_steps = 12
X = tf.placeholder(tf.float32, [None, time_steps, num_inputs])
y = tf.placeholder(tf.float32, [None, time_steps, num_outputs])
cell = tf.contrib.rnn.GRUCell(num_neurons, activation = tf.nn.relu)
cell = tf.contrib.rnn.OutputProjectionWrapper(cell, num_outputs)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype = tf.float32)
loss = tf.reduce_mean(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
gpu_option = tf.GPUOptions(allow_growth = True)  # gpu_option was undefined before
with tf.Session(config = tf.ConfigProto(gpu_options = gpu_option)) as sess:
    sess.run(init)
    for itr in range(num_itr):
        X_batch, y_batch = next_batch(mp_train, batch_size, time_steps)
        sess.run(train, feed_dict = {X: X_batch, y: y_batch})
        if itr % 100 == 0:
            mse = loss.eval(feed_dict = {X: X_batch, y: y_batch})
            print(str(itr) + '\t' + str(mse))
    # Save model for later
    saver.save(sess, "./SavedModel2_test")
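For completeness, this is roughly how I'd restore the weights and generate a prediction afterwards (a sketch that assumes the graph above, i.e. X, outputs, saver, and time_steps, is still defined in the same process):

# Sketch: restore the trained weights and predict the masked step.
with tf.Session() as sess:
    saver.restore(sess, "./SavedModel2_test")
    # Draw one input window with next_batch and read off the model's
    # output at the last time step (the one whose milk value was masked).
    X_new, _ = next_batch(mp_train, 1, time_steps)
    pred = sess.run(outputs, feed_dict = {X: X_new})
    print(pred[0, -1, 0])  # predicted (scaled) milk value for the masked step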