I don't understand how the professor arrived at this setup. The definitions of the second and third layers are omitted in the code.
Q1: How many weights (W) and biases (b) are there?
Q2: What is the dimensionality of the input data?
Q3: How should I interpret this line of code? (A small slicing sketch follows the questions.)
trainX = xy[0 : train_size, : -1]
I have nowhere to ask about this since it's graduate-level material. Any help would be much appreciated.
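For Q3, here is a minimal, hypothetical NumPy sketch (a toy array, not the real endterm.csv) of what that slice selects:

import numpy as np

# Toy stand-in for the loaded CSV: 4 rows, 3 columns (2 features + 1 label).
xy = np.array([[ 1.,  2.,  3.],
               [ 4.,  5.,  6.],
               [ 7.,  8.,  9.],
               [10., 11., 12.]])
train_size = 3

trainX = xy[0:train_size, :-1]   # first 3 rows, every column except the last
trainY = xy[0:train_size, [-1]]  # first 3 rows, only the last column, kept 2-D

print(trainX.shape)  # (3, 2)
print(trainY.shape)  # (3, 1)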
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def MinMaxScaler(data):
    # Column-wise min-max normalization: rescale each feature to the [0, 1] range.
    return (data - np.min(data, 0)) / (np.max(data, 0) - np.min(data, 0))
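# Quick illustrative check (toy values, not from endterm.csv):
# MinMaxScaler(np.array([[1., 10.], [2., 20.], [3., 30.]]))
# rescales each column independently to
# [[0. , 0. ],
#  [0.5, 0.5],
#  [1. , 1. ]]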
inputDim = 15          # number of input features (all CSV columns except the label)
outputDim = 1          # single regression target
learning_rate = 0.008
iterations = 20000
# Load, normalize, and split the data: first ~98% for training, the rest for testing.
xy = np.loadtxt('./endterm.csv', delimiter=',')
xy = MinMaxScaler(xy)
train_size = int(len(xy) * 0.98)
trainX = xy[0 : train_size, : -1]   # training rows, all columns except the last
trainY = xy[0 : train_size, [-1]]   # training rows, last column only (shape (n, 1))
testX = xy[train_size :, : -1]
testY = xy[train_size :, [-1]]
X = tf.placeholder(tf.float32, shape=[None, inputDim])
Y = tf.placeholder(tf.float32, shape=[None, outputDim])

# Layer 1: inputDim -> 8
W1 = tf.Variable(tf.random_normal([inputDim, 8]))
b1 = tf.Variable(tf.random_normal([8]))
layer1 = tf.nn.relu(tf.matmul(X, W1) + b1)

# Layer 2: 8 -> 8 (bug fix: feed layer1, not X, and store the result in layer2)
W2 = tf.Variable(tf.random_normal([8, 8]))
b2 = tf.Variable(tf.random_normal([8]))
layer2 = tf.nn.relu(tf.matmul(layer1, W2) + b2)

# Output layer: 8 -> outputDim (W3/b3 were omitted; shapes assumed from context)
W3 = tf.Variable(tf.random_normal([8, outputDim]))
b3 = tf.Variable(tf.random_normal([outputDim]))
hypothesis = tf.matmul(layer2, W3) + b3
# Mean-squared-error loss, minimized with plain gradient descent
# (using the learning_rate defined above instead of a hard-coded 1e-3).
loss = tf.reduce_mean(tf.square(hypothesis - Y))
train = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)

# RMSE metric for evaluating test-set predictions.
targets = tf.placeholder(tf.float32, [None, 1])
predictions = tf.placeholder(tf.float32, [None, 1])
rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Full-batch training loop.
    for i in range(iterations):
        _, step_loss = sess.run([train, loss], feed_dict={X: trainX, Y: trainY})
        print("[step: {}] loss: {}".format(i, step_loss))

    # Evaluate on the held-out test split.
    test_predict = sess.run(hypothesis, feed_dict={X: testX})
    rmse_val = sess.run(rmse, feed_dict={targets: testY, predictions: test_predict})
    print("RMSE: {}".format(rmse_val))

# Plot actual vs. predicted test values.
plt.plot(testY)
plt.plot(test_predict)
plt.xlabel("Time Period")
plt.ylabel("Stock Price")
plt.show()
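On Q1/Q2: one way to sanity-check the parameter count is to sum the sizes of every W and b. This is just a sketch against the 15 -> 8 -> 8 -> 1 shapes used above (the output-layer shapes are my assumption, since the handout omitted them):

import numpy as np

# Hypothetical tally for a 15 -> 8 -> 8 -> 1 fully connected network.
shapes = {
    "W1": (15, 8), "b1": (8,),
    "W2": (8, 8),  "b2": (8,),
    "W3": (8, 1),  "b3": (1,),  # assumed output-layer shapes
}
total = sum(int(np.prod(s)) for s in shapes.values())
print(total)  # 120 + 8 + 64 + 8 + 8 + 1 = 209

The input dimensionality follows the same shapes: X carries 15 features per row, which is why inputDim = 15 and why the CSV must have 16 columns (15 features plus the label).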