This is my simple code. The goal is to predict the y = x² curve from the data.
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
%matplotlib inline

# Input data
x_data = np.linspace(0, 10, 11)
y_data = x_data**2

# Data for testing
x_test = np.linspace(0, 10, 11)

# Placeholders
X = tf.placeholder("float32", [None, 11])
Y = tf.placeholder("float32", [None, 11])

# Neural network model
def multilayer_perceptron(X, weights, biases):
    layer_1 = tf.add(tf.matmul(X, weights['h1']), biases['b1'])
    layer_1 = tf.nn.sigmoid(layer_1)
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    return out_layer

# Store layers' weights & biases
weights = {
    'h1': tf.Variable(tf.random_normal([11, 2], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([2, 1], 0, 0.1))
}
biases = {
    'b1': tf.Variable(tf.random_normal([2], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([1], 0, 0.1))
}

# Construct model
pred = multilayer_perceptron(X, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.square(pred - Y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
init = tf.global_variables_initializer()

# Session eval
with tf.Session() as sess:
    sess.run(init)
    epochs = 10000
    for i in range(epochs):
        o, c, p = sess.run([optimizer, cost, pred],
                           feed_dict={X: x_data.reshape([1, 11]), Y: y_data.reshape([1, 11])})
    pred_value = sess.run([pred], feed_dict={X: x_test.reshape([1, 11])})
Is the last line calling the session to evaluate pred using x_test as input? When I run this code, pred_value is just a single seemingly random number rather than the 11 roughly correct values I expect. I don't know what is wrong with the code.
Answer (score: 0)
This is because the output layer has shape (None, 1), while the output you want should have shape (None, 11). So you will have to change the shape of weights['out'] to (2, 11) and the shape of biases['out'] to (11).
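Concretely, only the two 'out' entries change from the question's code; this snippet just restates the fix that the complete listing further down already contains:

weights = {
    'h1': tf.Variable(tf.random_normal([11, 2], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([2, 11], 0, 0.1))   # was [2, 1]
}
biases = {
    'b1': tf.Variable(tf.random_normal([2], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([11], 0, 0.1))      # was [1]
}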
The complete working code is below.
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
# %matplotlib inline

# Input data
x_data = np.linspace(0, 10, 11)
y_data = x_data**2

# Data for testing
x_test = np.linspace(0, 10, 11)

# Placeholders
X = tf.placeholder("float32", [None, 11])
Y = tf.placeholder("float32", [None, 11])

# Neural network model
def multilayer_perceptron(X, weights, biases):
    layer_1 = tf.add(tf.matmul(X, weights['h1']), biases['b1'])
    layer_1 = tf.nn.sigmoid(layer_1)
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    print(out_layer.shape)
    return out_layer

# Store layers' weights & biases
weights = {
    'h1': tf.Variable(tf.random_normal([11, 2], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([2, 11], 0, 0.1))
}
biases = {
    'b1': tf.Variable(tf.random_normal([2], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([11], 0, 0.1))
}

# Construct model
pred = multilayer_perceptron(X, weights, biases)

# Define loss and optimizer
cost = tf.reduce_mean(tf.square(pred - Y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
init = tf.global_variables_initializer()

# Session eval
with tf.Session() as sess:
    sess.run(init)
    epochs = 10000
    for i in range(epochs):
        o, c, p = sess.run([optimizer, cost, pred],
                           feed_dict={X: x_data.reshape([1, 11]), Y: y_data.reshape([1, 11])})
        # print(p.shape)
    pred_value = sess.run([pred], feed_dict={X: x_test.reshape([1, 11])})
    print(pred_value)
The output is as follows.
[array([[ 0. , 1. , 3.9999986, 8.999985 , 15.999548 ,
23.496645 , 26.745462 , 28.187344 , 28.973213 , 28.841356 ,
29.02704 ]], dtype=float32)]
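To check the fit visually, one option (not part of the original answer, just a minimal sketch reusing the matplotlib import and the pred_value and x_test variables defined above) is to plot the network output against the true y = x² curve after the session block:

# Sketch only: compare the prediction above with the true curve y = x**2.
# pred_value is the list returned by sess.run([pred], ...), holding one (1, 11) array.
predictions = np.asarray(pred_value[0]).flatten()

plt.plot(x_test, x_test**2, label="true y = x^2")             # ground-truth curve
plt.plot(x_test, predictions, "o--", label="network output")  # model predictions
plt.legend()
plt.show()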