I just copied the TensorBoard tutorial, so why am I getting this error?

InvalidArgumentError: You must feed a value for placeholder tensor 'y-input_6' with dtype float and shape [?,1] [[node y-input_6 (defined at :19)]]
Here is my code:
x_data = [[0., 0.],
          [0., 1.],
          [1., 0.],
          [1., 1.]]
y_data = [[0.],
          [1.],
          [1.],
          [0.]]
x_data = np.array(x_data, dtype=np.float32)
y_data = np.array(y_data, dtype=np.float32)

X = tf.placeholder(tf.float32, [None, 2], name='x-input')
Y = tf.placeholder(tf.float32, [None, 1], name='y-input')
...
with tf.Session() as sess:
    # tensorboard --logdir=./logs/xor_logs
    merged_summary = tf.summary.merge_all()
    writer = tf.summary.FileWriter("./logs/xor_logs_r0_01")
    writer.add_graph(sess.graph)  # Show the graph

    # Initialize TensorFlow variables
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        summary, _ = sess.run([merged_summary, train], feed_dict={X: x_data, Y: y_data})
        writer.add_summary(summary, global_step=step)
There is no problem without merged_summary, but I need it:
for step in range(10001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})
    writer.add_summary(summary, global_step=step)
The full code is here:
import tensorflow as tf
import numpy as np

tf.set_random_seed(777)  # for reproducibility
learning_rate = 0.01

x_data = [[0., 0.],
          [0., 1.],
          [1., 0.],
          [1., 1.]]
y_data = [[0.],
          [1.],
          [1.],
          [0.]]
x_data = np.array(x_data, dtype=np.float32)
y_data = np.array(y_data, dtype=np.float32)

X = tf.placeholder(tf.float32, [None, 2], name='x-input')
Y = tf.placeholder(tf.float32, [None, 1], name='y-input')

with tf.name_scope("layer1"):
    W1 = tf.Variable(tf.random_normal([2, 2]), name='weight1')
    b1 = tf.Variable(tf.random_normal([2]), name='bias1')
    layer1 = tf.sigmoid(tf.matmul(X, W1) + b1)

    w1_hist = tf.summary.histogram("weights1", W1)
    b1_hist = tf.summary.histogram("biases1", b1)
    layer1_hist = tf.summary.histogram("layer1", layer1)

with tf.name_scope("layer2"):
    W2 = tf.Variable(tf.random_normal([2, 1]), name='weight2')
    b2 = tf.Variable(tf.random_normal([1]), name='bias2')
    hypothesis = tf.sigmoid(tf.matmul(layer1, W2) + b2)

    w2_hist = tf.summary.histogram("weights2", W2)
    b2_hist = tf.summary.histogram("biases2", b2)
    hypothesis_hist = tf.summary.histogram("hypothesis", hypothesis)

with tf.name_scope("cost"):
    cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
                           tf.log(1 - hypothesis))
    cost_summ = tf.summary.scalar("cost", cost)

with tf.name_scope("train"):
    train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
accuracy_summ = tf.summary.scalar("accuracy", accuracy)

with tf.Session() as sess:
    # tensorboard --logdir=./logs/xor_logs
    merged_summary = tf.summary.merge_all()
    writer = tf.summary.FileWriter("./logs/xor_logs_r0_01")
    writer.add_graph(sess.graph)  # Show the graph

    # Initialize TensorFlow variables
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        s , _ = sess.run([merged_summary, train], feed_dict={X: x_data, Y: y_data})
        writer.add_summary(summary, global_step=step)

        if step % 100 == 0:
            print(step, sess.run(cost, feed_dict={
                X: x_data, Y: y_data}), sess.run([W1, W2]))

    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: ", h, "\nCorrect: ", c, "\nAccuracy: ", a)
Answer 0 (score: 1):
Add this to the first line of your code and try running it again:
tf.reset_default_graph()
Like this:
import tensorflow as tf
import numpy as np
tf.reset_default_graph()
tf.set_random_seed(777) # for reproducibility
learning_rate = 0.01
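
The suffix in 'y-input_6' hints at what went wrong: TensorFlow appends _1, _2, ... when a node with that name already exists in the default graph, which typically happens when the graph-building code is re-run in the same notebook or interpreter session. tf.summary.merge_all() then also picks up summaries attached to the stale placeholders from earlier runs, and those placeholders are never covered by your feed_dict, hence the InvalidArgumentError. Below is a minimal, standalone sketch (TF 1.x, not part of your script) showing how the names accumulate and why resetting the default graph helps:

import tensorflow as tf

# Simulate running the same graph-building cell three times in one session.
for _ in range(3):
    tf.placeholder(tf.float32, [None, 1], name='y-input')

# The default graph now holds 'y-input', 'y-input_1', 'y-input_2'.
print([op.name for op in tf.get_default_graph().get_operations()
       if op.type == 'Placeholder'])

# Clearing the graph removes the stale nodes, so merge_all() only collects
# summaries from the placeholders you build (and feed) afterwards.
tf.reset_default_graph()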
Also, there is a bug (perhaps just a typo) in your code.

Change
s , _ = sess.run([merged_summary, train], feed_dict={X: x_data, Y: y_data})
to
summary , _ = sess.run([merged_summary, train], feed_dict={X: x_data, Y: y_data})
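
With s , _ the name summary is never assigned, so the writer.add_summary(summary, ...) call on the next line refers to an undefined (or stale) variable. For reference, a sketch of how the training loop reads once both changes are in place, assuming the rest of the script stays as you posted it:

for step in range(10001):
    # Run one training step and fetch the merged summaries in the same call.
    summary, _ = sess.run([merged_summary, train],
                          feed_dict={X: x_data, Y: y_data})
    writer.add_summary(summary, global_step=step)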