我正在研究本教程,发现了上面提到的错误。我是tensorflow的新手,所以如果有人可以帮助我,我将非常感激。在此特定行上出现错误
_,cost = sess.run([optimizer,cost_function],feed_dict={X:tr_features,Y:tr_labels})
报错信息为:ValueError: Cannot feed value of shape (0, 0) for Tensor 'Placeholder_21:0', which has shape '(?, 5)'(即:无法把形状为 (0, 0) 的值喂给形状为 '(?, 5)' 的占位符 'Placeholder_21:0')
以下是我的代码的一部分:
training_epochs = 300          # number of full passes over the training set
n_dim = tr_features.shape[1]   # input feature dimensionality, taken from the training data
n_classes = 5                  # number of output classes (must match width of tr_labels/ts_labels)
n_hidden_units_one = 280       # width of the first hidden layer
n_hidden_units_two = 300       # width of the second hidden layer
sd = 1 / np.sqrt(n_dim)        # weight-init stddev: 1/sqrt(fan-in) scaling
learning_rate = 0.01           # gradient-descent step size
# Input placeholders for the graph. The leading None dimension is the
# (variable) batch size. NOTE: the original paste had the tail of X's
# comment wrapped onto its own source line, which is a SyntaxError.
X = tf.placeholder(tf.float32, [None, n_dim])      # feature vectors
Y = tf.placeholder(tf.float32, [None, n_classes])  # one-hot class labels (not "testing data")
# All weights and biases share one initialisation scheme: zero-mean
# normal draws with stddev sd (= 1/sqrt(n_dim)).
def _normal_var(shape):
    """Create a trainable variable initialised from N(0, sd^2)."""
    return tf.Variable(tf.random_normal(shape, mean=0, stddev=sd))

# Hidden layer 1: tanh over an affine transform of the inputs.
W_1 = _normal_var([n_dim, n_hidden_units_one])
b_1 = _normal_var([n_hidden_units_one])
h_1 = tf.nn.tanh(tf.matmul(X, W_1) + b_1)

# Hidden layer 2: sigmoid activation.
W_2 = _normal_var([n_hidden_units_one, n_hidden_units_two])
b_2 = _normal_var([n_hidden_units_two])
h_2 = tf.nn.sigmoid(tf.matmul(h_1, W_2) + b_2)

# Output layer: softmax over n_classes logits.
W = _normal_var([n_hidden_units_two, n_classes])
b = _normal_var([n_classes])
y_ = tf.nn.softmax(tf.matmul(h_2, W) + b)
init = tf.global_variables_initializer()

# Cross-entropy loss. y_ is clipped away from 0 so tf.log never yields
# -inf/NaN when the softmax saturates (the original unclipped tf.log(y_)
# is a well-known NaN source with this hand-rolled cross-entropy).
cost_function = -tf.reduce_sum(Y * tf.log(tf.clip_by_value(y_, 1e-10, 1.0)))
# The original paste split this assignment across two lines outside any
# parentheses, which is a SyntaxError.
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)

# Accuracy: fraction of samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Start with an EMPTY history; np.empty(shape=[1]) would seed it with one
# uninitialised garbage value that then pollutes the recorded curve.
cost_history = np.empty(shape=[0], dtype=float)
y_true, y_pred = None, None
with tf.Session() as sess:
    sess.run(init)
    # NOTE(review): the reported error --
    #   "Cannot feed value of shape (0, 0) for Tensor ... which has shape '(?, 5)'"
    # -- means tr_features/tr_labels are EMPTY arrays: the upstream data
    # loading produced zero samples. Verify the feature-extraction step.
    for epoch in range(training_epochs):
        _, cost = sess.run([optimizer, cost_function],
                           feed_dict={X: tr_features, Y: tr_labels})
        cost_history = np.append(cost_history, cost)

    # Evaluate once after training. NOTE(review): the paste lost all
    # indentation; evaluation is assumed to sit outside the epoch loop --
    # confirm against the original tutorial.
    y_pred = sess.run(tf.argmax(y_, 1), feed_dict={X: ts_features})
    y_true = sess.run(tf.argmax(ts_labels, 1))
    print("Test accuracy: ",
          round(sess.run(accuracy, feed_dict={X: ts_features, Y: ts_labels}), 3))