I'm getting the error ValueError: Cannot feed value of shape (100,) for Tensor 'Placeholder_31:0', which has shape '(?, 10)' while processing the MNIST data.
import tensorflow as tf
import numpy as np
from tensorflow import keras
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Flatten each 28x28 image into a 784-dimensional vector
train_images = np.array([image.flatten() for image in train_images])
test_images = np.array([image.flatten() for image in test_images])
# Scale pixel values into [0, 1]
train_images = train_images / 255
test_images = test_images / 255
print(train_images.shape[0])
print(train_images[0].shape[0])
print(len(train_images[0]))
print(train_labels.shape[0])
print(test_images.shape[0])
print(test_labels.shape[0])
# 10 classes: digits 0 - 9
n_nodes_hdl_1 = 500
n_nodes_hdl_2 = 500
n_nodes_hdl_3 = 500
n_input = len(train_images[0])
n_classes = 10
batch_size = 100
learning_rate = 0.001
# Input x: a flattened 28 * 28 image = 784 features
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
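# Note: y is declared with shape [None, n_classes], i.e. one-hot rows of width 10,
# while mnist.load_data() returns integer label vectors of shape (60000,) and (10000,).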
def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([n_input, n_nodes_hdl_1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hdl_1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hdl_1, n_nodes_hdl_2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hdl_2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hdl_2, n_nodes_hdl_3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hdl_3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hdl_3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    # Each hidden layer computes relu(input * weights + biases)
    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    # Raw logits of shape (batch_size, n_classes)
    output = tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases'])
    return output
def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    # cycles: feed forward + back prop
    hm_epochs = 5
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(hm_epochs):
            epoch_loss = 0
            i = 0
            while i < len(train_images):
                start = i
                end = i + batch_size
                epoch_x = np.array(train_images[start:end])
                epoch_y = np.array(train_labels[start:end])
                # The optimizer op returns None, so discard it with _
                # (assigning it to i would clobber the loop counter)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
                i += batch_size
            print('Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss', epoch_loss)
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        print('Accuracy:', accuracy.eval({x: test_images, y: test_labels}))

train_neural_network(x)
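The shape print statements at the top output: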
60000
784
784
60000
10000
10000
I have posted the code with the error as well. The error is raised for the value being fed to the TensorFlow placeholder:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-21-f73f84d7c08f> in <module>
    134
    135
--> 136 train_neural_network(x)
    137
    138

<ipython-input-21-f73f84d7c08f> in train_neural_network(x)
    124             epoch_y = np.array(train_labels[start:end])
    125
--> 126             i,c = sess.run([optimizer,cost], feed_dict = {x:epoch_x, y:epoch_y})
    127             epoch_loss += c
    128             i +=batch_size

~/anaconda3/envs/LSTM-Human-Activity-Recognition/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    927     try:
    928       result = self._run(None, fetches, feed_dict, options_ptr,
--> 929                          run_metadata_ptr)
    930       if run_metadata:
    931         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

~/anaconda3/envs/LSTM-Human-Activity-Recognition/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1126                              'which has shape %r' %
   1127                              (np_val.shape, subfeed_t.name,
-> 1128                              str(subfeed_t.get_shape())))
   1129     if not self.graph.is_feedable(subfeed_t):
   1130       raise ValueError('Tensor %s may not be fed.' % subfeed_t)

ValueError: Cannot feed value of shape (100,) for Tensor 'Placeholder_31:0', which has shape '(?, 10)'
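I suspect the mismatch is that mnist.load_data() returns integer class labels, so each epoch_y batch has shape (100,), while the y placeholder expects one-hot rows of shape (?, 10). Below is a minimal sketch of the conversion I think is missing, using the np already imported above; the one_hot helper and the np.eye approach are my own guess, not something from the tutorial I followed:

def one_hot(labels, n_classes=10):
    # Turn integer class ids, e.g. 3, into one-hot rows,
    # e.g. [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.], so a batch of
    # labels gets shape (batch_size, n_classes).
    return np.eye(n_classes)[labels]

train_labels = one_hot(train_labels)  # (60000, 10)
test_labels = one_hot(test_labels)    # (10000, 10)

With this, epoch_y would have shape (100, 10) and match the placeholder, but I am not sure whether this is the intended fix or whether the placeholder itself should change.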