tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float

Asked: 2017-06-14 12:25:08

Tags: python, tensorflow

I am not sure what is going on here. Is this a bug, or have I messed something up? I don't understand why Session.run throws this exception: is it caused by a missing or invalid argument, or does the problem lie somewhere else? Also, why does the error point at the placeholder op X, when X plays no role in this test pass? These are my placeholders (a minimal sketch of what I mean follows right after them):

X = tf.placeholder(tf.float32, [None, image_height, image_width, NUM_CHANNELS])

Y = tf.placeholder(tf.float32, [None, num_classes])

X1 = tf.placeholder(tf.float32, [None, image_height, image_width, NUM_CHANNELS])

Y1 = tf.placeholder(tf.float32, [None, num_classes])
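
To show what I mean, here is a minimal standalone sketch (made-up tensors, not my actual model) of my understanding of when this error appears: as far as I can tell, it is raised whenever the fetched tensor depends on a placeholder that is missing from feed_dict.

import numpy as np
import tensorflow as tf

a = tf.placeholder(tf.float32, [None, 2])    # stands in for X
b = tf.placeholder(tf.float32, [None, 2])    # stands in for X1
mixed = tf.reduce_sum(a) + tf.reduce_sum(b)  # depends on BOTH placeholders

with tf.Session() as sess:
    # Feeding only b raises InvalidArgumentError:
    # "You must feed a value for placeholder tensor 'Placeholder' with dtype float"
    # because 'mixed' also needs a value for a.
    sess.run(mixed, feed_dict={b: np.ones((3, 2), dtype=np.float32)})

If that understanding is correct, then something must be making Y1_pred depend on X even though I only feed X1 and Y1 in the test step, and I don't see where.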

I am calling:

train_cnn(X, X1)

My code is:

def train_cnn(X, X1):
    Y_prediction = my_conv_net(X)
    Y1_prediction = my_conv_net(X1)
    # print(Y_prediction)
    # print(Y1_prediction)
    Y_pred = tf.nn.softmax(Y_prediction)
    Y1_pred = tf.nn.softmax(Y1_prediction)
    # Y_prediction_class = tf.argmax(Y_prediction, dimension=1)
    # print(Y_pred)
    # print(Y1_pred)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Y_prediction, labels=Y))
    optimiser = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    # hm_epochs = 8
    f2 = open(r'D:\Speech_Project\oplog\accuracy_log.txt', 'a+')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        train_loss = []
        train_acc = []
        test_acc = []
        for i in range(num_itr):
            # print(i)
            # training results
            rand_index = np.random.choice(len(features), size=BATCH_SIZE)
            rand_x = features[rand_index]
            rand_x = np.expand_dims(rand_x, 3)
            rand_y = labels[rand_index]
            train_dict = {X: rand_x, Y: rand_y}
            # f2.write("\n Train Dictionary: " + str(train_dict) +"\n\n")
            # print(train_dict)
            sess.run(optimiser, feed_dict=train_dict)
            # print(Y_pred)
            # print(train_dict)
            temp_train_loss, temp_train_preds = sess.run([cost, Y_pred], feed_dict=train_dict)
            # print(temp_train_preds)
            # print(rand_y)
            temp_train_acc = get_accuracy(temp_train_preds, rand_y)
            print('Iteration: ' + str(i+1))
            if (i+1) % eval_every == 0:
                # testing results
                print('Iteration1: ' + str(i+1))
                test_index = np.random.choice(len(test_features), size=BATCH_SIZE_1)
                test_x = test_features[test_index]
                test_x = np.expand_dims(test_x, 3)
                test_y = test_labels[test_index]
                # print(test_x)
                # print(test_y)
                eval_dict = {X1: test_x, Y1: test_y}
                # print(eval_dict)
                # f2.write("\n Test Dictionary: " + str(eval_dict) + "\n\n")
                # print(Y1_pred)
                # print(eval_dict)
                temp_test_preds = sess.run(Y1_pred, feed_dict=eval_dict)
                print('Iteration2: ' + str(i+1))
                temp_test_acc = get_accuracy(temp_test_preds, test_y)
                # record and print results
                train_loss.append(temp_train_loss)
                train_acc.append(temp_train_acc)
                test_acc.append(temp_test_acc)
                acc_and_loss = [(i+1), temp_train_loss, temp_train_acc, temp_test_acc]
                # acc_and_loss = [(i + 1), temp_train_loss, temp_train_acc]
                acc_and_loss = [np.round(x, 2) for x in acc_and_loss]
                # print('Iteration # {}. Train Loss: {:.2f}. Train Accuracy: {:.2f}.'.format(*acc_and_loss))
                print('Iteration # {}. Train Loss: {:.2f}. Train Accuracy: {:.2f}. Test Accuracy: {:.2f}.'.format(*acc_and_loss))
                f2.write("Iteration Number :\n" + str(i+1) + "\n\n")
                f2.write("Training Predictions :\n" + str(temp_train_preds) + "\n\n")
                f2.write("Training Labels are :\n" + str(rand_y) + "\n\n")
                f2.write("Testing Predictions :\n" + str(temp_test_preds) + "\n\n")
                f2.write("Testing Labels are :\n" + str(test_y) + "\n\n")
                f2.write("Iteration # " + str(i+1) + ", Train loss: " + str(temp_train_loss) + ", Train Accuracy: " + str(temp_train_acc) + ", Test Accuracy: " + str(temp_test_acc) + "\n\n")
                # f2.write("Iteration # " + str(i + 1) + ", Train loss: " + str(temp_train_loss) + ", Train Accuracy: " + str(temp_train_acc) + "\n\n")
        save_path = saver.save(sess, r'D:\Speech_Project\my_model\my_model_01')
        print("Model saved in file: %s" % save_path)
        f2.write("Model saved in file: %s" % save_path)
    f2.close()

I am getting this error:

Traceback (most recent call last):
  File "C:\Users\admin\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1022, in _do_call
    return fn(*args)
  File "C:\Users\admin\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1004, in _run_fn
    status, run_metadata)
  File "C:\Users\admin\AppData\Local\Programs\Python\Python35\lib\contextlib.py", line 66, in __exit__
    next(self.gen)
  File "C:\Users\admin\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\errors_impl.py", line 466, in raise_exception_on_not_ok_status
    pywrap_tensorflow.TF_GetCode(status))
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float
 [[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "C:/Users/admin/PycharmProjects/BanglaCNN01/banglacnn02.py", line 289, in <module>
    train_cnn(X, X1)
  File "C:/Users/admin/PycharmProjects/BanglaCNN01/banglacnn02.py", line 218, in train_cnn
    temp_test_preds = sess.run(Y1_pred, feed_dict=eval_dict)
  File "C:\Users\admin\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 767, in run
    run_metadata_ptr)
  File "C:\Users\admin\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 965, in _run
    feed_dict_string, options, run_metadata)
  File "C:\Users\admin\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1015, in _do_run
    target_list, options, run_metadata)
  File "C:\Users\admin\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1035, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype float
 [[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]

Caused by op 'Placeholder', defined at:
  File "C:/Users/admin/PycharmProjects/BanglaCNN01/banglacnn02.py", line 285, in <module>
    X = tf.placeholder(tf.float32, [None, image_height, image_width, NUM_CHANNELS])
  File "C:\Users\admin\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\ops\array_ops.py", line 1502, in placeholder
    name=name)
  File "C:\Users\admin\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 2149, in _placeholder
    name=name)
  File "C:\Users\admin\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 763, in apply_op
    op_def=op_def)
  File "C:\Users\admin\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 2327, in create_op
    original_op=self._default_original_op, op_def=op_def)
  File "C:\Users\admin\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 1226, in __init__
    self._traceback = _extract_stack()

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder' with dtype float
 [[Node: Placeholder = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]


Process finished with exit code 1

Any insight into why this error/exception occurs, and how to fix it, would be appreciated.
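
One debugging idea (just a sketch; find_placeholders is a hypothetical helper, not part of my project) would be to walk the graph backwards from the tensor being fetched and list every Placeholder op it depends on, then compare that list against the keys of the feed_dict:

def find_placeholders(tensor):
    # Walk the graph backwards from `tensor` and collect the names of all
    # Placeholder ops it (transitively) depends on.
    seen, stack, placeholders = set(), [tensor.op], []
    while stack:
        op = stack.pop()
        if op in seen:
            continue
        seen.add(op)
        if op.type == 'Placeholder':
            placeholders.append(op.name)
        stack.extend(inp.op for inp in op.inputs)
    return placeholders

# e.g. print(find_placeholders(Y1_pred)) just before sess.run(Y1_pred, feed_dict=eval_dict)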

The code for my_conv_net():

def my_conv_net(input_data):
    # first convolutional layer: weights, biases, non-linearity, pooling included
    w_conv1 = weight_variable([4, 4, 1, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(X, w_conv1) + b_conv1)
    # print(h_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    # print(h_pool1)
    # first fc layer
    w_fc1 = weight_variable([13*13*32, 1024])
    b_fc1 = bias_variable([1024])
    h_pool1_flat = tf.reshape(h_pool1, [-1, 13*13*32])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool1_flat, w_fc1) + b_fc1)
    # for dropout
    # h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # final output layer
    w_fc2 = weight_variable([1024, 4])
    b_fc2 = bias_variable([4])
    final_model_output = tf.matmul(h_fc1, w_fc2) + b_fc2
    # print(final_model_output)
    # final_model_output_sm = tf.nn.softmax(final_model_output)
    # print(final_model_output_sm)
    return final_model_output

0 Answers:

There are no answers yet.