I've resolved a number of other errors, but I've never seen this one, and even after doing some research I'm still not sure what the problem is or how to fix it.
I suspect the data needs to be reshaped at some point, but what I don't understand is why this is a problem, or what the sizes [1,2] and [1,1] actually mean.
The data fed into the script is [128 x 128 x 128 ndarray, binary label].
The code I am using is:
import tensorflow as tf
import numpy as np
import os
import math
# input arrays
x = tf.placeholder(tf.float32, [None, 128, 128, 128, 1])
# labels
y = tf.placeholder(tf.float32, None)
# learning rate
lr = tf.placeholder(tf.float32)
##### Code for ConvNet is here #####
# Data
INPUT_FOLDER = 'data/cubed_data/pp/labelled'
images = os.listdir(INPUT_FOLDER)
images.sort()
td = []
count = 1
for i in images:
    im = np.load(INPUT_FOLDER + "/" + i)
    data = im[0]
    data = np.reshape(data, (128, 128, 128, 1))
    label = im[1]
    lbd = [data, label]
    td.append(lbd)
test_data = td[:100]
train_data = td[100:]
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=fc3l, labels=y)
correct_prediction = tf.equal(tf.argmax(probs, 1), tf.argmax(y, 0))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)
# init
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
def training_step(i, update_test_data, update_train_data):
    for a in range(len(train_data)):
        batch = train_data[a]
        batch_x = batch[0]
        batch_y = batch[1]

        # learning rate decay
        max_learning_rate = 0.003
        min_learning_rate = 0.0001
        decay_speed = 2000.0
        learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i / decay_speed)

        if update_train_data:
            a, c = sess.run([accuracy, cross_entropy], {x: [batch_x], y: [batch_y]})
            print(str(i) + ": accuracy:" + str(a) + " loss: " + str(c) + " (lr:" + str(learning_rate) + ")")

        if update_test_data:
            a, c = sess.run([accuracy, cross_entropy], {x: [test_data[0]], y: [test_data[1]]})
            print(str(i) + ": ********* epoch " + " ********* test accuracy:" + str(a) + " test loss: " + str(c))

        sess.run(train_step, {x: [batch_x], y: [batch_y], lr: learning_rate})

for q in range(10000 + 1):
    training_step(q, q % 100 == 0, q % 20 == 0)
...which fails with:
Invalid argument: logits and labels must be same size: logits_size=[1,2] labels_size=[1,1]
[[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](Reshape, Reshape_1)]]
Traceback (most recent call last):
File "/home/entelechy/tfenv/lib/python3.5/site-packages/tensorflow/python/client/session.py", line 972, in _do_call
return fn(*args)
File "/home/entelechy/tfenv/lib/python3.5/site-packages/tensorflow/python/client/session.py", line 954, in _run_fn
status, run_metadata)
File "/usr/lib/python3.5/contextlib.py", line 66, in __exit__
next(self.gen)
File "/home/entelechy/tfenv/lib/python3.5/site-packages/tensorflow/python/framework/errors.py", line 463, in raise_exception_on_not_ok_status
pywrap_tensorflow.TF_GetCode(status))
tensorflow.python.framework.errors.InvalidArgumentError: logits and labels must be same size: logits_size=[1,2] labels_size=[1,1]
[[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](Reshape, Reshape_1)]]
[[Node: Reshape_2/_7 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/cpu:0", send_device="/job:localhost/replica:0/task:0/gpu:0", send_device_incarnation=1, tensor_name="edge_233_Reshape_2", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "tfvgg.py", line 293, in <module>
training_step(q, q % 100 == 0, q % 20 == 0)
File "tfvgg.py", line 282, in training_step
a, c = sess.run([accuracy, cross_entropy], {x: [batch_x], y: [batch_y]})
File "/home/entelechy/tfenv/lib/python3.5/site-packages/tensorflow/python/client/session.py", line 717, in run
run_metadata_ptr)
File "/home/entelechy/tfenv/lib/python3.5/site-packages/tensorflow/python/client/session.py", line 915, in _run
feed_dict_string, options, run_metadata)
File "/home/entelechy/tfenv/lib/python3.5/site-packages/tensorflow/python/client/session.py", line 965, in _do_run
target_list, options, run_metadata)
File "/home/entelechy/tfenv/lib/python3.5/site-packages/tensorflow/python/client/session.py", line 985, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors.InvalidArgumentError: logits and labels must be same size: logits_size=[1,2] labels_size=[1,1]
[[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](Reshape, Reshape_1)]]
[[Node: Reshape_2/_7 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/cpu:0", send_device="/job:localhost/replica:0/task:0/gpu:0", send_device_incarnation=1, tensor_name="edge_233_Reshape_2", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op 'SoftmaxCrossEntropyWithLogits', defined at:
File "tfvgg.py", line 254, in <module>
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=fc3l, labels=y)
File "/home/entelechy/tfenv/lib/python3.5/site-packages/tensorflow/python/ops/nn_ops.py", line 676, in softmax_cross_entropy_with_logits
precise_logits, labels, name=name)
File "/home/entelechy/tfenv/lib/python3.5/site-packages/tensorflow/python/ops/gen_nn_ops.py", line 1744, in _softmax_cross_entropy_with_logits
features=features, labels=labels, name=name)
File "/home/entelechy/tfenv/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py", line 749, in apply_op
op_def=op_def)
File "/home/entelechy/tfenv/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 2380, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/home/entelechy/tfenv/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 1298, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): logits and labels must be same size: logits_size=[1,2] labels_size=[1,1]
[[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](Reshape, Reshape_1)]]
[[Node: Reshape_2/_7 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/cpu:0", send_device="/job:localhost/replica:0/task:0/gpu:0", send_device_incarnation=1, tensor_name="edge_233_Reshape_2", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Answer 0 (score: 2):
After looking more closely, I found that the problem was that the third fully connected layer outputs 2 classes, while each label is a single binary value. I changed the last fully connected layer to account for a single class, and the error was resolved.
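For reference, a minimal sketch of the kind of change described above. Since the ConvNet code is elided in the question, the names fc2, W_fc3, b_fc3 and the 4096 input width are assumptions, not the asker's actual layer:

# Hypothetical final fully connected layer; fc2, W_fc3, b_fc3 and the
# 4096 input width are assumed, as the real ConvNet code is not shown.
# The key change is n_outputs = 1, so the logits have shape [batch, 1]
# and match the [batch, 1] labels instead of producing [batch, 2].
n_outputs = 1
W_fc3 = tf.Variable(tf.truncated_normal([4096, n_outputs], stddev=0.1))
b_fc3 = tf.Variable(tf.constant(0.1, shape=[n_outputs]))
fc3l = tf.matmul(fc2, W_fc3) + b_fc3  # logits: [batch, 1]

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=fc3l, labels=y)

An alternative is to keep two output classes and make the labels match instead, either by one-hot encoding them to shape [batch, 2] or by using tf.nn.sparse_softmax_cross_entropy_with_logits with integer class indices.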