我知道这个问题已经被问了很多,但是我无法解决。
我已经为图像建立了一个cnn和preprocceprocing,并且一切正常,直到我在feed_dict
中设置了sess.run
参数为止。我有24个课程,图像大小为(64,64)。 imagepaths
和labels
都是路径列表。
imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string)
labels = tf.convert_to_tensor(labels, dtype=tf.int32)
image, label = tf.train.slice_input_producer([imagepaths, labels],
shuffle=True)
#(here) i did decoding,resing and normalizing
X, Y = tf.train.batch([image, label], batch_size=batch_size,
capacity=batch_size * 8,
num_threads=4)
place_holder_X = tf.placeholder(tf.string,(128, 64, 64, 3), name="Input")
place_holder_Y = tf.placeholder(tf.int32,(128,None), name="Target")
with tf.Session() as sess:
sess.run(init)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
try:
for step in range(1, num_steps+1):
#print("step = ",step,"display step",display_step,end=' ')
if coord.should_stop():
break
if step % display_step == 0:
images, labels = sess.run([X, Y])
_, loss, acc = sess.run([train_op, loss_op, accuracy], feed_dict={place_holder_X: images, place_holder_Y: labels})
print("Step " + str(step) + ", Minibatch Loss= " + \
"{:.4f}".format(loss) + ", Training Accuracy= " + \
"{:.3f}".format(acc))
else:
# Only run the optimization op (backprop)
sess.run(train_op)
except Exception as e:
coord.request_stop(e)
finally:
coord.request_stop()
coord.join(threads)
print("Optimization Finished!")
print("Time taken: %f" % (time.time() - startTime))
saver = tf.train.Saver()
saver.save(sess, "./models1/my_tf_model.ckpt")