我使用 TFRecord 作为输入。现在我需要三组批量输入：第一组 image_batch 和 label_batch 没有问题，但第二组 posimage_batch、poslabel_batch 会报错。
我看过很多关于 RandomShuffleQueue 报错问题的帖子，但「加上 tf.local_variables_initializer()」这个答案无法解决我的错误，因为那些帖子都只用一组 batch_data 和 batch_label 作为输入，我不知道如何处理三组输入。我在网上搜索了很长时间，但没有用。请帮助我，或者提供一些实现思路。
def real_read_and_decode(filename):
    """Build TF 1.x graph ops that read one (image, one-hot label) example
    from the given TFRecord file.

    Returns a pair of tensors: a [WIDTH, HEIGHT, 3] uint8 image and a
    NUM_CLASSES-length one-hot label vector.
    """
    # Queue that cycles over the (single) input file.
    file_queue = tf.train.string_input_producer([filename])
    record_reader = tf.TFRecordReader()
    _, serialized = record_reader.read(file_queue)
    # Parse the serialized Example proto into its two fixed-length features.
    parsed = tf.parse_single_example(
        serialized,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
        })
    # Raw bytes -> flat uint8 tensor, then pin the static image shape.
    image = tf.decode_raw(parsed['img_raw'], tf.uint8)
    image = tf.reshape(image, [WIDTH, HEIGHT, 3])
    # Scalar int64 label -> int32 -> one-hot vector.
    class_id = tf.cast(parsed['label'], tf.int32)
    one_hot = tf.one_hot(class_id, NUM_CLASSES)
    return image, one_hot
def main():
    """Build three TFRecord input pipelines (anchor / positive / negative)
    and run the training loop.

    Fix for the RandomShuffleQueue "insufficient elements" crash: when any
    of the three readers exhausts its file, TF raises
    tf.errors.OutOfRangeError on the next sess.run. That exception must be
    caught, and the queue-runner threads must be stopped and joined in a
    finally block, so the program shuts down cleanly instead of dying.
    """
    image, label = read_and_decode("sketch_train.tfrecords")
    posimage, poslabel = real_read_and_decode("pos_train.tfrecords")
    negimage, neglabel = real_read_and_decode("neg_train.tfrecords")

    # One shuffle batch per stream; each creates its own queue + runner threads.
    image_batch, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=BATCH_SIZE, capacity=1500, min_after_dequeue=1000)
    posimage_batch, poslabel_batch = tf.train.shuffle_batch(
        [posimage, poslabel],
        batch_size=BATCH_SIZE, capacity=1500, min_after_dequeue=1000)
    negimage_batch, neglabel_batch = tf.train.shuffle_batch(
        [negimage, neglabel],
        batch_size=BATCH_SIZE, capacity=1500, min_after_dequeue=1000)

    with tf.Session(config=config) as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            for i in range(ITERATION):
                if coord.should_stop():
                    print('corrd break!!!!!!')
                    break
                # Pull one batch from each of the three pipelines.
                example_train, l_train = sess.run([image_batch, label_batch])
                example_train2, l_train2 = sess.run([posimage_batch, poslabel_batch])
                example_train3, l_train3 = sess.run([negimage_batch, neglabel_batch])
                _, loss_v = sess.run(
                    [train_step, loss],
                    feed_dict={x1: example_train, y1: l_train,
                               x2: example_train2, y2: l_train2,
                               x3: example_train3, y3: l_train3})
        except tf.errors.OutOfRangeError:
            # A reader ran out of records -- the expected end-of-data signal.
            print('Done training -- limit reached')
        finally:
            # Always stop and join the queue-runner threads, even on error.
            coord.request_stop()
            coord.join(threads)
因为我是新用户，而且我的英语不好，希望你不要介意。
答案 0（得分：1）
您可能只需要对 OutOfRangeError 异常加一些处理——这种异常预计迟早会发生：
# Wrap the whole training loop so the reader's end-of-data signal
# (tf.errors.OutOfRangeError) terminates training cleanly, and the
# queue-runner threads are always stopped and joined on the way out.
try:
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for i in range(ITERATION):
        # .... (training steps go here)
        pass
except tf.errors.OutOfRangeError:
    print('Done training -- limit reached')
finally:
    coord.request_stop()
    # Wait for the runner threads to actually finish before exiting.
    coord.join(threads)