I want to build a CNN model framework on TensorFlow, but I'm stuck on loading the data. My dataset is MIT-300: I want to feed 6 images in as the dataset, pass them through the network, and finally compare each output with its saliency map (the ground-truth image). Both the training data and the ground truth are '.jpg' files.
I tried to load them as shown below, but I keep getting errors. Currently it reports:
OutOfRangeError (see above for traceback): FIFOQueue '_1_batch/fifo_queue' is closed and has insufficient elements (requested 3, current size 0)
[[Node: batch = QueueDequeueManyV2[component_types=[DT_FLOAT, DT_FLOAT], timeout_ms=-1, _device="/job:localhost/replica:0/task:0/cpu:0"](batch/fifo_queue, batch/n)]]
My code is as follows:
from __future__ import print_function
import tensorflow as tf
import os
IMAGE_WIDTH = 1024
IMAGE_HEIGHT = 768
NUMBER_OF_CHANNELS = 3
SOURCE_DIR = '/home/lvjc2010/Documents/mnist/data/'
TRAINING_IMAGES_DIR = SOURCE_DIR + 'train/'
LIST_FILE_NAME = 'train.txt'
BATCH_SIZE = 3
TRAINING_SET_SIZE = 6
def create_photo_and_label_batches(source_directory):
    # read the list of photo IDs and labels
    photos_list = open(source_directory + LIST_FILE_NAME, 'r')
    filenames_list = []
    labels_list = []
    # get lists of photo file names and labels
    for line in photos_list:
        filenames_list.append(source_directory + line.split(' ')[0])
        labels_list.append(source_directory + line.split(' ')[1])
    # convert the lists to tensors
    filenames = tf.convert_to_tensor(filenames_list, dtype=tf.string)
    labels = tf.convert_to_tensor(labels_list, dtype=tf.string)
    # create queue with filenames and labels
    file_names_queue, labels_names_queue = \
        tf.train.slice_input_producer([filenames, labels], shuffle=False)
    ## IT WORKED
    # convert filenames of photos to input vectors
    photos_queue = tf.read_file(file_names_queue)  # convert filenames to content
    photos_queue = tf.image.decode_image(photos_queue, channels=NUMBER_OF_CHANNELS)
    photos_queue.set_shape([IMAGE_HEIGHT, IMAGE_WIDTH, NUMBER_OF_CHANNELS])
    photos_queue = tf.cast(photos_queue, tf.float32) * (1. / 255) - 0.5  # convert uint8 to float32
    photos_queue = tf.reshape(photos_queue, [-1])  # flatten the tensor
    labels_queue = tf.read_file(labels_names_queue)  # convert filenames to content
    labels_queue = tf.image.decode_jpeg(labels_queue, channels=NUMBER_OF_CHANNELS)
    labels_queue.set_shape([IMAGE_HEIGHT, IMAGE_WIDTH, NUMBER_OF_CHANNELS])
    labels_queue = tf.to_float(labels_queue)  # convert uint8 to float32
    labels_queue = tf.reshape(labels_queue, [-1])  # flatten the tensor
    # slice the data into mini batches
    return tf.train.batch([photos_queue, labels_queue], batch_size=BATCH_SIZE)
    # return tf.train.batch(photos_queue, batch_size=BATCH_SIZE)
def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
if __name__ == '__main__':
    # load the training data
    training_photo_batch, training_label_batch = \
        create_photo_and_label_batches(TRAINING_IMAGES_DIR)
    # with tf.Session() as sess:
    #     print(sess.run(training_photo_batch))

    # create the model
    x = training_photo_batch
    # W = tf.Variable(tf.zeros([IMAGE_WIDTH * IMAGE_HEIGHT * NUMBER_OF_CHANNELS, 10],
    #                          dtype=tf.float32))  # weights tensor
    b = tf.Variable(tf.zeros(IMAGE_WIDTH * IMAGE_HEIGHT * NUMBER_OF_CHANNELS, dtype=tf.float32))  # bias
    y_ = training_label_batch
    # y = tf.matmul(x, W) + b
    y = tf.add(x, b)

    # define loss and optimizer
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    # do the training
    sess = tf.InteractiveSession()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    tf.initialize_all_variables().run()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # with tf.name_scope('accuracy'):
    #     correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    #     correct_prediction = tf.cast(correct_prediction, tf.float32)
    #     accuracy = tf.reduce_mean(correct_prediction)
    for i in range(TRAINING_SET_SIZE // BATCH_SIZE):
        print('hello')
        sess.run(train_step)
        # train_accuracy = accuracy.eval(feed_dict={
        #     x: y, y_: y_})
        # print('step %d, training accuracy %g' % (i, train_accuracy))
    # stop the queue threads and properly close the session
    coord.request_stop()
    coord.join(threads)
    sess.close()
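For completeness, here is a minimal sketch of how I check the input pipeline on its own; it reuses the constants and create_photo_and_label_batches from above, and the queue runners are started before the batch op is fetched (the names photo_batch/label_batch exist only for this check):

photo_batch, label_batch = create_photo_and_label_batches(TRAINING_IMAGES_DIR)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    # queue runners must be running before the batch op is fetched,
    # otherwise the dequeue blocks or the queue is reported as closed
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    photos, labels = sess.run([photo_batch, label_batch])
    print('photos:', photos.shape, 'labels:', labels.shape)
    coord.request_stop()
    coord.join(threads)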
Debugging with sess.run in this way, I found that it is the ground-truth images that cannot be loaded. This is the snippet I ran:
labels_queue = tf.read_file(labels_names_queue)  # convert filenames to content
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    k = sess.run(labels_queue)
    print('k', k)
labels_queue = tf.image.decode_jpeg(labels_queue, channels=NUMBER_OF_CHANNELS)
It shows:
NotFoundError (see above for traceback): /home/lvjc2010/Documents/mnist/data/train/i05june05_static_street_boston_p1010764_fixMap.jpg
[[Node: ReadFile_1 = ReadFile[_device="/job:localhost/replica:0/task:0/cpu:0"](input_producer/Gather_1)]]
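One thing I still want to rule out is whether the label path read from train.txt contains an invisible character (for example, line.split(' ')[1] keeps the trailing newline of each line). This is only a sketch of the pure-Python check I plan to run; it assumes train.txt is laid out as 'photo.jpg label.jpg' per line and reuses the constants defined above:

import os

with open(TRAINING_IMAGES_DIR + LIST_FILE_NAME) as photos_list:
    for line in photos_list:
        photo_name = line.split(' ')[0]
        label_name = line.split(' ')[1]
        # repr() makes stray characters such as a trailing '\n' visible
        print(repr(photo_name), repr(label_name))
        print('photo exists:', os.path.isfile(TRAINING_IMAGES_DIR + photo_name))
        print('label exists:', os.path.isfile(TRAINING_IMAGES_DIR + label_name))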