TensorFlow - error with tf.WholeFileReader, Coordinator, threads, and queues

Date: 2017-05-30 07:42:02

Tags: multithreading tensorflow queue

I am writing simple code that tries to recreate an RGB image from its grayscale version. Even if it does not learn properly yet, I would at least like the code to run. I have a question about how the threads are used. Here is the code.

with tf.variable_scope("color"): -> make variable(similar to VGG16)
def conv_layer(x, weights, biases, stride, name="convlayer", padding='SAME'):
        return tf.nn.relu(tf.nn.conv2d(x, weights, strides=stride, padding=padding) + biases, name=name)


    def read_my_file_format(filename_queue, randomize=False):
        reader = tf.WholeFileReader()
        key, file = reader.read(filename_queue)
        uint8image = tf.image.decode_jpeg(file, channels=3)
        uint8image = tf.random_crop(uint8image, (224, 224, 3))
        if randomize:
            uint8image = tf.image.random_flip_left_right(uint8image)
            uint8image = tf.image.random_flip_up_down(uint8image, seed=None)
        float_image = tf.div(tf.cast(uint8image, tf.float32), 255)
        return float_image

    def input_pipeline(filenames, batch_size, num_epochs=None):
        filename_queue = tf.train.string_input_producer(
            filenames, num_epochs=num_epochs, shuffle=False)
        example = read_my_file_format(filename_queue, randomize=False)
        min_after_dequeue = 5
        capacity = min_after_dequeue + 3 * batch_size
        example_batch = tf.train.shuffle_batch(
            [example], batch_size=batch_size, capacity=capacity,
            min_after_dequeue=min_after_dequeue)
        return example_batch

    with tf.name_scope("images_setting"):
        filenames = sorted(glob.glob("C:/example/*.jpg"))
        # filenames = ['C:/example/000005.jpg', 'C:/example/000007.jpg ~~~~']
        batch_size = 2
        num_epochs = 100

        colorimage = input_pipeline(filenames, batch_size, num_epochs=num_epochs)
        grayscale = tf.image.rgb_to_grayscale(colorimage)

    with tf.name_scope("layer_explain"):
        expand = tf.image.grayscale_to_rgb(grayscale)
        conv1_1 = conv_layer(expand, conv1_1_weights, conv1_1_biases, stride1, 'conv1_1')
        conv1_2 = conv_layer(conv1_1, conv1_2_weights, conv1_2_biases, stride1, 'conv1_2')

        conv2_1 = conv_layer(conv1_2, conv2_1_weights, conv2_1_biases, stride1, 'conv2_1')
        conv2_2 = conv_layer(conv2_1, conv2_2_weights, conv2_2_biases, stride1, 'conv2_2')

        conv3_1 = conv_layer(conv2_2, conv3_1_weights, conv3_1_biases, stride1, 'conv3_1')
        conv3_2 = conv_layer(conv3_1, conv3_2_weights, conv3_2_biases, stride1, 'conv3_2')
        conv3_3 = conv_layer(conv3_2, conv3_3_weights, conv3_3_biases, stride1, 'conv3_3')

        conv4_1 = conv_layer(conv3_3, conv4_1_weights, conv4_1_biases, stride1, 'conv4_1')
        conv4_2 = conv_layer(conv4_1, conv4_2_weights, conv4_2_biases, stride1, 'conv4_2')
        conv4_3 = conv_layer(conv4_2, conv4_3_weights, conv4_3_biases, stride1, 'conv4_3')

        conv5_1 = conv_layer(conv4_3, conv5_1_weights, conv5_1_biases, stride1, 'conv5_1')
        conv5_2 = conv_layer(conv5_1, conv5_2_weights, conv5_2_biases, stride1, 'conv5_2')
        conv5_3 = conv_layer(conv5_2, conv5_3_weights, conv5_3_biases, stride1, 'conv5_3')


    print("conv5_3: ", conv5_3)
    print("colorimage: ", colorimage)
    loss = tf.reduce_mean(tf.square(conv5_3 - colorimage))
    optimizer = tf.train.GradientDescentOptimizer(0.001)
    opt = optimizer.minimize(loss)


    init_global = tf.global_variables_initializer()
    init_local = tf.local_variables_initializer()
    sess = tf.Session()
    sess.run(init_global)
    sess.run(init_local)

    # Start input enqueue threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    print("expand: ", expand)
    print("conv1_1: ", conv1_1)

    print("grayscale: ", grayscale)
    print(filenames, '**********************')
    try:
        while not coord.should_stop():
            training_opt = sess.run(opt)

            for i in range(10):
                loss = sess.run(loss)
                print("cost: {}".format(loss))
    except Exception as ex:
        print(ex)
        print("Done training -- epoch limit reached")
    finally:
        coord.request_stop()
        coord.join(threads)
    sess.close()

Error message:

    cost: 0.2219611406326294

    Fetch argument 0.22196114 has invalid type, must be a string or Tensor. (Can not convert a float32 into a Tensor or Operation.)
    -> This is the error... I think this message means something is wrong around the loss computation.

    Done training -- epoch limit reached

1 Answer:

Answer 0 (score: 0)

The problem is this line:

    loss = sess.run(loss)

The first time that line runs, loss is a Tensor, so session.run returns its value and the Python variable loss is rebound to a plain Python/NumPy float. On the next iteration loss is no longer a Tensor or Operation, so it cannot be passed to session.run, which is exactly what the error message says.
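The same failure can be reproduced in isolation; this is a minimal sketch (the names x, loss, and sess are just for illustration), assuming TensorFlow 1.x:

    import tensorflow as tf

    x = tf.constant(3.0)
    loss = tf.square(x)       # loss is a Tensor here

    sess = tf.Session()
    loss = sess.run(loss)     # loss is now a numpy.float32 (9.0), not a Tensor
    loss = sess.run(loss)     # TypeError: Fetch argument 9.0 has invalid type ...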

Do something like

    loss_value = sess.run(loss)

and you will be fine.
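Putting it together, here is a sketch of the corrected training loop, keeping the question's variable names; catching tf.errors.OutOfRangeError instead of a bare Exception is optional, but it is the idiomatic way to detect that the epoch limit was reached:

    try:
        while not coord.should_stop():
            sess.run(opt)                    # one optimization step
            for i in range(10):
                loss_value = sess.run(loss)  # 'loss' keeps referring to the Tensor
                print("cost: {}".format(loss_value))
    except tf.errors.OutOfRangeError:
        print("Done training -- epoch limit reached")
    finally:
        coord.request_stop()
        coord.join(threads)
    sess.close()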