How to solve: "ValueError: Validation size should be between 0 and 0. Received: 5000."?

Date: 2018-02-21 13:20:13

Tags: python tensorflow mnist

I am trying to build a character recognition classifier for the Bangla alphabet. The images are 50x50 pixels, and there are 50 classes in total. I am training with the CNN model below, but I get this error: "ValueError: Validation size should be between 0 and 0. Received: 5000." How can I fix this?

Model



# Python 3.6.0
# tensorflow 1.1.0

import os
import os.path as path

import tensorflow as tf
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib

from tensorflow.examples.tutorials.mnist import input_data

MODEL_NAME = 'mnist_convnet'
NUM_STEPS = 3000
BATCH_SIZE = 16

def model_input(input_node_name, keep_prob_node_name):
    x = tf.placeholder(tf.float32, shape=[None, 50*50], name=input_node_name)
    keep_prob = tf.placeholder(tf.float32, name=keep_prob_node_name)
    y_ = tf.placeholder(tf.float32, shape=[None, 50])
    return x, keep_prob, y_

def build_model(x, keep_prob, y_, output_node_name):
    x_image = tf.reshape(x, [-1, 50, 50, 1])
    # 50*50*1

    conv1 = tf.layers.conv2d(x_image, 64, 3, 1, 'same', activation=tf.nn.relu)
    # 50*50*64
    pool1 = tf.layers.max_pooling2d(conv1, 2, 2, 'same')
    # 25*25*64  (50x50 halved by 'same' pooling: ceil(50/2) = 25)

    conv2 = tf.layers.conv2d(pool1, 128, 3, 1, 'same', activation=tf.nn.relu)
    # 25*25*128
    pool2 = tf.layers.max_pooling2d(conv2, 2, 2, 'same')
    # 13*13*128  (ceil(25/2) = 13)

    conv3 = tf.layers.conv2d(pool2, 256, 3, 1, 'same', activation=tf.nn.relu)
    # 13*13*256
    pool3 = tf.layers.max_pooling2d(conv3, 2, 2, 'same')
    # 7*7*256  (ceil(13/2) = 7)

    flatten = tf.reshape(pool3, [-1, 7*7*256])
    fc = tf.layers.dense(flatten, 1024, activation=tf.nn.relu)
    dropout = tf.nn.dropout(fc, keep_prob)
    logits = tf.layers.dense(dropout, 50)
    outputs = tf.nn.softmax(logits, name=output_node_name)

    # loss
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))

    # train step
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

    # accuracy
    correct_prediction = tf.equal(tf.argmax(outputs, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    tf.summary.scalar("loss", loss)
    tf.summary.scalar("accuracy", accuracy)
    merged_summary_op = tf.summary.merge_all()

    return train_step, loss, accuracy, merged_summary_op

def train(x, keep_prob, y_, train_step, loss, accuracy,
        merged_summary_op, saver):
    print("training start...")

    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init_op)

        tf.train.write_graph(sess.graph_def, 'out',
            MODEL_NAME + '.pbtxt', True)

        # op to write logs to Tensorboard
        summary_writer = tf.summary.FileWriter('logs/',
            graph=tf.get_default_graph())

        for step in range(NUM_STEPS):
            batch = mnist.train.next_batch(BATCH_SIZE)
            if step % 100 == 0:
                train_accuracy = accuracy.eval(feed_dict={
                    x: batch[0], y_: batch[1], keep_prob: 1.0})
                print('step %d, training accuracy %f' % (step, train_accuracy))
            _, summary = sess.run([train_step, merged_summary_op],
                feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
            summary_writer.add_summary(summary, step)

        saver.save(sess, 'out/' + MODEL_NAME + '.chkp')

        test_accuracy = accuracy.eval(feed_dict={x: mnist.test.images,
                                    y_: mnist.test.labels,
                                    keep_prob: 1.0})
        print('test accuracy %g' % test_accuracy)

    print("training finished!")

def export_model(input_node_names, output_node_name):
    freeze_graph.freeze_graph('out/' + MODEL_NAME + '.pbtxt', None, False,
        'out/' + MODEL_NAME + '.chkp', output_node_name, "save/restore_all",
        "save/Const:0", 'out/frozen_' + MODEL_NAME + '.pb', True, "")

    input_graph_def = tf.GraphDef()
    with tf.gfile.Open('out/frozen_' + MODEL_NAME + '.pb', "rb") as f:
        input_graph_def.ParseFromString(f.read())

    output_graph_def = optimize_for_inference_lib.optimize_for_inference(
            input_graph_def, input_node_names, [output_node_name],
            tf.float32.as_datatype_enum)

    with tf.gfile.FastGFile('out/opt_' + MODEL_NAME + '.pb', "wb") as f:
        f.write(output_graph_def.SerializeToString())

    print("graph saved!")

def main():
    if not path.exists('out'):
        os.mkdir('out')

    input_node_name = 'input'
    keep_prob_node_name = 'keep_prob'
    output_node_name = 'output'

    x, keep_prob, y_ = model_input(input_node_name, keep_prob_node_name)

    train_step, loss, accuracy, merged_summary_op = build_model(x, keep_prob, y_, output_node_name)
    saver = tf.train.Saver()

    train(x, keep_prob, y_, train_step, loss, accuracy, merged_summary_op, saver)

    export_model([input_node_name, keep_prob_node_name], output_node_name)

if __name__ == '__main__':
    main()




Error



ValueError                                Traceback (most recent call last)
<ipython-input-2-2015e0ea466d> in <module>()
    136 
    137 if __name__ == '__main__':
--> 138     main()

<ipython-input-2-2015e0ea466d> in main()
    131     saver = tf.train.Saver()
    132 
--> 133     train(x, keep_prob, y_, train_step, loss, accuracy, merged_summary_op, saver)
    134 
    135     export_model([input_node_name, keep_prob_node_name], output_node_name)

<ipython-input-2-2015e0ea466d> in train(x, keep_prob, y_, train_step, loss, accuracy, merged_summary_op, saver)
     67     print("training start...")
     68 
---> 69     mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
     70 
     71     init_op = tf.global_variables_initializer()

/anaconda3/envs/nlpTFnltk/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py in read_data_sets(train_dir, fake_data, one_hot, dtype, reshape, validation_size)
    247     raise ValueError(
    248         'Validation size should be between 0 and {}. Received: {}.'
--> 249         .format(len(train_images), validation_size))
    250 
    251   validation_images = train_images[:validation_size]

ValueError: Validation size should be between 0 and 0. Received: 5000.

2 Answers:

Answer 0 (score: 0)

Your exception reports that len(train_images) is zero:

'Validation size should be between 0 and {}. Received: {}.'
    .format(len(train_images), validation_size))
  

which is why the message renders as "Validation size should be between 0 and 0."

I think your best move is to start by checking the image directory MNIST_data/.
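
For example, a quick sanity check of that directory (a minimal sketch; the archive names are the ones read_data_sets looks for, and MNIST_data/ is the directory passed in your code). A missing or zero-byte train-images archive would explain the empty training set:

import os

for fname in ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz',
              't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']:
    fpath = os.path.join('MNIST_data', fname)
    if os.path.exists(fpath):
        # A healthy train-images archive is roughly 9-10 MB.
        print('%s: %d bytes' % (fname, os.path.getsize(fpath)))
    else:
        print('%s: MISSING' % fname)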

Answer 1 (score: 0)

You are using the MNIST tutorial code, which calls read_data_sets from here; note that the validation_size of 5000 comes from that function's default argument. It expects to find its data in the following files:

TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'

Normally it will try to download those files if it can't find them, but the fact that the upper bound in your error is 0 (i.e. len(train_images) is 0) suggests that this didn't happen. In any case, that wouldn't help you, because you don't want to use the MNIST data anyway.
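
For reference, this is where the 5000 comes from; the default can be overridden, though that only silences the error rather than fixing the empty training set:

from tensorflow.examples.tutorials.mnist import input_data

# read_data_sets defaults to validation_size=5000, carved off the front of the
# training set -- hence the check against len(train_images), which is 0 here.
# Passing validation_size=0 suppresses the ValueError, but the training set
# would still be empty, so this is not a real fix.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, validation_size=0)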

Even if you renamed your train and test files to match the file names above, your code still wouldn't work, because the MNIST code also calls extract_labels, which has a default argument of num_classes=10, whereas you have 50 classes. Your best bet is probably to get rid of the MNIST imports entirely and read up on how to set up an input pipeline (see the sketch below); it isn't hard compared to what you've already done.
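
Here is a minimal sketch of such a pipeline using plain numpy and feed_dict, which works even on TensorFlow 1.1 (tf.data did not exist yet). The data/train/<class_id>/ directory layout and the load_dataset/next_batch helpers are hypothetical assumptions for illustration; adapt them to however your Bangla images are actually stored:

import os
import numpy as np
from PIL import Image  # pip install pillow

NUM_CLASSES = 50
IMG_SIZE = 50

def load_dataset(root_dir):
    # Expects root_dir/<class_id>/<image file>, with class ids 0..49.
    images, labels = [], []
    for class_id in range(NUM_CLASSES):
        class_dir = os.path.join(root_dir, str(class_id))
        for fname in os.listdir(class_dir):
            # Grayscale, flattened to 2500 floats in [0, 1], matching the
            # [None, 50*50] placeholder in the question's model.
            img = Image.open(os.path.join(class_dir, fname)).convert('L')
            images.append(
                np.asarray(img, dtype=np.float32).reshape(IMG_SIZE * IMG_SIZE) / 255.0)
            one_hot = np.zeros(NUM_CLASSES, dtype=np.float32)
            one_hot[class_id] = 1.0
            labels.append(one_hot)
    return np.stack(images), np.stack(labels)

def next_batch(images, labels, batch_size):
    # Random sampling, mirroring what mnist.train.next_batch() provides.
    idx = np.random.randint(0, len(images), batch_size)
    return images[idx], labels[idx]

Inside train() you would then drop the input_data import and replace the MNIST calls with something like train_images, train_labels = load_dataset('data/train') and batch = next_batch(train_images, train_labels, BATCH_SIZE); the final test-set evaluation gets the same treatment with a separate test directory.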