Computing training and validation accuracy with TFLearn

Posted: 2018-07-04 18:37:27

Tags: tensorflow tflearn

This is an example from the TFLearn documentation. It shows how to combine TFLearn and TensorFlow by using the TFLearn trainer with a regular TensorFlow graph. However, the computation of training, test, and validation accuracy is omitted.

from __future__ import print_function

import tensorflow as tf
import tflearn

# --------------------------------------
# High-Level API: Using TFLearn wrappers
# --------------------------------------

# Using MNIST Dataset
import tflearn.datasets.mnist as mnist
mnist_data = mnist.read_data_sets(one_hot=True)

# User defined placeholders
with tf.Graph().as_default():
    # Placeholders for data and labels
    X = tf.placeholder(shape=(None, 784), dtype=tf.float32)
    Y = tf.placeholder(shape=(None, 10), dtype=tf.float32)

    net = tf.reshape(X, [-1, 28, 28, 1])

    # Using TFLearn wrappers for network building
    net = tflearn.conv_2d(net, 32, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)
    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 128, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 256, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 10, activation='linear')

    # Defining other ops using Tensorflow
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=net, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

    # Initializing the variables
    init = tf.global_variables_initializer()

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)

        batch_size = 128
        for epoch in range(2):  # 2 epochs
            avg_cost = 0.
            total_batch = int(mnist_data.train.num_examples / batch_size)
            for i in range(total_batch):
                batch_xs, batch_ys = mnist_data.train.next_batch(batch_size)
                sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys})
                cost = sess.run(loss, feed_dict={X: batch_xs, Y: batch_ys})
                avg_cost += cost / total_batch
                if i % 20 == 0:
                    print("Epoch:", '%03d' % (epoch + 1), "Step:", '%03d' % i,
                          "Loss:", str(cost))`

The last line is where the cost is computed. What should the code look like if I also want to compute the training and validation accuracy?
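One conventional way to do this (a minimal sketch added here for illustration, not part of the original example) is to build an in-graph accuracy op next to the loss and evaluate it on whichever split is fed in; `read_data_sets(one_hot=True)` already provides a validation split:

# Accuracy op: argmax over the raw logits works because softmax is monotonic.
correct = tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1))
accuracy_op = tf.reduce_mean(tf.cast(correct, tf.float32))

# Inside the training loop, evaluate on the current batch...
train_acc = sess.run(accuracy_op, feed_dict={X: batch_xs, Y: batch_ys})
# ...and on the held-out validation split.
valid_acc = sess.run(accuracy_op,
                     feed_dict={X: mnist_data.validation.images,
                                Y: mnist_data.validation.labels})
print("Train accuracy: %.4f  Validation accuracy: %.4f" % (train_acc, valid_acc))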


EDIT: I have pieced together some code that I believe computes the training and validation accuracy inside the training loop.

Does my solution do what I intend, i.e. compute a running accuracy while the model trains?

Is there a better way to do this in TFLearn? I have noticed that TensorBoard is quite extensive. Can this data be retrieved from the event logs? (One way to read the logs back is sketched after the code below.)

def accuracy(predictions, labels):
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
            / predictions.shape[0])

tf_train_dataset = tf.placeholder(
    tf.float32, shape=(batch_size, image_size, image_size, num_channels))

data = tf.reshape(tf_train_dataset, [-1, image_size, image_size, num_channels])

network = input_data(shape=[None, image_size, image_size, num_channels],
                        #placeholder=data,
                        data_preprocessing=feature_normalization,
                        data_augmentation=None,
                        name='input_d')

network = conv_2d(network,
            nb_filter=num_channels,
            filter_size=patch_size,
            strides=[1, 2, 2, 1],
            padding='SAME',
            activation='relu',
            bias=True,
            weights_init=weight_init_trn,
            bias_init=weight_init_zro,
            restore=True,
            regularizer=None)

network = conv_2d(network,
            nb_filter=depth,
            filter_size=patch_size,
            strides=[1, 2, 2, 1],
            padding='SAME',
            activation='relu',
            bias=True,
            weights_init=weight_init_trn,
            bias_init=tf.constant(1.0, shape=[depth]),
            restore=True,
            regularizer=None)

network = fully_connected(network,
                            n_units=num_hidden,
                            activation='relu',
                            bias=True,
                            weights_init=weight_init_trn,
                            bias_init=tf.constant(1.0, shape=[num_hidden]),
                            regularizer=None,
                            restore=True
                            )

network = fully_connected(network,
                            n_units=num_labels,
                            activation=None,
                            bias=True,
                            weights_init=weight_init_trn,
                            bias_init=tf.constant(1.0, shape=[num_labels]),
                            regularizer=None,
                            restore=True,
                            name='fullc'
                            )
network = activation(network, 'softmax')

network = regression(network, optimizer='SGD',
                    loss='categorical_crossentropy',
                    learning_rate=0.05, name='targets')

model_dnn_tr = tflearn.DNN(network, tensorboard_verbose=0)

num_steps = 1001
with tf.Session(graph=graph) as session:

    tf.global_variables_initializer().run()
    print('Initialized')

    for step in range(num_steps):
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
        batch_labels = train_labels[offset:(offset + batch_size), :]

        loss = model_dnn_tr.fit_batch({'input_d': batch_data}, {'targets': batch_labels})

        if (step % 50 == 0):
            trainAccr = accuracy(model_dnn_tr.predict({'input_d': batch_data}), batch_labels)

            validAccr = accuracy(model_dnn_tr.predict({'input_d': valid_dataset}), valid_labels)

            print("Minibatch accuracy: %.1f%%" % trainAccr)
            print("Validation accuracy: %.1f%%" % validAccr)

testAccr = accuracy(model_dnn_tr.predict({'input_d': test_dataset}), test_labels)

print("testAccr time:", round(time()-t0,3),"s")

print("Test accuracy: %.1f%%" % testAccr)

1 Answer:

Answer 0 (score: 0):

The most satisfactory solution I have found so far:

  • Use a Dataset object and an iterator to feed the data. The core pattern is sketched below, followed by the full code.
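In isolation, the feeding pattern looks roughly like this (a minimal sketch; the array names are placeholders, not variables from the code below):

    data_ph = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    labels_ph = tf.placeholder(tf.float32, shape=(None, 10))
    batch_size_ph = tf.placeholder(tf.int64)

    # One pipeline serves every split; .repeat() keeps it from exhausting.
    dataset = tf.data.Dataset.from_tensor_slices((data_ph, labels_ph)).batch(batch_size_ph).repeat()
    iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
    feature, label = iterator.get_next()
    init_op = iterator.make_initializer(dataset)

    # Re-run init_op with different arrays to switch between splits:
    # sess.run(init_op, {data_ph: train_x, labels_ph: train_y, batch_size_ph: 128})
    # sess.run(init_op, {data_ph: valid_x, labels_ph: valid_y, batch_size_ph: len(valid_x)})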

    import numpy as np
    import tensorflow as tf
    from time import time
    from tflearn import initializations
    from tflearn.data_preprocessing import DataPreprocessing, ImagePreprocessing
    from tflearn.layers.core import input_data, fully_connected
    from tflearn.layers.conv import conv_2d

    def accuracy(predictions, labels):
        return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
                / predictions.shape[0])
    
    image_size = 28
    num_labels = 10
    num_channels = 1 # grayscale
    
    batch_size = 16
    patch_size = 5
    depth = 16
    num_hidden = 64
    
    graph = tf.Graph()
    
    with graph.as_default():
    
        data_prep = DataPreprocessing()
        data_prep.add_featurewise_stdnorm()
        data_prep.add_featurewise_zero_center()
        img_prep = ImagePreprocessing()
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()
    
        feature_normalization = None
        weight_init_trn = initializations.truncated_normal(seed=None, dtype=tf.float32, stddev=0.1)
        weight_init_zro = initializations.zeros(seed=None, dtype=tf.float32)
        weight_init_cns = tf.constant(1.0)
    
        # Input data.
        # create a placeholder to dynamically switch between batch sizes
        batch_size_x = tf.placeholder(tf.int64)
    
        data_placeholder =  tf.placeholder(tf.float32, shape=(None, image_size, image_size, num_channels))
    
        labels_placeholder = tf.placeholder(tf.float32, shape=(None, num_labels))
    
        # create dataset: one for training and one for test etc
        dataset = tf.data.Dataset.from_tensor_slices((data_placeholder, labels_placeholder)).batch(batch_size_x).repeat()
    
    
        # create an iterator of the correct shape and type
        iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
        # get the tensor that will contain your data
        feature, label = iterator.get_next()
    
        # create the initialisation operations
        init_op = iterator.make_initializer(dataset)   
    
        valid_data_x = tf.constant(valid_data)
        test_data_x = tf.constant(test_data)
    
        # Model.
    
        network = input_data(shape=[None, image_size, image_size, num_channels],
                            placeholder=data_placeholder,
                            data_preprocessing=feature_normalization,
                            data_augmentation=None,
                            name='input_d')
    
        network = conv_2d(network,
                nb_filter=num_channels,
                filter_size=patch_size,
                strides=[1, 2, 2, 1],
                padding='SAME',
                activation='relu',
                bias=True,
                weights_init=weight_init_trn,
                bias_init=weight_init_zro,
                restore=True,
                regularizer=None)
    
        network = conv_2d(network,
            nb_filter=depth,
            filter_size=patch_size,
            strides=[1, 2, 2, 1],
            padding='SAME',
            activation='relu',
            bias=True,
            weights_init=weight_init_trn,
            bias_init=tf.constant(1.0, shape=[depth]),
            restore=True,
            regularizer=None)
    
    
        network = fully_connected(network,
                                n_units=num_hidden,
                                activation='relu',
                                bias=True,
                                weights_init=weight_init_trn,
                                bias_init=tf.constant(1.0, shape=[num_hidden]),
                                regularizer=None,
                                restore=True
                                )
        logits = fully_connected(network,
                                n_units=num_labels,
                                activation=None,
                                bias=True,
                                weights_init=weight_init_trn,
                                bias_init=tf.constant(1.0, shape=[num_labels]),
                                regularizer=None,
                                restore=True,
                                name='fullc2'
                                )
    
        # Training computation.
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels_placeholder,logits=logits))
        # Optimizer.
        optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
    
        prediction = tf.nn.softmax(logits)
    
    num_steps = 1001
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        print('Initialized')
        # initialise iterator with train data
        print('Training...')
        feed_dict =  {data_placeholder: train_data,
                  labels_placeholder: train_data_labels,
                  batch_size_x: batch_size}
        session.run(init_op, feed_dict=feed_dict)

        for step in range(num_steps):

            batch_data, batch_labels = session.run([feature, label], feed_dict=feed_dict)

            t0 = time()
            feed_dict2 = {data_placeholder: batch_data, labels_placeholder: batch_labels}
            _, l, predictions = session.run([optimizer, loss, prediction], feed_dict=feed_dict2)
            print("fit time:", round(time() - t0, 3), "s")
            if (step % 50 == 0):
    
                t0 = time()
                trainAccrMb = accuracy(predictions, batch_labels)
                print("trainAccr time:", round(time()-t0,3),"s")
    
                t0 = time()
                feed_dict = {data_placeholder: valid_data_x.eval(), labels_placeholder: valid_data_labels }
                valid_prediction = session.run(prediction,
                                            feed_dict=feed_dict)
                validAccr= accuracy(valid_prediction, valid_data_labels)
                print("validAccr time:", round(time()-t0,3),"s")
    
    
                print("Minibatch loss at step %d: %f" % (step, l))
                print("Minibatch accuracy: %.1f%%" % trainAccrMb)
                print("Validation accuracy: %.1f%%" %  validAccr)
        # Evaluate on the test set while the session is still open.
        t0 = time()

        feed_dict = {data_placeholder: test_data_x.eval(), labels_placeholder: test_data_labels}  # , batch_size_x: len(valid_data)
        test_prediction = session.run(prediction, feed_dict=feed_dict)

        testAccr = accuracy(test_prediction, test_data_labels)
        print("testAccr time:", round(time() - t0, 3), "s")
        print("Test accuracy: %.1f%%" % testAccr)