TensorFlow placeholder changes results

Date: 2017-04-01 20:13:38

Tags: python tensorflow placeholder

I have been learning TensorFlow for a week now, and I am trying to write a class for a CNN, starting from the code in Stanford's course (code below).

import os
import tensorflow as tf
from time import time


class Cnn:

    def __init__(self, batch_size=128, skip_step=10, epochs=1, dropout_ratio=0.75):
        self.batch_size = batch_size
        self.skip_step = skip_step
        self.epochs = epochs
        self.dropout_ratio = dropout_ratio
        # MNIST images arrive flattened (784) and are reshaped to NHWC
        self.x = tf.placeholder(tf.float32, [None, 784], name="X_placeholder")
        self.images = tf.reshape(self.x, shape=[-1, 28, 28, 1])
        self.y = tf.placeholder(tf.float32, [None, 10], name="Y_placeholder")
        self.predictions = tf.placeholder(tf.float32, [None, 10], name="predictions")
        self.dropout = tf.placeholder(tf.float32, name="dropout")
        self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
        # layers acts as a stack of graph nodes; each builder method pushes its output
        self.layers = [self.images]
        self.n_layers = 0

    def convolution(self, kernel_size, n_output, strides, scope_name):
        previous_layer = self.layers[self.n_layers]
        with tf.variable_scope(scope_name) as scope:
            # kernel shape: [height, width, in_channels, out_channels]
            kernel_shape = [kernel_size[0], kernel_size[1], previous_layer.get_shape()[3], n_output]
            kernel = tf.get_variable("kernels", kernel_shape,
                                     initializer=tf.truncated_normal_initializer())
            biases = tf.get_variable("biases", [n_output],
                                     initializer=tf.random_normal_initializer())
            convolution_2d = tf.nn.conv2d(previous_layer, kernel, strides=strides,
                                          padding="SAME")
            convolution = tf.nn.relu(convolution_2d + biases, name=scope.name)
        self.layers.append(convolution)
        self.n_layers += 1
        return convolution

    def pool(self, size, strides, scope_name, padding="SAME"):
        previous_layer = self.layers[self.n_layers]
        with tf.variable_scope(scope_name):
            pool = tf.nn.max_pool(previous_layer, ksize=size, strides=strides,
                                  padding=padding)
        self.layers.append(pool)
        self.n_layers += 1
        return pool

    def fully_connected(self, n_input, n_output, scope_name, relu=False, dropout=True):
        previous_layer = self.layers[self.n_layers]
        with tf.variable_scope(scope_name):
            weights = tf.get_variable("weights", [n_input, n_output],
                                      initializer=tf.truncated_normal_initializer())
            biases = tf.get_variable("biases", [n_output],
                                     initializer=tf.random_normal_initializer())
            pool_reshaped = tf.reshape(previous_layer, [-1, n_input])
            logits = tf.add(tf.matmul(pool_reshaped, weights), biases)
            fc = logits
            if relu:
                fc = tf.nn.relu(fc, name="relu")
            if dropout:
                fc = tf.nn.dropout(fc, self.dropout, name="dropout")
        self.layers.append(fc)
        self.n_layers += 1
        if relu is False and dropout is False:
            # final layer: expose named logits/predictions tensors
            # (this rebinds self.predictions, which __init__ created as a placeholder)
            self.logits = tf.add(tf.matmul(pool_reshaped, weights), biases, name="logits")
            self.predictions = tf.nn.softmax(logits, name="predictions")
        return fc

    def set_loss(self, scope_name):
        previous_layer = self.layers[self.n_layers]
        with tf.name_scope(scope_name):
            entropy = tf.nn.softmax_cross_entropy_with_logits(logits=previous_layer, labels=self.y)
            loss = tf.reduce_mean(entropy, name='loss')
        self.layers.append(loss)
        self.n_layers += 1
        self.loss = loss

    def set_optimizer(self):
        previous_layer = self.layers[self.n_layers]
        optimizer = tf.train.AdamOptimizer(0.001).minimize(previous_layer, global_step=self.global_step)
        self.layers.append(optimizer)
        self.n_layers += 1
        self.optimizer = optimizer

    def run(self, train_x, train_y):
        sess = tf.Session()
        with sess.as_default():
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            writer = tf.summary.FileWriter('./my_graph/mnist', sess.graph)
            # resume from the latest checkpoint if one exists
            ckpt = tf.train.get_checkpoint_state(os.path.dirname('results/checkpoint'))
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            initial_step = self.global_step.eval()

            start_time = time()

            n_batches = int(train_x.shape[0] / self.batch_size)
            total_correct_preds = 0
            total_loss = 0.0
            current_position = 0
            for index in range(initial_step, int(n_batches * self.epochs)):  # train the model n_epochs times
                x_batch = train_x[current_position: current_position + self.batch_size, :]
                y_batch = train_y[current_position: current_position + self.batch_size, :]
                feed_dict = {self.x: x_batch, self.y: y_batch, self.dropout: self.dropout_ratio}
                _, loss_batch, logits_batch = sess.run([self.optimizer, self.loss, self.logits],
                                                       feed_dict=feed_dict)
                total_loss += loss_batch
                # note: these accuracy ops are added to the graph anew on every iteration
                preds = tf.nn.softmax(logits_batch)
                correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(y_batch, 1))
                accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
                total_correct_preds += sess.run(accuracy)

                if (index + 1) % self.skip_step == 0:
                    print('Average loss at step {}: {:5.1f}'.format(index + 1, total_loss / self.skip_step))
                    total_loss = 0.0
                    saver.save(sess, 'results/mnist-convnet', index)
                current_position += self.batch_size

            print("Optimization Finished!")  # should be around 0.35 after 25 epochs
            print("Total time: {0} seconds".format(time() - start_time))
            print("Accuracy {0}".format(total_correct_preds / train_x.shape[0]))

    def test(self, val_x, val_y):
        print("start testing")
        checkpoint_file = tf.train.latest_checkpoint("results/")
        graph = tf.Graph()
        with graph.as_default():
            sess = tf.Session()
            with sess.as_default():
                # rebuild the graph from the checkpoint's meta file and restore the weights
                saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
                saver.restore(sess, checkpoint_file)
                self.x = graph.get_operation_by_name("X_placeholder").outputs[0]
                self.y = graph.get_operation_by_name("Y_placeholder").outputs[0]
                self.dropout = graph.get_operation_by_name("dropout").outputs[0]
                self.global_step = graph.get_operation_by_name("global_step").outputs[0]
                self.logits = graph.get_operation_by_name("logits").outputs[0]

                current_position = 0
                n_batches = int(val_x.shape[0] / self.batch_size)
                total_correct_preds = 0
                for i in range(n_batches):
                    x_batch = val_x[current_position: current_position + self.batch_size]
                    y_batch = val_y[current_position: current_position + self.batch_size]
                    feed_dict = {self.x: x_batch, self.y: y_batch, self.dropout: self.dropout_ratio}
                    logits_batch = sess.run(self.logits, feed_dict=feed_dict)
                    preds = tf.nn.softmax(logits_batch)
                    correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(y_batch, 1))
                    accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
                    total_correct_preds += sess.run(accuracy)

                    current_position += self.batch_size
                print("Accuracy {0}".format(total_correct_preds / val_x.shape[0]))

The test code:

from cnn import Cnn

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(1)

N_CLASSES = 10

# load data
mnist = input_data.read_data_sets("mnist", one_hot=True)
train_x, train_y = mnist.train.images, mnist.train.labels
val_x, val_y = mnist.test.images, mnist.test.labels

model = Cnn(batch_size=128, skip_step=10, epochs=0.25, dropout_ratio=0.75)

model.convolution(kernel_size=[5, 5], strides=[1, 1, 1, 1], n_output=32, scope_name="conv1")
model.pool(size=[1, 2, 2, 1], strides=[1, 2, 2, 1], scope_name="pool1")
model.convolution(kernel_size=[5, 5], strides=[1, 1, 1, 1], n_output=64, scope_name="conv2")
model.pool(size=[1, 2, 2, 1], strides=[1, 2, 2, 1], scope_name="pool2")
input_features = 7 * 7 * 64
model.fully_connected(n_input=input_features, n_output=1024, scope_name="fc", relu=True, dropout=True)
model.fully_connected(n_input=1024, n_output=N_CLASSES, scope_name="softmax_linear", dropout=False)
model.set_loss(scope_name="loss")
model.set_optimizer()
model.run(train_x=train_x, train_y=train_y)
model.test(val_x, val_y)

Commenting out the line self.predictions = tf.placeholder(tf.float32, [None, 10], name="predictions") changes the training results.
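
My guess, as far as I understand TensorFlow 1.x, is that when only the graph-level seed from tf.set_random_seed is set, each random op's seed is derived from the order in which ops are added to the graph, so removing even an unused placeholder shifts the seeds of the weight initializers. A minimal standalone sketch of pinning an initializer down with an explicit op-level seed (the shapes and seed values here are arbitrary, not from the course code):

import tensorflow as tf

tf.set_random_seed(1)
# Toggling this unused placeholder no longer changes the kernel's initial values,
# because the initializer below carries its own explicit seed.
unused = tf.placeholder(tf.float32, [None, 10], name="predictions")
kernel = tf.get_variable(
    "kernels", [5, 5, 1, 32],
    initializer=tf.truncated_normal_initializer(seed=42))  # seed=42 is arbitrary

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.reduce_sum(kernel)))  # same value whether or not `unused` exists

If that is the cause, passing the same kind of seed argument to truncated_normal_initializer and random_normal_initializer inside convolution and fully_connected should make training start from identical weights either way.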

Has anyone run into this problem before?

Thanks!
