I'm new to TensorFlow. In the past I've built working logistic regression classifiers and multilayer perceptrons. Now I've moved on to convolutional neural networks, and I'm running into a problem with test accuracy. My code is below. The line I'm having trouble with is the very last one, where I try to print the test-accuracy number; the print(1), print(2), and print(3) statements are there to show that execution gets that far.
### import libraries ###
import tensorflow as tf
import numpy as np
from tqdm import trange
### import mnist data ###
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
##### Begin Computational Graph #####
## initial variable values chosen for ease of use with ReLU ##
# input image vector and reshape to 28x28x1
# 28x28x1 is a single image
# the first dimension will be minibatch size
x = tf.placeholder(
    dtype = tf.float32,
    shape = [None, 784],
    name = "x")
xReshape = tf.reshape(x, [-1, 28, 28, 1])
# placeholder for data labels
y_ = tf.placeholder(
    dtype = tf.float32,
    shape = [None, 10],
    name = "y_")
### First Convolutional Layer ###
# define kernel for first convolution layer
# initial values are random small numbers
K1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32],
    stddev = 0.01))
# define bias for first convolution layer
# initial values of 0.1
b1 = tf.Variable(tf.ones([32]) / 10)
# perform convolution
C1 = tf.nn.conv2d(
    input = xReshape,
    filter = K1,
    strides = [1, 1, 1, 1],
    padding = "SAME") + b1
# use activation function
C1_act = tf.nn.relu(C1)
# 2x2 max pool
maxPool1 = tf.nn.max_pool(
    value = C1_act,
    ksize = [1,2,2,1],
    strides = [1,2,2,1],
    padding = "SAME")
### Second Convolutional Layer ###
# define kernel for second convolution layer
# initial values are random small numbers
K2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64],
    stddev = 0.01))
# define bias for second convolution layer
# initial values of 0.1
b2 = tf.Variable(tf.ones([64]) / 10)
# perform convolution
C2 = tf.nn.conv2d(
    input = maxPool1,
    filter = K2,
    strides = [1, 1, 1, 1],
    padding = "SAME") + b2
# use activation function
C2_act = tf.nn.relu(C2)
# 2x2 max pool
maxPool2 = tf.nn.max_pool(
    value = C2_act,
    ksize = [1,2,2,1],
    strides = [1,2,2,1],
    padding = "SAME")
### First Fully Connected Layer w/ 256 Hidden Units ###
# flatten maps into one vector
fVect = tf.reshape(maxPool2, [-1, 7 * 7 * 64])
W1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 256],
    stddev = 0.01))
fcBias1 = tf.Variable(tf.ones([256]) / 10)
prob_y1 = tf.nn.relu(tf.matmul(fVect, W1) + fcBias1)
### Final Fully Connected layer with 10 hidden Units ###
W2 = tf.Variable(tf.truncated_normal([256, 10],
    stddev = 0.01))
fcBias2 = tf.Variable(tf.ones([10]) / 10)
prob_y2 = tf.nn.softmax(logits = (tf.matmul(prob_y1, W2) + fcBias2))
### Loss Function and Optimizer ###
# define loss function
cross_entropy_loss = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(prob_y2), axis = 1))
# set up gradient descent optimizer
train_step = tf.train.GradientDescentOptimizer(learning_rate = 0.05).minimize(cross_entropy_loss)
##### Train the Network #####
### start the session and initialize global variables ###
# Variable Initializer
init_op = tf.global_variables_initializer()
# Create a Session object, initialize all variables
sess = tf.Session()
sess.run(init_op)
for _ in trange(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict = {x: batch_xs, y_: batch_ys})
### Test Prediction Accuracy ###
# test trained model
print(1)
correct_prediction = tf.equal(tf.argmax(prob_y2, axis = 1), tf.argmax(y_, axis = 1))
print(2)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(3)
print('Test accuracy: {0}'.format(sess.run(accuracy, feed_dict = {x: mnist.test.images, y_: mnist.test.labels})))
sess.close()
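A side note, in case it matters (though I don't think it explains the dead kernel): I realize that taking tf.log of the softmax output by hand can produce NaNs if a probability underflows to zero. A more numerically stable variant would let TensorFlow fuse the softmax and the log, something like this sketch:

# Sketch (untested): fuse softmax and cross-entropy for numerical stability,
# replacing the prob_y2 / cross_entropy_loss lines above.
logits = tf.matmul(prob_y1, W2) + fcBias2
prob_y2 = tf.nn.softmax(logits)  # still available for the argmax at test time
cross_entropy_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels = y_, logits = logits))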
Apologies for the big code dump; I wanted to make sure the problem is reproducible. Running this code in a notebook ends with a popup saying "The kernel appears to have died. It will restart automatically." I'm hoping it's just my syntax or some other small mistake, but I've searched the documentation and forums for every function involved and haven't found my problem.
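One guess about the crash: feeding all 10,000 test images through the conv net in a single sess.run might be exhausting memory, which would kill the kernel without a Python traceback. If that's the cause, evaluating in batches along these lines should sidestep it (a sketch I haven't verified; the batch size of 500 is arbitrary, but it divides the 10,000 test examples evenly):

# Sketch (untested): evaluate accuracy over the test set in batches
# instead of one 10,000-image feed.
batch_size = 500
num_batches = mnist.test.num_examples // batch_size
acc_sum = 0.0
for i in range(num_batches):
    test_xs = mnist.test.images[i * batch_size:(i + 1) * batch_size]
    test_ys = mnist.test.labels[i * batch_size:(i + 1) * batch_size]
    acc_sum += sess.run(accuracy, feed_dict = {x: test_xs, y_: test_ys})
print('Test accuracy: {0}'.format(acc_sum / num_batches))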
Thanks for any help!