I have trained a deep neural network on the MNIST dataset. Here is the training code.
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

n_classes = 10
batch_size = 100

x = tf.placeholder(tf.float32, [None, 784], name='Xx')
y = tf.placeholder(tf.float32, [None, 10], name='Yy')

input = 784
n_nodes_1 = 300
n_nodes_2 = 300
def neural_network_model(data):
    variables = {'w1': tf.Variable(tf.random_normal([input, n_nodes_1])),
                 'w2': tf.Variable(tf.random_normal([n_nodes_1, n_nodes_2])),
                 'w3': tf.Variable(tf.random_normal([n_nodes_2, n_classes])),
                 'b1': tf.Variable(tf.random_normal([n_nodes_1])),
                 'b2': tf.Variable(tf.random_normal([n_nodes_2])),
                 'b3': tf.Variable(tf.random_normal([n_classes]))}
    output1 = tf.add(tf.matmul(data, variables['w1']), variables['b1'])
    output2 = tf.nn.relu(output1)
    output3 = tf.add(tf.matmul(output2, variables['w2']), variables['b2'])
    output4 = tf.nn.relu(output3)
    output5 = tf.add(tf.matmul(output4, variables['w3']), variables['b3'], name='last')
    return output5
def train_neural_network(x):
    prediction = neural_network_model(x)
    name_of_final_layer = 'fin'
    final = tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction,
                                                       labels=y, name=name_of_final_layer)
    cost = tf.reduce_mean(final)
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 3
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print("Epoch", epoch + 1, "Completed Total Loss:", epoch_loss)
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy on val_set:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
        path = saver.save(sess, "net/network")
        print("Saved to", path)
Here is my code for evaluating a single data point:
def eval_neural_network():
    with tf.Session() as sess:
        new_saver = tf.train.import_meta_graph('net/network.meta')
        new_saver.restore(sess, "net/network")
        sing = np.reshape(mnist.test.images[0], (-1, 784))
        output = sess.run([y], feed_dict={x: sing})
        print(output)

eval_neural_network()
The error that comes up is:
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Yy' with dtype float and shape [?,10]
[[Node: Yy = Placeholder[dtype=DT_FLOAT, shape=[?,10], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
I have been researching this online for days but still cannot get it to work. Any suggestions?
Answer 0 (score: 0)
This complete example, based on the tensorflow github, worked for me. (I modified a few lines of code: I removed the name scope around x, dropped the keep_prob placeholder, and changed it to tf.placeholder_with_default. There may well be a better way somewhere.)
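(A quick aside on that keep_prob change, as a minimal sketch rather than part of the original answer: a plain tf.placeholder must be fed on every run, while tf.placeholder_with_default falls back to its default value when nothing is fed, so a restored graph can be evaluated by feeding only x.)

    # Sketch of the keep_prob change: a placeholder_with_default is optional to feed.
    keep_prob = tf.placeholder_with_default(1.0, shape=())  # defaults to 1.0 (no dropout)
    # During training it can still be overridden: feed_dict={keep_prob: 0.5}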
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import argparse
import sys
import tempfile
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
FLAGS = None
def deepnn(x):
    """deepnn builds the graph for a deep net for classifying digits.

    Args:
        x: an input tensor with the dimensions (N_examples, 784), where 784 is the
        number of pixels in a standard MNIST image.

    Returns:
        A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values
        equal to the logits of classifying the digit into one of 10 classes (the
        digits 0-9). keep_prob is a scalar placeholder for the probability of
        dropout.
    """
    # Reshape to use within a convolutional neural net.
    # Last dimension is for "features" - there is only one here, since images are
    # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
    with tf.name_scope('reshape'):
        x_image = tf.reshape(x, [-1, 28, 28, 1])

    # First convolutional layer - maps one grayscale image to 32 feature maps.
    with tf.name_scope('conv1'):
        W_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variable([32])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

    # Pooling layer - downsamples by 2X.
    with tf.name_scope('pool1'):
        h_pool1 = max_pool_2x2(h_conv1)

    # Second convolutional layer -- maps 32 feature maps to 64.
    with tf.name_scope('conv2'):
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

    # Second pooling layer.
    with tf.name_scope('pool2'):
        h_pool2 = max_pool_2x2(h_conv2)

    # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image
    # is down to 7x7x64 feature maps -- maps this to 1024 features.
    with tf.name_scope('fc1'):
        W_fc1 = weight_variable([7 * 7 * 64, 1024])
        b_fc1 = bias_variable([1024])
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # Dropout - controls the complexity of the model, prevents co-adaptation of
    # features.
    keep_prob = tf.placeholder_with_default(1.0, ())
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Map the 1024 features to 10 classes, one for each digit.
    with tf.name_scope('fc2'):
        W_fc2 = weight_variable([1024, 10])
        b_fc2 = bias_variable([10])
        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    return y_conv, keep_prob

def conv2d(x, W):
    """conv2d returns a 2d convolution layer with full stride."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    """max_pool_2x2 downsamples a feature map by 2X."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# Import data
mnist = input_data.read_data_sets("/tmp")
# Create the model
x = tf.placeholder(tf.float32, [None, 784],name='x')
# Define loss and optimizer
y_ = tf.placeholder(tf.int64, [None])
# Build the graph for the deep net
y_conv, keep_prob = deepnn(x)
with tf.name_scope('loss'):
    cross_entropy = tf.losses.sparse_softmax_cross_entropy(
        labels=y_, logits=y_conv)
cross_entropy = tf.reduce_mean(cross_entropy)

with tf.name_scope('adam_optimizer'):
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), y_)
    correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
graph_location = tempfile.mkdtemp()
print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x: batch[0], y_: batch[1], keep_prob: 1.0})
            print('step %d, training accuracy %g' % (i, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

    print('test accuracy %g' % accuracy.eval(feed_dict={
        x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))

    sing = np.reshape(mnist.test.images[0], (-1, 784))
    output = sess.run(y_conv, feed_dict={x: sing, keep_prob: 1.0})
    print(tf.argmax(output, 1).eval())

    saver = tf.train.Saver()
    saver.save(sess, "/tmp/network")
Extracting /tmp/train-images-idx3-ubyte.gz
Extracting /tmp/train-labels-idx1-ubyte.gz
Extracting /tmp/t10k-images-idx3-ubyte.gz
Extracting /tmp/t10k-labels-idx1-ubyte.gz
Saving graph to: /tmp/tmp17hf_6c7
step 0, training accuracy 0.2
step 100, training accuracy 0.86
step 200, training accuracy 0.8
step 300, training accuracy 0.94
step 400, training accuracy 0.94
step 500, training accuracy 0.96
step 600, training accuracy 0.88
step 700, training accuracy 0.98
step 800, training accuracy 0.98
step 900, training accuracy 0.98
test accuracy 0.9663
[7]
To restore it from a fresh python run:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import argparse
import sys
import tempfile
from tensorflow.examples.tutorials.mnist import input_data
sess = tf.Session()
saver = tf.train.import_meta_graph('/tmp/network.meta')
saver.restore(sess,tf.train.latest_checkpoint('/tmp'))
graph = tf.get_default_graph()
mnist = input_data.read_data_sets("/tmp")
simg = np.reshape(mnist.test.images[0],(-1,784))
op_to_restore = graph.get_tensor_by_name("fc2/MatMul:0")
x = graph.get_tensor_by_name("x:0")
output = sess.run(op_to_restore,feed_dict= {x:simg})
print("Result = ", np.argmax(output))
Answer 1 (score: 0)
The loss oscillates like this, but the predictions do not seem bad. It works. It also re-extracts the mnist archive on every run. The accuracy can reach 0.98 with this simpler network as well.
Epoch 1 Completed Total Loss: 47.47844
Accuracy on val_set: 0.8685
Epoch 2 Completed Total Loss: 10.217445
Accuracy on val_set: 0.9
Epoch 3 Completed Total Loss: 14.013474
Accuracy on val_set: 0.9104
[2]
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
import numpy as np
import matplotlib.pyplot as plt
n_classes = 10
batch_size = 100
x = tf.placeholder(tf.float32, [None, 784],name='Xx')
y = tf.placeholder(tf.float32,[None,10],name='Yy')
input = 784
n_nodes_1 = 300
n_nodes_2 = 300
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
def neural_network_model(data):
    variables = {'w1': tf.Variable(tf.random_normal([input, n_nodes_1])),
                 'w2': tf.Variable(tf.random_normal([n_nodes_1, n_nodes_2])),
                 'w3': tf.Variable(tf.random_normal([n_nodes_2, n_classes])),
                 'b1': tf.Variable(tf.random_normal([n_nodes_1])),
                 'b2': tf.Variable(tf.random_normal([n_nodes_2])),
                 'b3': tf.Variable(tf.random_normal([n_classes]))}
    output1 = tf.add(tf.matmul(data, variables['w1']), variables['b1'])
    output2 = tf.nn.relu(output1)
    output3 = tf.add(tf.matmul(output2, variables['w2']), variables['b2'])
    output4 = tf.nn.relu(output3)
    output5 = tf.add(tf.matmul(output4, variables['w3']), variables['b3'], name='last')
    return output5
def train_neural_network(x):
    prediction = neural_network_model(x)
    name_of_final_layer = 'fin'
    final = tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction,
                                                       labels=y, name=name_of_final_layer)
    cost = tf.reduce_mean(final)
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 3
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
            print("Epoch", epoch + 1, "Completed Total Loss:", c)
            correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
            print('Accuracy on val_set:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
        #path = saver.save(sess,"net/network")
        #print("Saved to",path)
    return prediction
def eval_neural_network(prediction):
    with tf.Session() as sess:
        new_saver = tf.train.import_meta_graph('net/network.meta')
        new_saver.restore(sess, "net/network")
        singleprediction = tf.argmax(prediction, 1)
        sing = np.reshape(mnist.test.images[1], (-1, 784))
        output = singleprediction.eval(feed_dict={x: sing}, session=sess)
        digit = mnist.test.images[1].reshape((28, 28))
        plt.imshow(digit, cmap='gray')
        plt.show()
        print(output)
prediction = train_neural_network(x)
eval_neural_network(prediction)
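For completeness, here is a minimal sketch (an addition, not from either answer) of evaluating the question's own saved graph in a fresh python session. The original error appears because sess.run([y], ...) asks the session to evaluate the label placeholder Yy itself, which therefore has to be fed; fetching the logits tensor named 'last' in the question's code and feeding only the input placeholder Xx avoids that. This assumes the checkpoint written by the question's training code exists under net/.

    # Sketch: restore the question's graph and fetch the logits tensor 'last:0'
    # instead of the label placeholder Yy (assumes net/network* was saved earlier).
    def eval_single_example():
        with tf.Session() as sess:
            new_saver = tf.train.import_meta_graph('net/network.meta')
            new_saver.restore(sess, 'net/network')
            graph = tf.get_default_graph()
            x_in = graph.get_tensor_by_name('Xx:0')      # input placeholder from the question
            logits = graph.get_tensor_by_name('last:0')  # output layer named 'last'
            sing = np.reshape(mnist.test.images[0], (-1, 784))
            output = sess.run(logits, feed_dict={x_in: sing})
            print('Predicted digit:', np.argmax(output))

    eval_single_example()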