I am trying to do image classification with a convolutional neural network. I completed this tutorial on deep learning and implemented the given code with many modifications: I added more convolutional and max-pooling layers and changed the input to accept 166x166 images. To save the parameters after training, we use cPickle.dump() in a save() function defined separately for each layer (ConvPool, FullyConnected and Softmax); this function is called for all the layers in the SGD() method once training finishes. In a second program, the parameters of the softmax, fully connected and convolutional layers are loaded back from the .p pickle files, except that in this program we do not call the SGD method. The problem is that I want to print y_out of the Softmax layer (y_out is what we use to compute the accuracy of our network) in order to get the predicted class of an image. But after trying
#print net.layers[-1].y_out.eval()
#x2 = net.layers[-1].y_out
#y2 = T.cast(x2, 'int32')
#print (pp(net.layers[-1].y_out))
#help(T.argmax)
#print net.layers[-1].y_out.shape.eval()
I still either get 'argmax' printed as the value of the TensorVariable, or a missing-input error when I use the eval() function to get the value of the y_out TensorVariable.
So I need help printing the prediction for a single test image.
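For reference, the per-layer save() mentioned above is not included in the listing below; a minimal sketch of what it presumably looks like for the ConvPoolLayer (the other layers mirror it with their own .p files), assuming it dumps w and b in the same order that the load() methods below read them back:

    def save(self):
        """Dump this layer's weights and biases to its pickle file (sketch)."""
        # Path assumed to mirror the one used in ConvPoolLayer.load() below.
        save_file = open('/home/sweta/BE_PROJECT/tryingtosave/cl.p', 'wb')
        cPickle.dump(self.w.get_value(borrow=True), save_file)
        cPickle.dump(self.b.get_value(borrow=True), save_file)
        save_file.close()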
Here is the code of our modified network3.py (renamed net3.py):
"""net3.py
~~~~~~~~~~~~~~
A Theano-based program for training and running simple neural
networks.
Supports several layer types (fully connected, convolutional, max
pooling, softmax), and activation functions (sigmoid, tanh, and
rectified linear units, with more easily added).
When run on a CPU, this program is much faster than network.py and
network2.py. However, unlike network.py and network2.py it can also
be run on a GPU, which makes it faster still.
Because the code is based on Theano, the code is different in many
ways from network.py and network2.py. However, where possible I have
tried to maintain consistency with the earlier programs. In
particular, the API is similar to network2.py. Note that I have
focused on making the code simple, easily readable, and easily
modifiable. It is not optimized, and omits many desirable features.
This program incorporates ideas from the Theano documentation on
convolutional neural nets (notably,
http://deeplearning.net/tutorial/lenet.html ), from Misha Denil's
implementation of dropout (https://github.com/mdenil/dropout ), and
from Chris Olah (http://colah.github.io ).
"""
#tts refers to trying to save
#### Libraries
# Standard library
import json
import cPickle
import gzip
import load
# Third-party libraries
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv
from theano.tensor.nnet import softmax
from theano.tensor import shared_randomstreams
from theano.tensor.signal import downsample
from theano import pp
# Activation functions for neurons
def linear(z): return z
def ReLU(z): return T.maximum(0.0, z)
from theano.tensor.nnet import sigmoid
from theano.tensor import tanh
'''
#### Constants
GPU = True
if GPU:
try: theano.config.device = 'gpu'
except: pass # it's already set
print "Trying to run under a GPU. If this is not desired, then modify "+\
"network3.py\nto set the GPU flag to False."
theano.config.floatX = 'float32'
else:
print "Running with a CPU. If this is not desired, then the modify "+\
"network3.py to set\nthe GPU flag to True."
'''
print "DEVICE IS:" ,theano.config.device
#### Load the MNIST data
def load_data_shared(filename="../data/mnist.pkl.gz"):
f = gzip.open(filename, 'rb')
training_data, validation_data, test_data = cPickle.load(f)
f.close()
def shared(data):
"""Place the data into shared variables. This allows Theano to copy
the data to the GPU, if one is available.
"""
shared_x = theano.shared(
np.asarray(data[0], dtype=theano.config.floatX), borrow=True)
shared_y = theano.shared(
np.asarray(data[1], dtype=theano.config.floatX), borrow=True)
return shared_x, T.cast(shared_y, "int32")
return [shared(training_data), shared(validation_data), shared(test_data)]
def load_mydata_shared():
test_data = load.load_data()
def shared(data):
"""Place the data into shared variables. This allows Theano to copy
the data to the GPU, if one is available.
"""
print "data:",data
shared_x = theano.shared(
np.asarray(data[0], dtype=theano.config.floatX))
shared_y = theano.shared(
np.asarray(data[1], dtype=theano.config.floatX))
return shared_x, T.cast(shared_y, "int32")
return [shared(test_data)]
#### Main class used to construct and train networks
class Network(object):
def __init__(self, layers, mini_batch_size):
"""Takes a list of `layers`, describing the network architecture, and
a value for the `mini_batch_size` to be used during training
by stochastic gradient descent.
"""
self.layers = layers
self.mini_batch_size = mini_batch_size
self.params = [param for layer in self.layers for param in layer.params]
self.x = T.matrix("x")
self.y = T.ivector("y")
# self.x1 = T.matrix('x')
# self.y1 = T.ivector('y')
# self.x2 = T.matrix('x')
# self.y2 = T.ivector('y')
init_layer = self.layers[0]
init_layer.set_inpt(self.x, self.x, self.mini_batch_size)
for j in xrange(1, len(self.layers)):
prev_layer, layer = self.layers[j-1], self.layers[j]
layer.set_inpt(
prev_layer.output, prev_layer.output_dropout, self.mini_batch_size)
self.output = self.layers[-1].output
self.output_dropout = self.layers[-1].output_dropout
print "class issss:",pp(T.cast(self.layers[-1].y_out.shape,'int32'))
def SGD(self, epochs, mini_batch_size, eta,
test_data, lmbda=0.0):
"""Train the network using mini-batch stochastic gradient descent."""
test_x, test_y = test_data
print "tex:",test_x
print "tey:",test_y
# compute number of minibatches for training, validation and testing
print "Epochs:"+str(epochs)
print "Mini-batch size:"+str(mini_batch_size)
print "Eta:"+str(eta)
num_test_batches = size(test_data)/mini_batch_size
# define the (regularized) cost function, symbolic gradients, and updates
l2_norm_squared = sum([(layer.w**2).sum() for layer in self.layers])
# define functions to train a mini-batch, and to compute the
# accuracy in validation and test mini-batches.
i = T.lscalar() # mini-batch index
test_mb_accuracy = theano.function(
[i], self.layers[-1].accuracy(self.y),
givens={
self.x:
test_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
self.y:
test_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
})
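        # Compiled Theano function that returns the predicted classes (y_out)
        # for mini-batch i of the test data; this is what can be used to print
        # the prediction for a test image.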
self.test_mb_predictions = theano.function(
[i], self.layers[-1].y_out,
givens={
self.x:
test_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
})
test_accuracy = np.mean([test_mb_accuracy(j) for j in xrange(num_test_batches)])
print('The corresponding test accuracy is {0:.2%}'.format(test_accuracy))
print("Finished training network.")
print("Best validation accuracy of {0:.2%} obtained at iteration {1}".format(
best_validation_accuracy, best_iteration))
print("Corresponding test accuracy of {0:.2%}".format(test_accuracy))
#### Define layer types
class ConvPoolLayer(object):
"""Used to create a combination of a convolutional and a max-pooling
layer. A more sophisticated implementation would separate the
two, but for our purposes we'll always use them together, and it
simplifies the code, so it makes sense to combine them.
"""
def __init__(self, filter_shape, image_shape, poolsize=(2, 2),
activation_fn=ReLU):
"""`filter_shape` is a tuple of length 4, whose entries are the number
of filters, the number of input feature maps, the filter height, and the
filter width.
`image_shape` is a tuple of length 4, whose entries are the
mini-batch size, the number of input feature maps, the image
height, and the image width.
`poolsize` is a tuple of length 2, whose entries are the y and
x pooling sizes.
"""
self.filter_shape = filter_shape
self.image_shape = image_shape
self.poolsize = poolsize
self.activation_fn=activation_fn
# initialize weights and biases
n_out = (filter_shape[0]*np.prod(filter_shape[2:])/np.prod(poolsize))
self.w = theano.shared(
np.asarray(
np.random.normal(loc=0, scale=np.sqrt(1.0/n_out), size=filter_shape),
dtype=theano.config.floatX),
borrow=True)
self.b = theano.shared(
np.asarray(
np.random.normal(loc=0, scale=1.0, size=(filter_shape[0],)),
dtype=theano.config.floatX),
borrow=True)
self.load()
self.params = [self.w, self.b]
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
self.inpt = inpt.reshape(self.image_shape)
conv_out = conv.conv2d(
input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
image_shape=self.image_shape)
pooled_out = downsample.max_pool_2d(
input=conv_out, ds=self.poolsize, ignore_border=True)
self.output = self.activation_fn(
pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
self.output_dropout = self.output # no dropout in the convolutional layers
    def load(self):
        """Load this layer's weights and biases from its pickle file."""
        save_file = open('/home/sweta/BE_PROJECT/tryingtosave/cl.p', 'rb')
        self.w.set_value(cPickle.load(save_file), borrow=True)
        self.b.set_value(cPickle.load(save_file), borrow=True)
        save_file.close()
class FullyConnectedLayer(object):
def __init__(self, n_in, n_out, activation_fn=ReLU, p_dropout=0.5):
self.n_in = n_in
self.n_out = n_out
self.activation_fn = activation_fn
self.p_dropout = p_dropout
# Initialize weights and biases
self.w = theano.shared(
np.asarray(
np.random.normal(
loc=0.0, scale=np.sqrt(1.0/n_out), size=(n_in, n_out)),
dtype=theano.config.floatX),
name='w', borrow=True)
self.b = theano.shared(
np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
dtype=theano.config.floatX),
name='b', borrow=True)
self.load()
self.params = [self.w, self.b]
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
self.inpt = inpt.reshape((mini_batch_size, self.n_in))
self.output = self.activation_fn(
(1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
self.y_out = T.argmax(self.output, axis=1)
self.inpt_dropout = dropout_layer(
inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
self.output_dropout = self.activation_fn(
T.dot(self.inpt_dropout, self.w) + self.b)
def accuracy(self, y):
"Return the accuracy for the mini-batch."
return T.mean(T.eq(y, self.y_out))
    def load(self):
        """Load this layer's weights and biases from its pickle file."""
        save_file = open('/home/sweta/BE_PROJECT/tryingtosave/fcl.p', 'rb')
        self.w.set_value(cPickle.load(save_file), borrow=True)
        self.b.set_value(cPickle.load(save_file), borrow=True)
        save_file.close()
class SoftmaxLayer(object):
def __init__(self, n_in, n_out, p_dropout=0.5):
self.n_in = n_in
self.n_out = n_out
self.p_dropout = p_dropout
# Initialize weights and biases
self.w = theano.shared(
np.zeros((n_in, n_out), dtype=theano.config.floatX),
name='w', borrow=True)
self.b = theano.shared(
np.zeros((n_out,), dtype=theano.config.floatX),
name='b', borrow=True)
self.load()
self.params = [self.w, self.b]
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
self.inpt = inpt.reshape((mini_batch_size, self.n_in))
self.output = softmax((1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
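        # y_out is the index of the highest-scoring softmax output, i.e. the
        # predicted class for each image in the mini-batch.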
self.y_out = T.argmax(self.output, axis=1)
self.inpt_dropout = dropout_layer(
inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
def cost(self, net):
"Return the log-likelihood cost."
return -T.mean(T.log(self.output_dropout)[T.arange(net.y.shape[0]), net.y])
def accuracy(self, y):
"Return the accuracy for the mini-batch."
print "class is:", self.y_out
return T.mean(T.eq(y, self.y_out))
    def load(self):
        """Load this layer's weights and biases from its pickle file."""
        save_file = open('/home/sweta/BE_PROJECT/tryingtosave/sml.p', 'rb')
        self.w.set_value(cPickle.load(save_file), borrow=True)
        self.b.set_value(cPickle.load(save_file), borrow=True)
        save_file.close()
#### Miscellanea
def size(data):
"Return the size of the dataset `data`."
return data[0].get_value(borrow=True).shape[0]
def dropout_layer(layer, p_dropout):
srng = shared_randomstreams.RandomStreams(
np.random.RandomState(0).randint(999999))
mask = srng.binomial(n=1, p=1-p_dropout, size=layer.shape)
return layer*T.cast(mask, theano.config.floatX)
The main program is:
import net3
import load
import theano.tensor as T
from theano import pp
from net3 import Network
from net3 import ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer
test_data = net3.load_mydata_shared()
mini_batch_size = 10
net = Network([ ConvPoolLayer(image_shape=(mini_batch_size, 1, 166, 166),filter_shape=(5, 1, 5,5), poolsize=(2, 2)),
ConvPoolLayer(image_shape=(mini_batch_size, 5, 81, 81),filter_shape=(10, 5, 6,6), poolsize=(2, 2)),
ConvPoolLayer(image_shape=(mini_batch_size, 10, 38, 38),filter_shape=(15, 10, 5, 5 ),poolsize=(2, 2)),
ConvPoolLayer(image_shape=(mini_batch_size, 15, 17, 17),filter_shape=(20, 15, 4, 4 ),poolsize=(2, 2)),
ConvPoolLayer(image_shape=(mini_batch_size, 20, 7, 7),filter_shape=(40, 20, 2, 2 ),poolsize=(2, 2)),
FullyConnectedLayer(n_in=40*3*3, n_out=100),SoftmaxLayer(n_in=100, n_out=3)], mini_batch_size)
#print net.layers[-1].y_out.eval()
#x2 = net.layers[-1].y_out
#y2 = T.cast(x2, 'int32')
#print (pp(net.layers[-1].y_out))
#help(T.argmax)
#print net.layers[-1].y_out.shape.eval()
#net.SGD( 2, mini_batch_size, 0.03 ,test_data)
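For reference, the image_shape arguments above follow from each ConvPoolLayer doing a valid convolution followed by 2x2 max-pooling, so an n x n feature map filtered with a k x k kernel comes out (n - k + 1)/2 pixels on a side. A quick check of that arithmetic (not part of the program, just a sanity check of the layer sizes):

size = 166
for k in (5, 6, 5, 4, 2):        # filter sizes of the five ConvPoolLayers
    size = (size - k + 1) // 2   # valid convolution, then 2x2 max-pooling
    print size                   # 81, 38, 17, 7, 3
print 40 * 3 * 3                 # 360, the n_in of the FullyConnectedLayer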
The code to load a single test image from a .mat file is as follows:
"""
load.py
~~~~~~~
A library to load the image data from .mat files.  For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""
#### Libraries
# Standard library
import cPickle
import gzip
# Third-party libraries
import numpy as np
import scipy.io as sio
from random import shuffle
def load_data():
matx = sio.loadmat('exact_one_x.mat')
datax = matx['X']
datax=datax.transpose()
print datax.shape
maty = sio.loadmat('one_y.mat')
datay = maty['M']
datay=datay.transpose()
print datay.shape
test_data = (datax,datay[0])
return ( test_data)
def load_data_wrapper():
"""Return a tuple containing ``(training_data, validation_data,
test_data)``. Based on ``load_data``, but the format is more
convenient for use in our implementation of neural networks.
    In particular, ``training_data`` is a list of 2-tuples ``(x, y)``.
    ``x`` is a 27556-dimensional numpy.ndarray containing the flattened
    166x166 input image, and ``y`` is a 3-dimensional numpy.ndarray
    representing the unit vector corresponding to the correct class for ``x``.
    ``validation_data`` and ``test_data`` are lists of 2-tuples ``(x, y)``.
    In each case, ``x`` is a 27556-dimensional numpy.ndarray containing
    the input image, and ``y`` is the corresponding classification, i.e.,
    the class label (an integer) corresponding to ``x``.
Obviously, this means we're using slightly different formats for
the training data and the validation / test data. These formats
turn out to be the most convenient for use in our neural network
code."""
tr_d, va_d, te_d = load_data()
training_inputs = [np.reshape(x, (27556, 1)) for x in tr_d[0]]
training_results = [vectorized_result(y) for y in tr_d[1]]
training_data = zip(training_inputs, training_results)
shuffle(training_data)
validation_inputs = [np.reshape(x, (27556, 1)) for x in va_d[0]]
validation_data = zip(validation_inputs, va_d[1])
test_inputs = [np.reshape(x, (27556, 1)) for x in te_d[0]]
test_data = zip(test_inputs, te_d[1])
return (training_data, validation_data, test_data)
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the jth
position and zeroes elsewhere. This is used to convert a digit
(0...9) into a corresponding desired output from the neural
network."""
e = np.zeros((3, 1))
e[j] = 1.0
return e
Answer (score: 0):
You can use the test_mb_predictions function that is already defined in your code. You just need to load the single image with a mini-batch size of 1 (instead of 10) in your test code, and then print the class returned by test_mb_predictions.
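A minimal sketch of that approach, assuming the Network in the main program is rebuilt with mini_batch_size = 1 (the image_shape tuples of the ConvPoolLayers must then also use 1 as their first entry) and that the prediction function is compiled directly in the test script, since SGD() is not called there:

import theano
import theano.tensor as T

mini_batch_size = 1                     # one image per mini-batch
test_x, test_y = test_data[0]           # shared variables from load_mydata_shared()
i = T.lscalar()                         # mini-batch index
predict = theano.function(
    [i], net.layers[-1].y_out,
    givens={
        net.x: test_x[i*mini_batch_size: (i+1)*mini_batch_size]
    })
print "Predicted class:", predict(0)    # prediction for the single test image

This is essentially the test_mb_predictions function from SGD(), compiled on its own so that none of the training machinery has to run.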