I am trying to classify 166x166 images (cropped in Matlab) with the CNN code taught in the DeepLearning tutorial (convolutional_mlp.py). The input comes from .mat files: X_2k.mat holds the pixel values of the input images and y_2k.mat holds the labels for those images. For now I supply the same 2115 images and their labels as the training, validation, and test data.

I converted the numpy ndarrays read from the .mat files into shared tensor variables of the corresponding types, i.e. from a uint8 matrix to a float64 matrix for X, and to an int32 vector for y (this part is also commented in the code). That conversion fixed the earlier error "Cannot convert Tensor Type (uint8, matrix) into Tensor Type (float64, matrix)" for x, which occurred when the .mat input was supplied. The images fall into 3 classes: 0, 1, and 2.

I now need help with this error:

ValueError: Not enough dimensions on Elemwise{neg,no_inplace}.0 to reduce on axis 5000
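The dtype conversion in isolation (a condensed sketch of what the full listing below does; the file names and the 'X'/'M' keys come from how my .mat files were created):

import scipy.io as sio
import theano

# load the pixel matrix and the label column from the .mat files
datax = sio.loadmat('X_2k.mat')['X'].transpose()  # uint8, one image per row
datay = sio.loadmat('y_2k.mat')['M']              # uint8 column of labels

# cast to the dtypes the Theano graph expects
train_set_x = datax.astype('float64')               # float64 matrix for x
train_set_y = datay.astype('int32').reshape((-1,))  # int32 vector for y

# wrap in shared variables so minibatches can be sliced via `givens`
train_set_xx = theano.shared(train_set_x)
train_set_yy = theano.shared(train_set_y)

The full script: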
import os
import sys
import timeit
import random
import numpy
import cPickle
import gzip
import scipy.io as sio
from random import shuffle
import theano
from theano.tensor import *
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width) #FILTER IS LOCAL RECEPTIVE FIELD
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
# the bias is a 1D tensor -- one bias per output feature map
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv.conv2d(
input=input,
filters=self.W,
filter_shape=filter_shape,
image_shape=image_shape
)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=poolsize,
ignore_border=True
)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
dataset='mnist.pkl.gz',
nkerns=[20, 50], batch_size=500):
""" Demonstrates lenet on MNIST dataset
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer AKA ITERATIONS
:type dataset: string
:param dataset: path to the dataset used for training /testing (MNIST here)
:type nkerns: list of ints
:param nkerns: number of kernels on each layer
"""
    matx = sio.loadmat('X_2k.mat')
    datax = matx['X']
    datax = datax.transpose()
    maty = sio.loadmat('y_2k.mat')
    datay = maty['M']  # M was used while creating the y MAT file
    training_data = (datax, datay)
    validation_data = training_data
    test_data = training_data
    train_set_x = training_data[0]
    train_set_y = training_data[1]
    valid_set_x = training_data[0]
    valid_set_y = training_data[1]
    test_set_x = training_data[0]
    test_set_y = training_data[1]
rng = numpy.random.RandomState(23455)
print ' type of train_set_x '
print train_set_x.dtype
    train_set_x = train_set_x.astype('float64')  # converted from uint8 to float64 matrix
    valid_set_x = valid_set_x.astype('float64')
    test_set_x = test_set_x.astype('float64')
    train_set_y = train_set_y.astype('int32')  # converted from uint8 to int32 matrix
    valid_set_y = valid_set_y.astype('int32')
    test_set_y = test_set_y.astype('int32')
    train_set_y = train_set_y.reshape((-1,))  # converted from int32 matrix to int32 vector
    valid_set_y = valid_set_y.reshape((-1,))
    test_set_y = test_set_y.reshape((-1,))
print train_set_x.dtype
print 'printing for y'
print train_set_y.dtype
    train_set_xx = theano.shared(train_set_x)
    train_set_yy = theano.shared(train_set_y)
    valid_set_xx = theano.shared(valid_set_x)
    valid_set_yy = theano.shared(valid_set_y)
    test_set_xx = theano.shared(test_set_x)
    test_set_yy = theano.shared(test_set_y)
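    # Note: the tutorial's load_data() handles this differently: it stores
    # the labels as a floatX shared variable and returns
    # T.cast(shared_y, 'int32'); building the shared variable directly from
    # an int32 array, as above, is an alternative.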
'''datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]'''
print training_data
print 'datax: '
print datax
print 'datay: '
print datay
print ' '
print 'type of train_set_xx is :'
print train_set_xx.type
print 'type of train_set_yy is :'
print train_set_yy.type
print ' '
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_xx.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_xx.get_value(borrow=True).shape[0]
n_test_batches = test_set_xx.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
n_valid_batches /= batch_size
n_test_batches /= batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# start-snippet-1
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
print 'type of x is:'
print x.type
print 'type of y is :'
print y.type
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
    # Reshape matrix of rasterized images of shape (batch_size, 166 * 166)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer
    # (166, 166) is the size of our cropped images.
layer0_input = x.reshape((batch_size, 1, 166, 166))
print ' LAYER 0 INPUT IS:'
print layer0_input
# Construct the first convolutional pooling layer:
    # filtering reduces the image size to (166-11+1, 166-11+1) = (156, 156)
    # maxpooling reduces this further to (156/2, 156/2) = (78, 78)
    # 4D output tensor is thus of shape (batch_size, nkerns[0], 78, 78)
layer0 = LeNetConvPoolLayer(
rng,
input=layer0_input,
image_shape=(batch_size, 1, 166, 166),
filter_shape=(nkerns[0], 1, 11, 11),
poolsize=(2, 2)
)
# Construct the second convolutional pooling layer
    # filtering reduces the image size to (78-11+1, 78-11+1) = (68, 68)
    # maxpooling reduces this further to (68/2, 68/2) = (34, 34)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 34, 34)
layer1 = LeNetConvPoolLayer(
rng,
input=layer0.output,
image_shape=(batch_size, nkerns[0], 78, 78),
filter_shape=(nkerns[1], nkerns[0], 11, 11),
poolsize=(2, 2)
)
# the HiddenLayer being fully-connected, it operates on 2D matrices of
# shape (batch_size, num_pixels) (i.e matrix of rasterized images).
    # This will generate a matrix of shape (batch_size, nkerns[1] * 34 * 34),
    # or (500, 50 * 34 * 34) = (500, 57800) with the default values.
layer2_input = layer1.output.flatten(2)
# construct a fully-connected sigmoidal layer
layer2 = HiddenLayer(
rng,
input=layer2_input,
n_in=nkerns[1] * 34 * 34,
n_out=500,
activation=T.tanh
)
# classify the values of the fully-connected sigmoidal layer
layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=3)
# the cost we minimize during training is the NLL of the model
cost = layer3.negative_log_likelihood(y)
# create a function to compute the mistakes that are made by the model
test_model = theano.function(
[index],
layer3.errors(y),
givens={
x: test_set_xx[index * batch_size: (index + 1) * batch_size],
y: test_set_yy[index * batch_size: (index + 1) * batch_size]
}
)
#type of x is float64,matrix
validate_model = theano.function(
[index],
layer3.errors(y),
givens={
x: valid_set_xx[index * batch_size: (index + 1) * batch_size],
y: valid_set_yy[index * batch_size: (index + 1) * batch_size]
}
)
# create a list of all model parameters to be fit by gradient descent
params = layer3.params + layer2.params + layer1.params + layer0.params
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i], grads[i]) pairs.
updates = [
(param_i, param_i - learning_rate * grad_i)
for param_i, grad_i in zip(params, grads)
]
train_model = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_xx[index * batch_size: (index + 1) * batch_size],
y: train_set_yy[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-1
###############
# TRAIN MODEL #
###############
print '... training'
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2) #Error on this Line !!!!
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
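    # Because of `from theano.tensor import *` at the top, `min` in the
    # flagged line above resolves to theano.tensor.min (a symbolic
    # reduction whose second argument is an axis), not Python's built-in
    # min. The value patience / 2 (= 5000) is therefore interpreted as an
    # axis, which raises the ValueError shown in the output below.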
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
print 'training @ iter = ', iter
cost_ij = train_model(minibatch_index)
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [
test_model(i)
for i in xrange(n_test_batches)
]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print('Optimization complete.')
print('Best validation score of %f %% obtained at iteration %i, '
'with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
if __name__ == '__main__':
evaluate_lenet5()
def experiment(state, channel):
evaluate_lenet5(state.learning_rate, dataset=state.dataset)
Output:
sweta@sweta-HP-Pavilion-15-Notebook-PC:~//Deep_nn$ python convolutional_mlp.py optimizer=fast_compile exception_verbosity=high
type of train_set_x
uint8
float64
printing for y
int32
(array([[164, 164, 164, ..., 164, 164, 0],
[164, 164, 164, ..., 156, 164, 0],
[164, 164, 164, ..., 164, 164, 0],
...,
[164, 92, 82, ..., 247, 247, 0],
[156, 156, 82, ..., 247, 247, 0],
[164, 156, 82, ..., 247, 247, 0]], dtype=uint8), array([[1],
[1],
[1],
...,
[0],
[0],
[0]], dtype=uint8))
datax:
[[164 164 164 ..., 164 164 0]
[164 164 164 ..., 156 164 0]
[164 164 164 ..., 164 164 0]
...,
[164 92 82 ..., 247 247 0]
[156 156 82 ..., 247 247 0]
[164 156 82 ..., 247 247 0]]
datay:
[[1]
[1]
[1]
...,
[0]
[0]
[0]]
type of train_set_xx is :
TensorType(float64, matrix)
type of train_set_yy is :
TensorType(int32, vector)
type of x is:
TensorType(float64, matrix)
type of y is :
TensorType(int32, vector)
... building the model
LAYER 0 INPUT IS:
Reshape{4}.0
... training
Traceback (most recent call last):
File "convolutional_mlp.py", line 410, in <module>
evaluate_lenet5()
File "convolutional_mlp.py", line 339, in evaluate_lenet5
validation_frequency = min(n_train_batches, patience / 2)
File "/usr/local/lib/python2.7/dist-packages/theano/tensor/basic.py", line 1582, in min
return -max(-x, axis=axis, keepdims=keepdims)
File "/usr/local/lib/python2.7/dist-packages/theano/tensor/basic.py", line 1537, in max
out = CAReduce(scal.maximum, axis)(x)
File "/usr/local/lib/python2.7/dist-packages/theano/gof/op.py", line 507, in __call__
node = self.make_node(*inputs, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/theano/tensor/elemwise.py", line 1314, in make_node
% (input, axis)))
ValueError: Not enough dimensions on Elemwise{neg,no_inplace}.0 to reduce on axis 5000
Answer (score: 1):
A built-in function named `min` is being shadowed because you have imported everything from theano.tensor. On line 339, try calling `__builtin__.min` instead.
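A minimal sketch of two ways to apply this fix (assuming Python 2, which this script targets):

# Option 1: call the shadowed built-in explicitly.
import __builtin__
validation_frequency = __builtin__.min(n_train_batches, patience / 2)

# Option 2: drop the wildcard import so `min` stays the Python built-in;
# the listing above already accesses everything it needs through
# `import theano.tensor as T`.
#   remove: from theano.tensor import *

With either change, min() receives two plain Python ints: with batch_size=500 and 2115 samples, n_train_batches is 4, so validation_frequency becomes min(4, 5000) = 4 and training proceeds past this line.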