Weird error in Theano when running a 3D CNN

Asked: 2016-04-06 18:17:58

Tags: machine-learning neural-network theano deep-learning

I am adapting this tutorial, http://deeplearning.net/tutorial/lenet.html, into a 3D convolutional neural network.

However, when I run it I hit the following error:

Traceback (most recent call last):
  File "cnnProgram.py", line 191, in <module>
    minibatch_avg_cost = train_model(minibatch_index)
  File "/Volumes/TONY/anaconda/lib/python2.7/site-packages/theano/compile/function_module.py", line 871, in __call__
    storage_map=getattr(self.fn, 'storage_map', None))
  File "/Volumes/TONY/anaconda/lib/python2.7/site-packages/theano/gof/link.py", line 314, in raise_with_op
    reraise(exc_type, exc_value, exc_trace)
  File "/Volumes/TONY/anaconda/lib/python2.7/site-packages/theano/compile/function_module.py", line 859, in __call__
    outputs = self.fn()
ValueError: total size of new array must be unchanged
Apply node that caused the error: Reshape{4}(InplaceDimShuffle{0,4,1,2,3}.0, TensorConstant{[5000    5..  12    5]})
Toposort index: 102
Inputs types: [TensorType(float64, 5D), TensorType(int64, vector)]
Inputs shapes: [(200, 10, 5, 12, 5), (4,)]
Inputs strides: [(24000, 8, 4800, 400, 80), (8,)]
Inputs values: ['not shown', array([5000,    5,   12,    5])]
Outputs clients: [[CorrMM_gradWeights{valid, (1, 1)}(Reshape{4}.0, Reshape{4}.0), CorrMM{valid, (1, 1)}(Reshape{4}.0, Subtensor{::, ::, ::int64, ::int64}.0)]]

The problem seems to appear at the point where the dimension order is changed with .dimshuffle? This is really strange, and I cannot figure out the reason at all.
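
The ValueError itself is just Theano's generic complaint that a Reshape node received an array whose total element count differs from the requested shape. For reference, a minimal standalone snippet (nothing to do with my network) that triggers the same class of error:

import numpy as np
import theano
import theano.tensor as T

x = T.dtensor3('x')
# 200*10*4 = 8000 elements cannot become 5000*4 = 20000 elements
f = theano.function([x], x.reshape((5000, 4)))
f(np.zeros((200, 10, 4)))  # ValueError: total size of new array must be unchanged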

Here is my code:

from __future__ import print_function

import scipy.io as sio
import numpy as np
import theano.tensor as T
import theano
from theano import shared
from lasagne.layers import InputLayer, DenseLayer

import os
import sys
import timeit

from mlp import LogRegr, HiddenLayer, DropoutLayer
from convnet3d import ConvLayer, NormLayer, PoolLayer, RectLayer
from activations import relu, tanh, sigmoid, softplus

# Get data

dataReadyForCNN_withValid = sio.loadmat("DataReadyForCNN_withValid.mat")

xTrain = dataReadyForCNN_withValid["xTrain"]
xTrain = xTrain.astype("float64")

yTrainCond = dataReadyForCNN_withValid["yTrainCond"]
yTrainCond = yTrainCond.astype("int32")
yTrainWord = dataReadyForCNN_withValid["yTrainWord"]
yTrainWord = yTrainWord.astype("int32")

xValidate = dataReadyForCNN_withValid["xTrain"]  # note: this loads the training set as validation data; "xValidate" was presumably intended
xValidate = xValidate.astype("float64")

yValidateCond = dataReadyForCNN_withValid["yValidateCond"]
yValidateCond = yValidateCond.astype("int32")
yValidateWord = dataReadyForCNN_withValid["yValidateWord"]
yValidateWord = yValidateWord.astype("int32")

xTest = dataReadyForCNN_withValid["xTest"]
xTest = xTest.astype("float64")

yTestCond = dataReadyForCNN_withValid["yTestCond"]
yTestCond = yTestCond.astype("int32")
yTestWord = dataReadyForCNN_withValid["yTestWord"]
yTestWord = yTestWord.astype("int32")


##################################
# Build Model
#################################
# xTrain = np.random.rand(500, 1, 51, 61, 23).astype('float64')

dtensor5 = T.TensorType('float64', (False,)*5)  # custom 5D float64 tensor type (all axes non-broadcastable)
x = dtensor5('x')  # the input data: (batch, channel, dim1, dim2, dim3)
y = T.ivector()

# allocate symbolic variables for the data
index = T.lscalar()  # index to a [mini]batch

# input = (nImages, nChannel(nFeatureMaps), nDim1, nDim2, nDim3)

# layer1 (500, 5, 47, 56, 22)
# layer2 (500, 5, 10, 12, 5)
# layer3 (500, 3, 9, 11, 4)
# layer4 (500, 3, 5, 6, 2)

kernel_shape = (5,6,2)
fMRI_shape = (51, 61, 23)
n_in_maps = 1 # channel
n_out_maps = 5 # num of feature maps, aka the depth of the neurons
batch_size = 200

# 1st: Convolution Layer
layer1_input = x
layer1 = ConvLayer(layer1_input, 1, 5, (5, 6, 2), fMRI_shape, 
                       batch_size, tanh)

# print layer1.output.eval({x:xTrain[:500]}).shape

# 2nd: Pool layer
poolShape = (5, 5, 5)
layer2 = PoolLayer(layer1.output, poolShape)

# print layer2.output.eval({x:xTrain}).shape

# 3rd: Convolution Layer
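# (note: this layer is built with a hard-coded batch size of 500 and an
# input volume of (10, 12, 5), while batch_size above is 200)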
layer3 = ConvLayer(layer2.output, 5, 3, (2, 2, 2), (10, 12, 5), 
                       500, tanh)

# print layer3.output.eval({x:xTrain[:500]}).shape

# 4th: Pool layer
layer4 = PoolLayer(layer3.output, (2, 2, 2))

# print layer4.output.eval({x:xTrain[:500]}).shape

# 5th: Dense layer
layer5_input = T.flatten(layer4.output, outdim=2)
layer5 = HiddenLayer(layer5_input, n_in=180, n_out=500, activation=tanh)

# layer5.output.eval({x:xTrain[:500]}).shape

# 6th: Logistic layer
layer6 = LogRegr(layer5.output, 500, 12, tanh)

cost = layer6.negative_log_likelihood(y)

# create a function to compute the mistakes that are made by the model
test_model = theano.function(
    [index],
    layer6.errors(y),
    givens={
        x: shared(xTest)[index * batch_size: (index + 1) * batch_size],
        y: shared(yTestCond[0])[index * batch_size: (index + 1) * batch_size]
    }
)

validate_model = theano.function(
    [index],
    layer6.errors(y),
    givens={
        x: shared(xValidate)[index * batch_size: (index + 1) * batch_size],
        y: shared(yValidateCond[0])[index * batch_size: (index + 1) * batch_size]
    }
)

# create a list of all model parameters to be fit by gradient descent
params = layer5.params + layer3.params + layer1.params + layer6.params

# create a list of gradients for all model parameters
grads = T.grad(cost, params)

# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i], grads[i]) pairs.
learning_rate=0.1

updates = [
    (param_i, param_i - learning_rate * grad_i)
    for param_i, grad_i in zip(params, grads)
]

train_model = theano.function(
    [index],
    cost,
    updates=updates,
    givens={
        x: shared(xTrain)[index * batch_size: (index + 1) * batch_size],
        y: shared(yTrainCond[0])[index * batch_size: (index + 1) * batch_size]
    }
)

###############
# TRAIN MODEL #
###############

print('... training')

n_train_batches = 10
n_test_batches = 10
n_validate_batches = 10

n_epochs=200

# early-stopping parameters
patience = 10000  # look at this many examples regardless
patience_increase = 2  # wait this much longer when a new best is
                       # found
improvement_threshold = 0.995  # a relative improvement of this much is
                               # considered significant
validation_frequency = min(n_train_batches, patience // 2)
                              # go through this many
                              # minibatches before checking the network
                              # on the validation set; in this case we
                              # check every epoch

best_validation_loss = np.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()

epoch = 0
done_looping = False

while (epoch < n_epochs) and (not done_looping):
    epoch = epoch + 1
    for minibatch_index in range(n_train_batches):

        minibatch_avg_cost = train_model(minibatch_index)
        # iteration number
        iter = (epoch - 1) * n_train_batches + minibatch_index

        if (iter + 1) % validation_frequency == 0:
            # compute zero-one loss on validation set
            validation_losses = [validate_model(i) for i
                                 in range(n_validate_batches)]
            this_validation_loss = np.mean(validation_losses)

            print(
                'epoch %i, minibatch %i/%i, validation error %f %%' %
                (
                    epoch,
                    minibatch_index + 1,
                    n_train_batches,
                    this_validation_loss * 100.
                )
            )

            # if we got the best validation score until now
            if this_validation_loss < best_validation_loss:
                #improve patience if loss improvement is good enough
                if (
                    this_validation_loss < best_validation_loss *
                    improvement_threshold
                ):
                    patience = max(patience, iter * patience_increase)

                best_validation_loss = this_validation_loss
                best_iter = iter

                # test it on the test set
                test_losses = [test_model(i) for i
                               in range(n_test_batches)]
                test_score = np.mean(test_losses)

                print(('     epoch %i, minibatch %i/%i, test error of '
                       'best model %f %%') %
                      (epoch, minibatch_index + 1, n_train_batches,
                       test_score * 100.))

        if patience <= iter:
            done_looping = True
            break


end_time = timeit.default_timer()
print(('Optimization complete. Best validation score of %f %% '
       'obtained at iteration %i, with test performance %f %%') %
      (best_validation_loss * 100., best_iter + 1, test_score * 100.))

1 Answer:

Answer 0 (score: 1)

Since I don't have your data, I changed the data-loading part of your code and then tested it. I did not get any error.

Make sure your input data is in the correct format (in your case, a (50, 1, 51, 61, 23) shape).
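
For what it's worth, the numbers in the traceback point at a batch-size mismatch: the Reshape target [5000, 5, 12, 5] asks for 5000 * 5 * 12 * 5 = 1,500,000 elements, while the actual input of shape (200, 10, 5, 12, 5) carries only 600,000. Note that 5000 = 500 * 10: the graph was built for minibatches of 500 (the value hard-coded into the second ConvLayer), while the givens slice out minibatches of 200. A minimal sketch of the kind of fix, assuming ConvLayer takes the batch size as its sixth positional argument as in your code:

batch_size = 200

# build every layer with the batch size that the givens actually feed,
# instead of hard-coding 500 into the second ConvLayer
layer3 = ConvLayer(layer2.output, 5, 3, (2, 2, 2), (10, 12, 5),
                   batch_size, tanh)

# and sanity-check the raw input shape before compiling anything
assert xTrain.ndim == 5, xTrain.shape  # e.g. (n_samples, 1, 51, 61, 23)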