错误 “CUDNN_STATUS_BAD_PARAM” 是什么意思?

时间:2019-08-15 23:41:36

标签: matlab directed-acyclic-graphs cudnn

我正在尝试在 MATLAB 中重新创建一个 TensorFlow 的 U-Net 模型。我暂时使用 rand() 函数来分配权重(之后会从检查点文件中加载真实权重)。网络可以正常创建,但是当我尝试用 predict 函数对输入图像运行推理时,会返回以下错误:

使用nnet.internal.cnngpu.convolveBackwardData2DCore时出错 意外的错误调用cuDNN:CUDNN_STATUS_BAD_PARAM。

我尝试注释掉除了 ReLU 和卷积层之外的所有层,但没有用。各层的尺寸看起来都正常。随机设置权重会导致这个错误吗?

结果应该是分割的图像输出,或者是具有n个帧(表示层数)的体积

这是我的代码:

% %  general U-Net structure as used in this code:
% %  
% %  A1c A2c                                                        A1e A2e
% %        B1c B2c                                          B1e B2e
% %                 C1c C2c                         C1e C2e
% %                         D1c D2c         D1e D2e
% %                                 E1 E2
% 
% Network input: 192x400 grayscale images (1 channel).
inputs = imageInputLayer([192 400 1],'Name','input');

% ================================================================
% Contracting block A: two 3x3 convs (64 filters each), each followed
% by batch norm + ReLU + dropout, then a 2x2 max pool (stride 2).
% Convolutional Layer A1c
conv_A1c = convolution2dLayer(3,64,'Padding','same','Name','conv_A1c');
% Conv weights are H-by-W-by-channelsIn-by-numFilters; input has 1 channel.
conv_A1c.Weights = rand(3,3,1,64);
bn_conv_A1c = batchNormalizationLayer('Name','bn_conv_A1c');
relu_A1c = reluLayer('Name','relu_A1c');
dropout_A1c = dropoutLayer(0.5, 'Name','dropout_A1c');

% Convolutional Layer A2c
conv_A2c = convolution2dLayer(3,64,'Padding','same','Name','conv_A2c');
conv_A2c.Weights = rand(3,3,64,64);
bn_conv_A2c = batchNormalizationLayer('Name','bn_conv_A2c');
relu_A2c = reluLayer('Name','relu_A2c');
dropout_A2c = dropoutLayer(0.5, 'Name','dropout_A2c');

% Pooling Layer A1c: halves spatial dims (192x400 -> 96x200).
pool_A1c = maxPooling2dLayer(2, 'Stride', 2, 'Name', 'pool_A1c', 'Padding', 'same');


% ================================================================
% Contracting block B: same pattern as block A, 128 filters.
% Convolutional Layer B1c

conv_B1c = convolution2dLayer(3,128,'Padding','same','Name','conv_B1c');
% 64 input channels (from block A) -> 128 filters.
conv_B1c.Weights = rand(3,3,64,128);
bn_conv_B1c = batchNormalizationLayer('Name','bn_conv_B1c');
relu_B1c = reluLayer('Name','relu_B1c');
dropout_B1c = dropoutLayer(0.5,'Name','dropout_B1c');

% Convolutional Layer B2c
conv_B2c = convolution2dLayer(3,128,'Padding','same','Name','conv_B2c');
conv_B2c.Weights = rand(3,3,128,128);
bn_conv_B2c = batchNormalizationLayer('Name','bn_conv_B2c');
relu_B2c = reluLayer('Name','relu_B2c');
dropout_B2c = dropoutLayer(0.5, 'Name','dropout_B2c');

% Pooling Layer B1c: 96x200 -> 48x100.
pool_B1c = maxPooling2dLayer(2, 'Stride', 2, 'Name','pool_B1c', 'Padding', 'same');

% ================================================================
% Contracting block C: same pattern, 256 filters.
% Convolutional Layer C1c

conv_C1c = convolution2dLayer(3,256,'Padding','same','Name','conv_C1c');
% 128 input channels (from block B) -> 256 filters.
conv_C1c.Weights = rand(3,3,128,256);
bn_conv_C1c = batchNormalizationLayer('Name','bn_conv_C1c');
relu_C1c = reluLayer('Name','relu_C1c');
dropout_C1c = dropoutLayer(0.5,'Name','dropout_C1c');

% Convolutional Layer C2c
conv_C2c = convolution2dLayer(3,256,'Padding','same','Name','conv_C2c');
conv_C2c.Weights = rand(3,3,256,256);
bn_conv_C2c = batchNormalizationLayer('Name','bn_conv_C2c');
relu_C2c = reluLayer('Name','relu_C2c');
dropout_C2c = dropoutLayer(0.5,'Name','dropout_C2c');

% Pooling Layer C1c: 48x100 -> 24x50.
pool_C1c = maxPooling2dLayer(2, 'Stride', 2,'Name','pool_C1c','Padding','same');

% ================================================================
% Contracting block D: same pattern, 512 filters.
% Convolutional Layer D1c

conv_D1c = convolution2dLayer(3,512,'Padding','same','Name','conv_D1c');
% 256 input channels (from block C) -> 512 filters.
conv_D1c.Weights = rand(3,3,256,512);
bn_conv_D1c = batchNormalizationLayer('Name','bn_conv_D1c');
relu_D1c = reluLayer('Name','relu_D1c');
dropout_D1c = dropoutLayer(0.5,'Name','dropout_D1c');

% Convolutional Layer D2c
conv_D2c = convolution2dLayer(3,512,'Padding','same','Name','conv_D2c');
conv_D2c.Weights = rand(3,3,512,512);
bn_conv_D2c = batchNormalizationLayer('Name','bn_conv_D2c');
relu_D2c = reluLayer('Name','relu_D2c');
dropout_D2c = dropoutLayer(0.5,'Name','dropout_D2c');

% Pooling Layer D1c: 24x50 -> 12x25.
pool_D1c = maxPooling2dLayer(2, 'Stride', 2,'Name','pool_D1c','Padding','same');

% ================================================================
% Bottleneck block E: two 3x3 convs with 1024 filters, then a 2x2
% stride-2 transposed conv that upsamples back to block-D resolution.
% Convolutional Layer E1c
conv_E1 = convolution2dLayer(3,1024,'Padding','same','Name','conv_E1');
% 512 input channels (from block D) -> 1024 filters.
conv_E1.Weights = rand(3,3,512,1024);
bn_conv_E1 = batchNormalizationLayer('Name','bn_conv_E1');
relu_E1 = reluLayer('Name','relu_E1');
dropout_E1 = dropoutLayer(0.5,'Name','dropout_E1');

% Convolutional Layer E2c
conv_E2 = convolution2dLayer(3,1024,'Padding','same','Name','conv_E2');
conv_E2.Weights = rand(3,3,1024,1024);
bn_conv_E2 = batchNormalizationLayer('Name','bn_conv_E2');
relu_E2 = reluLayer('Name','relu_E2');
dropout_E2 = dropoutLayer(0.5,'Name','dropout_E2');

% Transposed Convolutional layer E1: 1024 channels in -> 512 filters out,
% upsamples 2x. Its Weights are assigned later in the script.
upconv_E1 = transposedConv2dLayer(2, 512,'Name','upconv_E1', 'Stride', 2);

% ======================UP CONVOLUTION PORTION ===================

% ================================================================
% Expanding block D: concatenate the upsampled bottleneck output
% (512 ch) with the skip connection from dropout_D2c (512 ch) along
% the channel dimension (dim 3), giving 1024 input channels.
% Convolutional Layer D1e
conv_D1e_ip  = concatenationLayer(3,2,'Name','conv_D1e_ip');
conv_D1e = convolution2dLayer(3,512,'Padding','same','Name','conv_D1e');
% 1024 channels in (512 upsampled + 512 skip) -> 512 filters.
conv_D1e.Weights = rand(3,3,1024,512);
bn_conv_D1e = batchNormalizationLayer('Name','bn_conv_D1e');
relu_D1e = reluLayer('Name','relu_D1e');
dropout_D1e = dropoutLayer(0.5,'Name','dropout_D1e');

% Convolutional Layer D2e
conv_D2e = convolution2dLayer(3,512,'Padding','same','Name','conv_D2e');
conv_D2e.Weights = rand(3,3,512,512);
bn_conv_D2e = batchNormalizationLayer('Name','bn_conv_D2e');
relu_D2e = reluLayer('Name','relu_D2e');
dropout_D2e = dropoutLayer(0.5,'Name','dropout_D2e');

% Transposed Convolutional layer D1: 512 channels in -> 256 filters out.
upconv_D2e = transposedConv2dLayer(2, 256,'Stride', 2, 'Name','upconv_D2e');

% ================================================================
% Expanding block C: concat 256 upsampled + 256 skip = 512 channels in.
% Convolutional Layer C1e
conv_C1e_ip  = concatenationLayer(3,2,'Name','conv_C1e_ip');
conv_C1e = convolution2dLayer(3,256,'Padding','same','Name','conv_C1e');
conv_C1e.Weights = rand(3,3,512,256);
bn_conv_C1e = batchNormalizationLayer('Name','bn_conv_C1e');
relu_C1e = reluLayer('Name','relu_C1e');
dropout_C1e = dropoutLayer(0.5,'Name','dropout_C1e');

% Convolutional Layer C2e
conv_C2e = convolution2dLayer(3,256,'Padding','same','Name','conv_C2e');
conv_C2e.Weights = rand(3,3,256,256);
bn_conv_C2e = batchNormalizationLayer('Name','bn_conv_C2e');
relu_C2e = reluLayer('Name','relu_C2e');
dropout_C2e = dropoutLayer(0.5,'Name','dropout_C2e');

% Transposed Convolutional layer C1: 256 channels in -> 128 filters out.
upconv_C2e = transposedConv2dLayer(2, 128, 'Stride', 2, 'Name','upconv_C2e');

% ================================================================
% Expanding block B: concat 128 upsampled + 128 skip = 256 channels in.
% Convolutional Layer B1e
conv_B1e_ip  = concatenationLayer(3,2,'Name','conv_B1e_ip');
conv_B1e = convolution2dLayer(3,128,'Padding','same','Name','conv_B1e');
conv_B1e.Weights = rand(3,3,256,128);
bn_conv_B1e = batchNormalizationLayer('Name','bn_conv_B1e');
relu_B1e = reluLayer('Name','relu_B1e');
dropout_B1e = dropoutLayer(0.5,'Name','dropout_B1e');

% Convolutional Layer B2e
conv_B2e = convolution2dLayer(3,128,'Padding','same','Name','conv_B2e');
conv_B2e.Weights = rand(3,3,128,128);
bn_conv_B2e = batchNormalizationLayer('Name','bn_conv_B2e');
relu_B2e = reluLayer('Name','relu_B2e');
dropout_B2e = dropoutLayer(0.5,'Name','dropout_B2e');

% Transposed Convolutional layer B1: 128 channels in -> 64 filters out.
upconv_B2e = transposedConv2dLayer(2, 64,'Name','upconv_B2e', 'Stride', 2);

% ================================================================
% Expanding block A: concat 64 upsampled + 64 skip = 128 channels in.
% Convolutional Layer A1e
conv_A1e_ip  = concatenationLayer(3,2,'Name','conv_A1e_ip');
conv_A1e = convolution2dLayer(3,64,'Padding','same','Name','conv_A1e');
conv_A1e.Weights = rand(3,3,128,64);
bn_conv_A1e = batchNormalizationLayer('Name','bn_conv_A1e');
relu_A1e = reluLayer('Name','relu_A1e');
dropout_A1e = dropoutLayer(0.5,'Name','dropout_A1e');

% Convolutional Layer A2e
conv_A2e = convolution2dLayer(3,64,'Padding','same','Name','conv_A2e');
conv_A2e.Weights = rand(3,3,64,64);
bn_conv_A2e = batchNormalizationLayer('Name','bn_conv_A2e');
relu_A2e = reluLayer('Name','relu_A2e');
dropout_A2e = dropoutLayer(0.5,'Name','dropout_A2e');

% Output head: 1x1 conv mapping 64 channels to 2 class scores,
% then per-pixel softmax + pixel classification.
% NOTE(review): a ReLU between the score conv and the softmax is
% unusual (it zeroes negative logits) — confirm it matches the
% original TensorFlow model.
conv_score = convolution2dLayer(1,2,'Padding','same','Name','conv_score');
conv_score.Weights = rand(1,1,64,2);
relu_score = reluLayer('Name','relu_score');
softmax = softmaxLayer('Name', 'softmax');
output = pixelClassificationLayer('Name', 'output');

output.Classes = ["background" "Vessel"];
% SETTING DUMMY WEIGHTS (earlier attempts, kept for reference)
% conv_B2e.Weights = rand(3,3,1,128);
% conv_C2e.Weights = rand(3,3,1,256);
% conv_D2e.Weights = rand(3,3,1,512);
% conv_E1.Weights = rand(3,3,1,1024);

% Transposed-convolution weights.
% transposedConv2dLayer Weights must be sized
%   filterSize(1)-by-filterSize(2)-by-numFilters-by-numChannelsIn,
% where numChannelsIn is the channel count of the layer's INPUT.
% The original sizes (e.g. rand(2,2,512,3)) had the wrong 4th
% dimension, which is what triggers the cuDNN error
% "CUDNN_STATUS_BAD_PARAM" in convolveBackwardData2DCore at predict
% time (transposed conv is implemented via the backward-data pass).
upconv_B2e.Weights = rand(2,2,64,128);   % input dropout_B2e: 128 ch -> 64 filters
upconv_C2e.Weights = rand(2,2,128,256);  % input dropout_C2e: 256 ch -> 128 filters
upconv_D2e.Weights = rand(2,2,256,512);  % input dropout_D2e: 512 ch -> 256 filters
upconv_E1.Weights = rand(2,2,512,1024);  % input dropout_E2: 1024 ch -> 512 filters

% Batch-normalization parameters for every BN layer: Offset (beta),
% Scale (gamma), TrainedVariance and TrainedMean are each 1x1xC,
% where C is the channel count of the preceding convolution. All are
% random placeholders to be replaced by checkpoint values.
% NOTE(review): rand() can return values arbitrarily close to 0 for
% TrainedVariance; real checkpoint variances should be used before
% trusting the outputs.
bn_conv_A1c.Offset = rand(1,1,64);
bn_conv_A1c.Scale = rand(1,1,64);
bn_conv_A1c.TrainedVariance = rand(1,1,64);
bn_conv_A1c.TrainedMean = rand(1,1,64);

bn_conv_A1e.Offset = rand(1,1,64);
bn_conv_A1e.Scale = rand(1,1,64);
bn_conv_A1e.TrainedVariance = rand(1,1,64);
bn_conv_A1e.TrainedMean = rand(1,1,64);
% bn_conv_A1e.NumChannels = 1;

bn_conv_A2c.Offset = rand(1,1,64);
bn_conv_A2c.Scale = rand(1,1,64);
bn_conv_A2c.TrainedVariance = rand(1,1,64);
bn_conv_A2c.TrainedMean = rand(1,1,64);

bn_conv_A2e.Offset = rand(1,1,64);
bn_conv_A2e.Scale = rand(1,1,64);
bn_conv_A2e.TrainedVariance = rand(1,1,64);
bn_conv_A2e.TrainedMean = rand(1,1,64);

bn_conv_B1c.Offset = rand(1,1,128);
bn_conv_B1c.Scale = rand(1,1,128);
bn_conv_B1c.TrainedVariance = rand(1,1,128);
bn_conv_B1c.TrainedMean = rand(1,1,128);

bn_conv_B1e.Offset = rand(1,1,128);
bn_conv_B1e.Scale = rand(1,1,128);
bn_conv_B1e.TrainedVariance = rand(1,1,128);
bn_conv_B1e.TrainedMean = rand(1,1,128);

bn_conv_B2c.Offset = rand(1,1,128);
bn_conv_B2c.Scale = rand(1,1,128);
bn_conv_B2c.TrainedVariance = rand(1,1,128);
bn_conv_B2c.TrainedMean = rand(1,1,128);

bn_conv_B2e.Offset = rand(1,1,128);
bn_conv_B2e.Scale = rand(1,1,128);
bn_conv_B2e.TrainedVariance = rand(1,1,128);
bn_conv_B2e.TrainedMean = rand(1,1,128);

bn_conv_C1c.Offset = rand(1,1,256);
bn_conv_C1c.Scale = rand(1,1,256);
bn_conv_C1c.TrainedVariance = rand(1,1,256);
bn_conv_C1c.TrainedMean = rand(1,1,256);

bn_conv_C1e.Offset = rand(1,1,256);
bn_conv_C1e.Scale = rand(1,1,256);
bn_conv_C1e.TrainedVariance = rand(1,1,256);
bn_conv_C1e.TrainedMean = rand(1,1,256);

bn_conv_C2c.Offset = rand(1,1,256);
bn_conv_C2c.Scale = rand(1,1,256);
bn_conv_C2c.TrainedVariance = rand(1,1,256);
bn_conv_C2c.TrainedMean = rand(1,1,256);

bn_conv_C2e.Offset = rand(1,1,256);
bn_conv_C2e.Scale = rand(1,1,256);
bn_conv_C2e.TrainedVariance = rand(1,1,256);
bn_conv_C2e.TrainedMean = rand(1,1,256);

bn_conv_D1c.Offset = rand(1,1,512);
bn_conv_D1c.Scale = rand(1,1,512);
bn_conv_D1c.TrainedVariance = rand(1,1,512);
bn_conv_D1c.TrainedMean = rand(1,1,512);

bn_conv_D1e.Offset = rand(1,1,512);
bn_conv_D1e.Scale = rand(1,1,512);
bn_conv_D1e.TrainedVariance = rand(1,1,512);
bn_conv_D1e.TrainedMean = rand(1,1,512);

bn_conv_D2c.Offset = rand(1,1,512);
bn_conv_D2c.Scale = rand(1,1,512);
bn_conv_D2c.TrainedVariance = rand(1,1,512);
bn_conv_D2c.TrainedMean = rand(1,1,512);

bn_conv_D2e.Offset = rand(1,1,512);
bn_conv_D2e.Scale = rand(1,1,512);
bn_conv_D2e.TrainedVariance = rand(1,1,512);
bn_conv_D2e.TrainedMean = rand(1,1,512);

bn_conv_E1.Offset = rand(1,1,1024);
bn_conv_E1.Scale = rand(1,1,1024);
bn_conv_E1.TrainedVariance = rand(1,1,1024);
bn_conv_E1.TrainedMean = rand(1,1,1024);

bn_conv_E2.Offset = rand(1,1,1024);
bn_conv_E2.Scale = rand(1,1,1024);
bn_conv_E2.TrainedVariance = rand(1,1,1024);
bn_conv_E2.TrainedMean = rand(1,1,1024);


conv_A1c.Bias = rand(1,1,64);
conv_A1e.Bias = rand(1,1,64);
conv_A2c.Bias = rand(1,1,64);
conv_A2e.Bias = rand(1,1,64);

conv_B1c.Bias = rand(1,1,128);
conv_B1e.Bias = rand(1,1,128);
conv_B2c.Bias = rand(1,1,128);
conv_B2e.Bias = rand(1,1,128);

conv_C1c.Bias = rand(1,1,256);
conv_C1e.Bias = rand(1,1,256);
conv_C2c.Bias = rand(1,1,256);
conv_C2e.Bias = rand(1,1,256);

conv_D1c.Bias = rand(1,1,512);
conv_D1e.Bias = rand(1,1,512);
conv_D2c.Bias = rand(1,1,512);
conv_D2e.Bias = rand(1,1,512);

upconv_B2e.Bias = rand(1,1,64);
upconv_C2e.Bias = rand(1,1,128);
upconv_D2e.Bias = rand(1,1,256);
upconv_E1.Bias = rand(1,1,512);


conv_E1.Bias = rand(1,1,1024);
conv_E2.Bias = rand(1,1,1024);

conv_score.Bias = rand(1,1,2);

inputs.AverageImage = rand(192,400,1);

% Serial layer ordering. The skip connections are wired afterwards
% with connectLayers, so each concatenation layer (conv_*_ip) gets
% its first input from the serial chain and its second ('in2') from
% the matching contracting-path dropout layer.
layers = [inputs    
    conv_A1c
    bn_conv_A1c
    relu_A1c
    dropout_A1c    
    conv_A2c
    bn_conv_A2c
    relu_A2c
    dropout_A2c    
    pool_A1c    
    conv_B1c
    bn_conv_B1c
    relu_B1c
    dropout_B1c    
    conv_B2c
    bn_conv_B2c
    relu_B2c
    dropout_B2c    
    pool_B1c    
    conv_C1c
    bn_conv_C1c
    relu_C1c
    dropout_C1c    
    conv_C2c
    bn_conv_C2c
    relu_C2c
    dropout_C2c    
    pool_C1c    
    conv_D1c
    bn_conv_D1c
    relu_D1c
    dropout_D1c
    conv_D2c
    bn_conv_D2c
    relu_D2c
    dropout_D2c
    pool_D1c
    conv_E1
    bn_conv_E1
    relu_E1
    dropout_E1
    conv_E2
    bn_conv_E2
    relu_E2
    dropout_E2
    upconv_E1
    conv_D1e_ip
    conv_D1e
    bn_conv_D1e
    relu_D1e
    dropout_D1e
    conv_D2e
    bn_conv_D2e
    relu_D2e
    dropout_D2e
    upconv_D2e
    conv_C1e_ip
    conv_C1e
    bn_conv_C1e
    relu_C1e
    dropout_C1e    
    conv_C2e
    bn_conv_C2e
    relu_C2e
    dropout_C2e    
    upconv_C2e 
    conv_B1e_ip
    conv_B1e
    bn_conv_B1e
    relu_B1e
    dropout_B1e    
    conv_B2e
    bn_conv_B2e
    relu_B2e
    dropout_B2e    
    upconv_B2e 
    conv_A1e_ip
    conv_A1e
    bn_conv_A1e
    relu_A1e
    dropout_A1e    
    conv_A2e
    bn_conv_A2e
    relu_A2e
    dropout_A2e    
    conv_score
    relu_score
    softmax
    output];

lgraph = layerGraph(layers);

% Connecting Concatenating layers: U-Net skip connections from each
% contracting-path dropout to the matching expanding-path concat input.
lgraph = connectLayers(lgraph,'dropout_A2c','conv_A1e_ip/in2');
lgraph = connectLayers(lgraph,'dropout_B2c','conv_B1e_ip/in2');
lgraph = connectLayers(lgraph,'dropout_C2c','conv_C1e_ip/in2');
lgraph = connectLayers(lgraph,'dropout_D2c','conv_D1e_ip/in2');

% Assemble for inference (validates all weight/activation sizes).
net = assembleNetwork(lgraph);

0 个答案:

没有答案