I noticed that SegNet has upsampling layers. Its own images are 480 * 360, and when I try to use my own images (565 * 584) I get the following error:
I0929 03:58:06.238135 22750 net.cpp:368] upsample4 -> pool4_D
I0929 03:58:06.238142 22750 net.cpp:120] Setting up upsample4
F0929 03:58:06.238164 22750 upsample_layer.cpp:63] Check failed: bottom[0]->height() == bottom[1]->height() (38 vs. 37)
Here is the layer definition:
layer {
name: "upsample4"
type: "Upsample"
bottom: "conv5_1_D"
top: "pool4_D"
bottom: "pool4_mask"
upsample_param {
scale: 2
upsample_w: 60
upsample_h: 45
}
}
I think I should change upsample_w and upsample_h, but I don't know the exact values. Can anyone tell me the relationship between scale, upsample_w, upsample_h and the image size, or how to calculate them?
The full definition of the network, segnet_train.prototxt:
name: "VGG_ILSVRC_16_layer"
layer {
name: "data"
type: "DenseImageData"
top: "data"
top: "label"
dense_image_data_param {
source: "/home/zhaimo/SegNet/CamVid/mytrain.txt" # Change this to the absolute path to your data file
batch_size: 4 # Change this number to a batch size that will fit on your GPU
shuffle: true
}
}
layer {
bottom: "data"
top: "conv1_1"
name: "conv1_1"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
num_output: 64
pad: 1
kernel_size: 3
}
}
layer {
bottom: "conv1_1"
top: "conv1_1"
name: "conv1_1_bn"
type: "BN"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
}
}
layer {
bottom: "conv1_1"
top: "conv1_1"
name: "relu1_1"
type: "ReLU"
}
layer {
bottom: "conv1_1"
top: "conv1_2"
name: "conv1_2"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
num_output: 64
pad: 1
kernel_size: 3
}
}
layer {
bottom: "conv1_2"
top: "conv1_2"
name: "conv1_2_bn"
type: "BN"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
}
}
layer {
bottom: "conv1_2"
top: "conv1_2"
name: "relu1_2"
type: "ReLU"
}
layer {
bottom: "conv1_2"
top: "pool1"
top: "pool1_mask"
name: "pool1"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "pool1"
top: "conv2_1"
name: "conv2_1"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
num_output: 128
pad: 1
kernel_size: 3
}
}
layer {
bottom: "conv2_1"
top: "conv2_1"
name: "conv2_1_bn"
type: "BN"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
}
}
layer {
bottom: "conv2_1"
top: "conv2_1"
name: "relu2_1"
type: "ReLU"
}
layer {
bottom: "conv2_1"
top: "conv2_2"
name: "conv2_2"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
num_output: 128
pad: 1
kernel_size: 3
}
}
layer {
bottom: "conv2_2"
top: "conv2_2"
name: "conv2_2_bn"
type: "BN"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
}
}
layer {
bottom: "conv2_2"
top: "conv2_2"
name: "relu2_2"
type: "ReLU"
}
layer {
bottom: "conv2_2"
top: "pool2"
top: "pool2_mask"
name: "pool2"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "pool2"
top: "conv3_1"
name: "conv3_1"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
num_output: 256
pad: 1
kernel_size: 3
}
}
layer {
bottom: "conv3_1"
top: "conv3_1"
name: "conv3_1_bn"
type: "BN"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
}
}
layer {
bottom: "conv3_1"
top: "conv3_1"
name: "relu3_1"
type: "ReLU"
}
layer {
bottom: "conv3_1"
top: "conv3_2"
name: "conv3_2"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
num_output: 256
pad: 1
kernel_size: 3
}
}
layer {
bottom: "conv3_2"
top: "conv3_2"
name: "conv3_2_bn"
type: "BN"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
}
}
layer {
bottom: "conv3_2"
top: "conv3_2"
name: "relu3_2"
type: "ReLU"
}
layer {
bottom: "conv3_2"
top: "conv3_3"
name: "conv3_3"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
num_output: 256
pad: 1
kernel_size: 3
}
}
layer {
bottom: "conv3_3"
top: "conv3_3"
name: "conv3_3_bn"
type: "BN"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
}
}
layer {
bottom: "conv3_3"
top: "conv3_3"
name: "relu3_3"
type: "ReLU"
}
layer {
bottom: "conv3_3"
top: "pool3"
top: "pool3_mask"
name: "pool3"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "pool3"
top: "conv4_1"
name: "conv4_1"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom: "conv4_1"
top: "conv4_1"
name: "conv4_1_bn"
type: "BN"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
}
}
layer {
bottom: "conv4_1"
top: "conv4_1"
name: "relu4_1"
type: "ReLU"
}
layer {
bottom: "conv4_1"
top: "conv4_2"
name: "conv4_2"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom: "conv4_2"
top: "conv4_2"
name: "conv4_2_bn"
type: "BN"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
}
}
layer {
bottom: "conv4_2"
top: "conv4_2"
name: "relu4_2"
type: "ReLU"
}
layer {
bottom: "conv4_2"
top: "conv4_3"
name: "conv4_3"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom: "conv4_3"
top: "conv4_3"
name: "conv4_3_bn"
type: "BN"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
}
}
layer {
bottom: "conv4_3"
top: "conv4_3"
name: "relu4_3"
type: "ReLU"
}
layer {
bottom: "conv4_3"
top: "pool4"
top: "pool4_mask"
name: "pool4"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
bottom: "pool4"
top: "conv5_1"
name: "conv5_1"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom: "conv5_1"
top: "conv5_1"
name: "conv5_1_bn"
type: "BN"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
}
}
layer {
bottom: "conv5_1"
top: "conv5_1"
name: "relu5_1"
type: "ReLU"
}
layer {
bottom: "conv5_1"
top: "conv5_2"
name: "conv5_2"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom: "conv5_2"
top: "conv5_2"
name: "conv5_2_bn"
type: "BN"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
}
}
layer {
bottom: "conv5_2"
top: "conv5_2"
name: "relu5_2"
type: "ReLU"
}
layer {
bottom: "conv5_2"
top: "conv5_3"
name: "conv5_3"
type: "Convolution"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
weight_filler {
type: "msra"
}
bias_filler {
type: "constant"
}
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom: "conv5_3"
top: "conv5_3"
name: "conv5_3_bn"
type: "BN"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
bn_param {
scale_filler {
type: "constant"
value: 1
}
shift_filler {
type: "constant"
value: 0.001
}
}
}
layer {
bottom: "conv5_3"
top: "conv5_3"
name: "relu5_3"
type: "ReLU"
}
layer {
bottom: "conv5_3"
top: "pool5"
top: "pool5_mask"
name: "pool5"
type: "Pooling"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "upsample5"
type: "Upsample"
bottom: "pool5"
top: "pool5_D"
bottom: "pool5_mask"
upsample_param {
scale: 2
upsample_w: 30
upsample_h: 23
}
}
... (the rest of the definition is omitted)
Answer (score 0):
You should change upsample_w and upsample_h.
Each pooling layer here (kernel 2, stride 2) halves the spatial size of your feature maps, and Caffe rounds up when the size is odd. So count the pooling layers in front of each upsample layer and derive its upsample_w and upsample_h from your image size: upsampleN must restore the size the feature map had before poolN, i.e. the size of the pool(N-1) output. That is how the stock values were chosen for 480 * 360 images (upsample5: 30 × 23, upsample4: 60 × 45).
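If it helps, here is a minimal Python sketch (not part of the original answer) that applies Caffe's pooling size formula, output = ceil((input + 2*pad - kernel) / stride) + 1, to the five 2x2, stride-2 pooling layers and prints the upsample_w / upsample_h each decoder stage would need. It assumes 565 is the width and 584 the height, since the question only says "565 * 584"; swap the two numbers if it is the other way round, and double-check the printed values against the blob shapes Caffe logs at setup. (Feeding in 480 x 360 reproduces the 30/23 and 60/45 values already in the prototxt above, and the 38 vs 37 mismatch in the log looks like what you get when an odd-sized map is simply doubled by scale: 2 instead of being given the exact target size.)

import math

def pool_out(size, kernel=2, stride=2, pad=0):
    # Caffe pooling output size: it rounds *up*, unlike convolution.
    return int(math.ceil((size + 2.0 * pad - kernel) / stride)) + 1

# Assumed input size (width, height), taken from the question.
w, h = 565, 584

sizes = [(w, h)]                   # sizes[0] = input, sizes[n] = pool<n> output
for n in range(1, 6):              # pool1 .. pool5
    w, h = pool_out(w), pool_out(h)
    sizes.append((w, h))
    print("pool%d output: %d x %d (w x h)" % (n, w, h))

# upsample<n> must restore the size the feature map had *before* pool<n>,
# i.e. the size of the pool<n-1> output; otherwise it will not match the
# corresponding pooling mask further down the decoder.
for n in range(5, 0, -1):
    uw, uh = sizes[n - 1]
    print("upsample%d: upsample_w: %d  upsample_h: %d" % (n, uw, uh))

Under those assumptions, this prints (among others) upsample_w: 36, upsample_h: 37 for upsample5 and upsample_w: 71, upsample_h: 73 for upsample4.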