I have been using AlexNet for a pixel-wise regression task (depth estimation). Now I want to replace AlexNet with VGG, since it is supposed to perform better.
Here is the AlexNet I am using:
layer {
  name: "train-data"
  type: "Data"
  top: "data"
  include {
    phase: TRAIN
  }
  data_param {
    source: "../data/.."
    batch_size: 4
    backend: LMDB
  }
  transform_param {
    mean_value: 127
  }
}
layer {
  name: "train-depth"
  type: "Data"
  top: "gt"
  include {
    phase: TRAIN
  }
  transform_param {
    # feature scaling coefficient: this maps [0, 255] to [0, 1]
    scale: 0.00390625
  }
  data_param {
    source: "../data/.."
    batch_size: 4
    backend: LMDB
  }
}
layer {
  name: "val-data"
  type: "Data"
  top: "data"
  include {
    phase: TEST
  }
  data_param {
    source: "../data/.."
    batch_size: 4
    backend: LMDB
  }
  transform_param {
    mean_value: 127
  }
}
layer {
  name: "val-depth"
  type: "Data"
  top: "gt"
  include {
    phase: TEST
  }
  transform_param {
    # feature scaling coefficient: this maps [0, 255] to [0, 1]
    scale: 0.00390625
  }
  data_param {
    source: "../data/.."
    batch_size: 4
    backend: LMDB
  }
}
# CONVOLUTIONAL
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 0.02
    decay_mult: 1
  }
  param {
    lr_mult: 0.02
    decay_mult: 0
  }
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "conv1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "norm1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 0.02
    decay_mult: 1
  }
  param {
    lr_mult: 0.02
    decay_mult: 0
  }
  convolution_param {
    engine: CAFFE
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "conv2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "norm2"
  top: "conv3"
  param {
    lr_mult: 0.02
    decay_mult: 1
  }
  param {
    lr_mult: 0.02
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult: 0.02
    decay_mult: 1
  }
  param {
    lr_mult: 0.02
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param {
    lr_mult: 0.02
    decay_mult: 1
  }
  param {
    lr_mult: 0.02
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
# MAIN
layer {
  name: "fc-main"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc-main"
  param {
    decay_mult: 1
  }
  param {
    decay_mult: 0
  }
  inner_product_param {
    num_output: 1024
    weight_filler {
      type: "xavier"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "fc-main"
  top: "fc-main"
  relu_param {
    engine: CAFFE
  }
}
layer {
  name: "drop6"
  type: "Dropout"
  bottom: "fc-main"
  top: "fc-main"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc-depth"
  type: "InnerProduct"
  bottom: "fc-main"
  top: "fc-depth"
  param {
    decay_mult: 1
    lr_mult: 0.2
  }
  param {
    lr_mult: 0.2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 1369
    weight_filler {
      type: "gaussian"
      std: 0.001
    }
    bias_filler {
      type: "constant"
      value: 0.5
    }
  }
}
layer {
  name: "reshape"
  type: "Reshape"
  bottom: "fc-depth"
  top: "depth"
  reshape_param {
    shape {
      dim: 0   # copy the batch dimension from below
      dim: 1
      dim: 37
      dim: 37  # 37 x 37 = 1369, the fc-depth output size
    }
  }
}
layer {
  name: "loss"
  type: "EuclideanLoss"
  bottom: "depth"
  bottom: "gt"
  top: "loss"
  loss_weight: 1
}
Here is the VGG I am using:
layer {
  bottom: "data"
  top: "conv1_1"
  name: "conv1_1"
  type: "Convolution"
  param {
    lr_mult: 0.001
    decay_mult: 1
  }
  param {
    lr_mult: 0.001
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  bottom: "conv1_1"
  top: "conv1_1"
  name: "relu1_1"
  type: "ReLU"
}
layer {
  bottom: "conv1_1"
  top: "conv1_2"
  name: "conv1_2"
  type: "Convolution"
  param {
    lr_mult: 0.001
    decay_mult: 1
  }
  param {
    lr_mult: 0.001
    decay_mult: 0
  }
  convolution_param {
    num_output: 64
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  bottom: "conv1_2"
  top: "conv1_2"
  name: "relu1_2"
  type: "ReLU"
}
layer {
  bottom: "conv1_2"
  top: "pool1"
  name: "pool1"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  bottom: "pool1"
  top: "conv2_1"
  name: "conv2_1"
  type: "Convolution"
  param {
    lr_mult: 0.001
    decay_mult: 1
  }
  param {
    lr_mult: 0.001
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  bottom: "conv2_1"
  top: "conv2_1"
  name: "relu2_1"
  type: "ReLU"
}
layer {
  bottom: "conv2_1"
  top: "conv2_2"
  name: "conv2_2"
  type: "Convolution"
  param {
    lr_mult: 0.001
    decay_mult: 1
  }
  param {
    lr_mult: 0.001
    decay_mult: 0
  }
  convolution_param {
    num_output: 128
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  bottom: "conv2_2"
  top: "conv2_2"
  name: "relu2_2"
  type: "ReLU"
}
layer {
  bottom: "conv2_2"
  top: "pool2"
  name: "pool2"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  bottom: "pool2"
  top: "conv3_1"
  name: "conv3_1"
  type: "Convolution"
  param {
    lr_mult: 0.001
    decay_mult: 1
  }
  param {
    lr_mult: 0.001
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  bottom: "conv3_1"
  top: "conv3_1"
  name: "relu3_1"
  type: "ReLU"
}
layer {
  bottom: "conv3_1"
  top: "conv3_2"
  name: "conv3_2"
  type: "Convolution"
  param {
    lr_mult: 0.001
    decay_mult: 1
  }
  param {
    lr_mult: 0.001
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  bottom: "conv3_2"
  top: "conv3_2"
  name: "relu3_2"
  type: "ReLU"
}
layer {
  bottom: "conv3_2"
  top: "conv3_3"
  name: "conv3_3"
  type: "Convolution"
  param {
    lr_mult: 0.001
    decay_mult: 1
  }
  param {
    lr_mult: 0.001
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  bottom: "conv3_3"
  top: "conv3_3"
  name: "relu3_3"
  type: "ReLU"
}
layer {
  bottom: "conv3_3"
  top: "pool3"
  name: "pool3"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  bottom: "pool3"
  top: "conv4_1"
  name: "conv4_1"
  type: "Convolution"
  param {
    lr_mult: 0.001
    decay_mult: 1
  }
  param {
    lr_mult: 0.001
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  bottom: "conv4_1"
  top: "conv4_1"
  name: "relu4_1"
  type: "ReLU"
}
layer {
  bottom: "conv4_1"
  top: "conv4_2"
  name: "conv4_2"
  type: "Convolution"
  param {
    lr_mult: 0.001
    decay_mult: 1
  }
  param {
    lr_mult: 0.001
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  bottom: "conv4_2"
  top: "conv4_2"
  name: "relu4_2"
  type: "ReLU"
}
layer {
  bottom: "conv4_2"
  top: "conv4_3"
  name: "conv4_3"
  type: "Convolution"
  param {
    lr_mult: 0.001
    decay_mult: 1
  }
  param {
    lr_mult: 0.001
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  bottom: "conv4_3"
  top: "conv4_3"
  name: "relu4_3"
  type: "ReLU"
}
layer {
  bottom: "conv4_3"
  top: "pool4"
  name: "pool4"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  bottom: "pool4"
  top: "conv5_1"
  name: "conv5_1"
  type: "Convolution"
  param {
    lr_mult: 0.001
    decay_mult: 1
  }
  param {
    lr_mult: 0.001
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  bottom: "conv5_1"
  top: "conv5_1"
  name: "relu5_1"
  type: "ReLU"
}
layer {
  bottom: "conv5_1"
  top: "conv5_2"
  name: "conv5_2"
  type: "Convolution"
  param {
    lr_mult: 0.001
    decay_mult: 1
  }
  param {
    lr_mult: 0.001
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  bottom: "conv5_2"
  top: "conv5_2"
  name: "relu5_2"
  type: "ReLU"
}
layer {
  bottom: "conv5_2"
  top: "conv5_3"
  name: "conv5_3"
  type: "Convolution"
  param {
    lr_mult: 0.001
    decay_mult: 1
  }
  param {
    lr_mult: 0.001
    decay_mult: 0
  }
  convolution_param {
    num_output: 512
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  bottom: "conv5_3"
  top: "conv5_3"
  name: "relu5_3"
  type: "ReLU"
}
layer {
  bottom: "conv5_3"
  top: "pool5"
  name: "pool5"
  type: "Pooling"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  bottom: "pool5"
  top: "fc6"
  name: "fc6"
  type: "InnerProduct"
  param {
    lr_mult: 0.1
    decay_mult: 1
  }
  param {
    lr_mult: 0.1
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.001
    }
    bias_filler {
      type: "constant"
      value: 0.5
    }
  }
}
layer {
  bottom: "fc6"
  top: "fc6"
  name: "relu6"
  type: "ReLU"
  relu_param {
    engine: CAFFE
  }
}
layer {
  name: "drop6"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  bottom: "fc6"
  top: "fc7"
  name: "fc7"
  type: "InnerProduct"
  param {
    lr_mult: 0.1
    decay_mult: 1
  }
  param {
    lr_mult: 0.1
    decay_mult: 0
  }
  inner_product_param {
    num_output: 1369
    weight_filler {
      type: "gaussian"
      std: 0.001
    }
    bias_filler {
      type: "constant"
      value: 0.5
    }
  }
}
layer {
  name: "reshape"
  type: "Reshape"
  bottom: "fc7"
  top: "depth"
  reshape_param {
    shape {
      dim: 0   # copy the batch dimension from below
      dim: 1
      dim: 37
      dim: 37  # 37 x 37 = 1369, the fc7 output size
    }
  }
}
layer {
  name: "loss"
  type: "EuclideanLoss"
  bottom: "depth"
  bottom: "gt"
  top: "loss"
  loss_weight: 1
}
The learning rate is 0.0005.
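For context, the solver side is minimal; a sketch of a solver.prototxt with this base_lr (everything below other than base_lr is a placeholder, not necessarily my exact setting):

net: "train_val.prototxt"        # placeholder path
base_lr: 0.0005                  # the learning rate mentioned above
lr_policy: "step"                # assumed schedule
gamma: 0.1
stepsize: 10000
momentum: 0.9
weight_decay: 0.0005
display: 100
max_iter: 100000
snapshot: 10000
snapshot_prefix: "snapshots/vgg_depth"
solver_mode: GPU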
When training with AlexNet, the loss converges to roughly 5; with VGG, the network does not converge at all. The loss stays at about 30, even though I keep reducing the learning rate and have even reduced the lr_mult values. Does anyone have an idea what else could be wrong? I am 100% sure that only the .prototxt files differ; everything else is exactly the same.
Answer 0 (score: 0)
It is well known that large VGG-style networks are hard to train from scratch: Simonyan and Zisserman say as much in section 3.1 of their paper. In practice, they first train the "small" configuration (A) from random weights, and then use those weights to initialize the larger networks (C, D, E). You will probably also need more data to train VGG than AlexNet.
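In Caffe, this staged initialization falls out of name-based weight copying: when you pass a .caffemodel via -weights, every layer whose name matches gets its parameters copied, and the remaining layers keep their random fillers. A sketch of the two-stage recipe (the solver and snapshot file names here are made up for illustration):

# Stage 1: train the shallow VGG-A variant from scratch
caffe train -solver solver_vgg_a.prototxt

# Stage 2: start the deeper variant from the trained A weights;
# layers with matching names (conv1_1, conv2_1, ...) are copied,
# newly added layers start from their weight_filler settings
caffe train -solver solver_vgg_16.prototxt \
    -weights snapshots/vgg_a_iter_100000.caffemodel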
In your case, you could consider fine-tuning VGG16 instead of learning it from scratch, as sketched below. Alternatively, use GoogLeNet, which is lighter (and easier to train) and has given me similar performance on the few problems where I tested it.
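If you go the fine-tuning route, the usual pattern is to load the published VGG_ILSVRC_16_layers.caffemodel (your conv layer names already match it), keep a small lr_mult on the pretrained body, and give only the freshly initialized depth head a larger one. Note that your fc7 outputs 1369 values while the pretrained fc7 outputs 4096, so that layer must be renamed, otherwise Caffe aborts with a shape mismatch when copying weights. A sketch of the new head; the lr_mult values are only a starting guess:

layer {
  name: "fc7-depth"                # renamed: "fc7" in the pretrained model
  type: "InnerProduct"             # has 4096 outputs, which would clash
  bottom: "fc6"                    # with 1369 during weight copying
  top: "fc7-depth"
  param {
    lr_mult: 10                    # let the new head learn faster
    decay_mult: 1
  }
  param {
    lr_mult: 20                    # than the pretrained body
    decay_mult: 0
  }
  inner_product_param {
    num_output: 1369               # 37 x 37 depth map
    weight_filler {
      type: "gaussian"
      std: 0.001
    }
    bias_filler {
      type: "constant"
      value: 0.5
    }
  }
}

The reshape and loss layers then take fc7-depth as their bottom, and training is launched as in the snippet above, with -weights pointing at VGG_ILSVRC_16_layers.caffemodel.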