I want to implement an autoencoder with Caffe in NVIDIA DIGITS, using the mnist_generic dataset. I created the dataset following the method explained here: https://github.com/NVIDIA/DIGITS/tree/master/examples/autoencoder
However, I don't want to build the autoencoder with Torch or TensorFlow; I want to do it with Caffe.
I took the mnist_autoencoder network that ships with Caffe and ran it in DIGITS.
That failed with the following error:
ERROR: cannot specify two val image data layers
When I removed one of the test layers (the third layer in the architecture), I got this error instead:
ERROR: Check failed: bottom[0]->count() == bottom[1]->count() (78400 vs. 235200) SIGMOID_CROSS_ENTROPY_LOSS layer inputs must have the same count.
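If I am reading the counts correctly, this looks like a channel mismatch (the arithmetic below is my own interpretation, not part of the error message):

78400  = 100 x 784      (batch_size x 28x28, single channel -- the output of decode1)
235200 = 100 x 3 x 784  (batch_size x 3 channels x 28x28 -- what flatdata delivers)

So the LMDB I built may contain three-channel images, while the network expects single-channel 28x28 inputs.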
Here is the autoencoder architecture:
name: "MNISTAutoencoder"
layer {
name: "data"
type: "Data"
top: "data"
include {
phase: TRAIN
}
transform_param {
scale: 0.0039215684
}
data_param {
batch_size: 100
backend: LMDB
}
}
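# NOTE: the two TEST-phase layers below ("test-on-train" and "test-on-test")
# are copied from Caffe's examples/mnist/mnist_autoencoder.prototxt; having
# both of them seems to be what triggers the "two val image data layers"
# error in DIGITS.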
layer {
name: "data"
type: "Data"
top: "data"
include {
phase: TEST
stage: "test-on-train"
}
transform_param {
scale: 0.0039215684
}
data_param {
batch_size: 100
backend: LMDB
}
}
layer {
name: "data"
type: "Data"
top: "data"
include {
phase: TEST
stage: "test-on-test"
}
transform_param {
scale: 0.0039215684
}
data_param {
batch_size: 100
backend: LMDB
}
}
layer {
name: "flatdata"
type: "Flatten"
bottom: "data"
top: "flatdata"
}
layer {
name: "encode1"
type: "InnerProduct"
bottom: "data"
top: "encode1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 1000
weight_filler {
type: "gaussian"
std: 1
sparse: 15
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "encode1neuron"
type: "Sigmoid"
bottom: "encode1"
top: "encode1neuron"
}
layer {
name: "encode2"
type: "InnerProduct"
bottom: "encode1neuron"
top: "encode2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 500
weight_filler {
type: "gaussian"
std: 1
sparse: 15
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "encode2neuron"
type: "Sigmoid"
bottom: "encode2"
top: "encode2neuron"
}
layer {
name: "encode3"
type: "InnerProduct"
bottom: "encode2neuron"
top: "encode3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 250
weight_filler {
type: "gaussian"
std: 1
sparse: 15
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "encode3neuron"
type: "Sigmoid"
bottom: "encode3"
top: "encode3neuron"
}
layer {
name: "encode4"
type: "InnerProduct"
bottom: "encode3neuron"
top: "encode4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 30
weight_filler {
type: "gaussian"
std: 1
sparse: 15
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "decode4"
type: "InnerProduct"
bottom: "encode4"
top: "decode4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 250
weight_filler {
type: "gaussian"
std: 1
sparse: 15
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "decode4neuron"
type: "Sigmoid"
bottom: "decode4"
top: "decode4neuron"
}
layer {
name: "decode3"
type: "InnerProduct"
bottom: "decode4neuron"
top: "decode3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 500
weight_filler {
type: "gaussian"
std: 1
sparse: 15
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "decode3neuron"
type: "Sigmoid"
bottom: "decode3"
top: "decode3neuron"
}
layer {
name: "decode2"
type: "InnerProduct"
bottom: "decode3neuron"
top: "decode2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 1000
weight_filler {
type: "gaussian"
std: 1
sparse: 15
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "decode2neuron"
type: "Sigmoid"
bottom: "decode2"
top: "decode2neuron"
}
layer {
name: "decode1"
type: "InnerProduct"
bottom: "decode2neuron"
top: "decode1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 784
weight_filler {
type: "gaussian"
std: 1
sparse: 15
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "loss"
type: "SigmoidCrossEntropyLoss"
bottom: "decode1"
bottom: "flatdata"
top: "cross_entropy_loss"
loss_weight: 1
include {
phase: TRAIN
}
}
layer {
name: "decode1neuron"
type: "Sigmoid"
bottom: "decode1"
top: "decode1neuron"
}
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "decode1neuron"
bottom: "flatdata"
top: "l2_error"
loss_weight: 0
include {
phase: TRAIN
}
}
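One idea I have not tried yet: if the second error really is a channel mismatch, forcing grayscale in the input transform might help. Below is a sketch of the train data layer with that change; force_gray is a standard field of Caffe's TransformationParameter, but whether this is the right fix for a DIGITS-generated LMDB is only my assumption:

layer {
  name: "data"
  type: "Data"
  top: "data"
  include {
    phase: TRAIN
  }
  transform_param {
    scale: 0.0039215684
    force_gray: true  # assumption: collapse 3-channel entries to 1 channel
  }
  data_param {
    batch_size: 100
    backend: LMDB
  }
}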
Does anyone know how to train this MNIST autoencoder with Caffe in NVIDIA DIGITS?