Caffe fine-tuning fails to load mean.binaryproto: Movidius

Date: 2018-09-06 15:27:47

Tags: deep-learning caffe intel

After correctly creating train.txt and val.txt from my own dataset with two different classes, I created train_leveldb containing data.mdb (67.7 MB), lock.mdb (8.2 kB) and mean.binaryproto (786.4 kB), and val_leveldb containing data.mdb (16.9 MB), lock.mdb (8.2 kB) and mean.binaryproto (786.4 kB).
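
For reference, such databases and mean files are typically produced with the stock Caffe tools; a rough sketch (the image root, resize flags and output paths below are illustrative, not necessarily the exact calls used):

>/opt/movidius/caffe/build/tools/convert_imageset --resize_height=256 --resize_width=256 --backend=lmdb /path/to/images/ train.txt /home/spalomar/workspace/ISIA/lmdb/Imagenet/train_leveldb

>/opt/movidius/caffe/build/tools/compute_image_mean --backend=lmdb /home/spalomar/workspace/ISIA/lmdb/Imagenet/train_leveldb /home/spalomar/workspace/ISIA/lmdb/Imagenet/train_leveldb/mean.binaryproto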

After that, I started training the network as follows:

>/opt/movidius/caffe/build/tools/caffe train --solver=/opt/movidius/caffe/models/bvlc_reference_caffenet/solver_isia.prototxt --weights /opt/movidius/caffe/models/bvlc_reference_caffenet/bvlc.caffemodel 2>&1 | tee /opt/movidius/caffe/models/blvc_reference_caffenet/train.log

The MDB database files (train and val) exist and are readable, and so do the mean.binaryproto files. Is there a way to solve this? Any comment is welcome.

Thanks.

LOGFILE:

I0906 16:56:47.615576 10762 caffe.cpp:210] Use CPU.
I0906 16:56:47.615811 10762 solver.cpp:63] Initializing solver from parameters: 
test_iter: 1000
test_interval: 1000
base_lr: 0.01
display: 20
max_iter: 40000
lr_policy: "step"
gamma: 0.1
momentum: 0.9
weight_decay: 0.0005
stepsize: 2500
snapshot: 5000
snapshot_prefix: "/opt/movidius/caffe/models/bvlc_reference_caffenet/caffenet_isia"
solver_mode: CPU
net: "/opt/movidius/caffe/models/bvlc_reference_caffenet/train_isia.prototxt"
train_state {
  level: 0
  stage: ""
}
I0906 16:56:47.615988 10762 solver.cpp:106] Creating training net from net file: /opt/movidius/caffe/models/bvlc_reference_caffenet/train_isia.prototxt
I0906 16:56:47.616300 10762 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer data
I0906 16:56:47.616331 10762 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
I0906 16:56:47.616339 10762 net.cpp:58] Initializing net from parameters: 
name: "CaffeNet"
state {
  phase: TRAIN
  level: 0
  stage: ""
}
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  transform_param {
    mirror: true
    crop_size: 227
    mean_file: "/home/spalomar/workspace/ISIA/lmdb/Imagenet/train_leveldb/mean.binaryproto"
  }
  data_param {
    source: "/home/spalomar/workspace/ISIA/lmdb/Imagenet/train_leveldb"
    batch_size: 256
    backend: LMDB
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "pool1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "norm1"
  top: "conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "pool2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "norm2"
  top: "conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc6"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "drop6"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc7"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 1
    }
  }
}
layer {
  name: "relu7"
  type: "ReLU"
  bottom: "fc7"
  top: "fc7"
}
layer {
  name: "drop7"
  type: "Dropout"
  bottom: "fc7"
  top: "fc7"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc8-isia"
  type: "InnerProduct"
  bottom: "fc7"
  top: "fc8-isia"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc8-isia"
  bottom: "label"
  top: "loss"
}
I0906 16:56:47.616586 10762 layer_factory.hpp:77] Creating layer data
I0906 16:56:47.616915 10762 net.cpp:100] Creating Layer data
I0906 16:56:47.616928 10762 net.cpp:408] data -> data
I0906 16:56:47.616962 10762 net.cpp:408] data -> label
I0906 16:56:47.616978 10762 data_transformer.cpp:27] Loading mean file from: /home/spalomar/workspace/ISIA/lmdb/Imagenet/train_leveldb/mean.binaryproto
F0906 16:56:47.616992 10765 db_lmdb.hpp:15] Check failed: mdb_status == 0 (2 vs. 0) No such file or directory
*** Check failure stack trace: ***
F0906 16:56:47.616993 10762 io.cpp:63] Check failed: fd != -1 (-1 vs. -1) File not found: /home/spalomar/workspace/ISIA/lmdb/Imagenet/train_leveldb/mean.binaryproto
*** Check failure stack trace: ***
    @     0x7f6b3b1dc0cd  google::LogMessage::Fail()
    @     0x7f6b3b1dc0cd  google::LogMessage::Fail()
    @     0x7f6b3b1ddf33  google::LogMessage::SendToLog()
    @     0x7f6b3b1ddf33  google::LogMessage::SendToLog()
    @     0x7f6b3b1dbc28  google::LogMessage::Flush()
    @     0x7f6b3b1dbc28  google::LogMessage::Flush()
    @     0x7f6b3b1de999  google::LogMessageFatal::~LogMessageFatal()
    @     0x7f6b3b1de999  google::LogMessageFatal::~LogMessageFatal()
    @     0x7f6b3b9aad4a  caffe::ReadProtoFromBinaryFile()
    @     0x7f6b3b993c4a  caffe::db::LMDB::Open()
    @     0x7f6b3b7a8250  caffe::DataTransformer<>::DataTransformer()
    @     0x7f6b3b797ab7  caffe::DataReader<>::Body::InternalThreadEntry()
    @     0x7f6b3b7d6775  caffe::BaseDataLayer<>::LayerSetUp()
    @     0x7f6b396a4bcd  (unknown)
    @     0x7f6b3b7d689a  caffe::BasePrefetchingDataLayer<>::LayerSetUp()
    @     0x7f6b38f596db  start_thread
    @     0x7f6b3b93925b  caffe::Net<>::Init()
    @     0x7f6b399d988f  clone

1 Answer:

Answer 0 (score: 0)

You can fix this without providing a mean file at all, by specifying per-channel mean values directly in transform_param:

  transform_param {
    mirror: true
    crop_size: 227
    mean_value: 104  # Blue
    mean_value: 116  # Green
    mean_value: 122  # Red
  }

To actually get the values from your mean.binaryproto, use this code: https://gist.github.com/Coderx7/26eebeefaa3fb28f654d2951980b80ba or compute them yourself.
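
For example, a minimal pycaffe sketch along those lines (it assumes pycaffe is importable; the path is the one from the question):

import caffe

# parse the serialized BlobProto that holds the mean image
blob = caffe.proto.caffe_pb2.BlobProto()
with open('/home/spalomar/workspace/ISIA/lmdb/Imagenet/train_leveldb/mean.binaryproto', 'rb') as f:
    blob.ParseFromString(f.read())

# convert to a (channels, height, width) array and average each channel (BGR order)
mean = caffe.io.blobproto_to_array(blob)[0]
print(mean.mean(axis=(1, 2)))

The three printed values (BGR order) are what you would plug into mean_value above.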