Caffe ImageNet: Check failed: shape[i] >= 0

Asked: 2016-06-07 15:20:27

Tags: machine-learning neural-network deep-learning caffe

I want to use an ImageNet-style network for regression, where the label is the object's two coordinates, e.g. (622 132 736 318). I have already converted the images into an .lmdb file (a sketch of a typical conversion step is shown after the log below). But when I try to train, I get the following error:

I0510 16:50:06.576092  7167 layer_factory.hpp:77] Creating layer data
I0510 16:50:06.576848  7167 net.cpp:106] Creating Layer data
I0510 16:50:06.576869  7167 net.cpp:411] data -> data
I0510 16:50:06.576900  7167 net.cpp:411] data -> label
I0510 16:50:06.576916  7167 data_transformer.cpp:25] Loading mean file from: /home/sx/caffe-master/sx/person_location/data/conferenceroom_train_mean.binaryproto
I0510 16:50:06.578588  7171 db_lmdb.cpp:38] Opened lmdb /home/shawn/caffe-master/shawn/person_location/data/conferenceroom_train_lmdb
I0510 16:50:06.595842  7167 data_layer.cpp:41] output data size: 256,3,227,227
I0510 16:50:08.680726  7167 net.cpp:150] Setting up data
I0510 16:50:08.680807  7167 net.cpp:157] Top shape: 256 3 227 227 (39574272)
I0510 16:50:08.680817  7167 net.cpp:157] Top shape: 256 (256)
I0510 16:50:08.680824  7167 net.cpp:165] Memory required for data: 158298112
I0510 16:50:08.680842  7167 layer_factory.hpp:77] Creating layer conv1
I0510 16:50:08.680874  7167 net.cpp:106] Creating Layer conv1
I0510 16:50:08.680884  7167 net.cpp:454] conv1 <- data
I0510 16:50:08.680907  7167 net.cpp:411] conv1 -> conv1
F0510 16:50:08.927338  7167 blob.cpp:33] Check failed: shape[i] >= 0 (-281264070 vs. 0) 
*** Check failure stack trace: ***
    @     0x7fec6e186778  (unknown)
    @     0x7fec6e1866b2  (unknown)
    @     0x7fec6e1860b4  (unknown)
    @     0x7fec6e189055  (unknown)
    @     0x7fec73a13598  caffe::Blob<>::Reshape()
    @     0x7fec7395206c  caffe::BaseConvolutionLayer<>::Reshape()
    @     0x7fec739a90ef  caffe::CuDNNConvolutionLayer<>::Reshape()
    @     0x7fec738d32fb  caffe::Net<>::Init()
    @     0x7fec738d4a98  caffe::Net<>::Net()
    @     0x7fec73a1fd62  caffe::Solver<>::InitTrainNet()
    @     0x7fec73a21262  caffe::Solver<>::Init()
    @     0x7fec73a21599  caffe::Solver<>::Solver()
    @     0x7fec738ebf43  caffe::Creator_SGDSolver<>()
    @           0x4105bc  caffe::SolverRegistry<>::CreateSolver()
    @           0x4087ed  train()
    @           0x405d67  main
    @     0x7fec64993b45  (unknown)
    @           0x406588  (unknown)
    @              (nil)  (unknown)
Aborted
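For context, the question does not include the script that built the LMDB. Below is a minimal, hypothetical sketch of how multi-value regression targets such as the four box coordinates are sometimes packed into an LMDB of caffe Datum messages (typically as a separate label LMDB, since Datum.label holds only a single integer). This is an illustration of the general approach, not the asker's actual conversion code; the path and function name are made up.

# Hypothetical sketch: packing 4-value box labels (x1, y1, x2, y2) into an LMDB.
# The stock caffe Datum stores one integer in `label`; multi-value targets are
# commonly written to `float_data` of Datums in a second LMDB instead.
import lmdb
from caffe.proto import caffe_pb2

def write_label_lmdb(path, boxes):
    """boxes: iterable of 4-element sequences, e.g. [622, 132, 736, 318]."""
    env = lmdb.open(path, map_size=1 << 30)  # 1 GB map size; adjust as needed
    with env.begin(write=True) as txn:
        for i, box in enumerate(boxes):
            datum = caffe_pb2.Datum()
            datum.channels = 4          # one channel per coordinate
            datum.height = 1
            datum.width = 1
            datum.float_data.extend(float(v) for v in box)
            # keys must sort in the same order as the corresponding image LMDB
            txn.put('{:08d}'.format(i).encode('ascii'),
                    datum.SerializeToString())
    env.close()

write_label_lmdb('conferenceroom_train_label_lmdb',
                 [[622, 132, 736, 318]])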

Here is the train_val.prototxt:

name: "AlexNet"
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  transform_param {
    mirror: true
    crop_size: 227
    mean_file: "/home/shawn/caffe-master/person_location/data/conferenceroom_train_mean.binaryproto"
  }
  data_param {
    source: "/home/shawn/caffe-master/person_location/data/conferenceroom_train_lmdb"
    batch_size: 256
    backend: LMDB
  }
}
layer {
  name: "data"
  type: "Data"
  top: "data"
  top: "label"
  include {
    phase: TEST
  }
  transform_param {
    mirror: false
    crop_size: 227
    mean_file: "/home/shawn/caffe-master/person_location/data/conferenceroom_train_mean.binaryproto"
  }
  data_param {
    source: "/home/shawn/caffe-master/person_location/data/conferenceroom_val_lmdb"
    batch_size: 50
    backend: LMDB
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "conv1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "norm1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "conv2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "norm2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc6"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "drop6"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc7"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu7"
  type: "ReLU"
  bottom: "fc7"
  top: "fc7"
}
layer {
  name: "drop7"
  type: "Dropout"
  bottom: "fc7"
  top: "fc7"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc8"
  type: "InnerProduct"
  bottom: "fc7"
  top: "fc8"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "fc8"
  bottom: "label"
  top: "accuracy"
  include {
    phase: TEST
  }
}
layer {
  name: "loss"
  type: "EuclideanLoss"
  bottom: "fc8"
  bottom: "label"
  top: "loss"
}

0 Answers

There are no answers yet.