I am trying to implement a Siamese network in Caffe that consists of two ImageNet-style branches which do not share weights. Basically, I want to feed one image to each branch and, at the end, measure the similarity distance between them; my prototxt is below. My main question is how I should set "num_output". I train with only two classes: 0 means the images are dissimilar and 1 means they are similar.
name: "Siamese_ImageNet"
layers {
name: "data"
type: IMAGE_DATA
top: "data"
top: "label"
image_data_param {
source: "train1.txt"
batch_size: 32
new_height: 256
new_width: 256
}
include: { phase: TRAIN }
}
layers {
name: "data"
type: IMAGE_DATA
top: "data"
top: "label"
image_data_param {
source: "test1.txt"
batch_size: 32
new_height: 256
new_width: 256
}
include: { phase: TEST }
}
layers {
name: "data_p"
type: IMAGE_DATA
top: "data_p"
top: "label_p"
image_data_param {
source: "train2.txt"
batch_size: 32
new_height: 256
new_width: 256
}
include: { phase: TRAIN }
}
layers {
name: "data_p"
type: IMAGE_DATA
top: "data_p"
top: "label_p"
image_data_param {
source: "test2.txt"
batch_size: 32
new_height: 256
new_width: 256
}
include: { phase: TEST }
}
layers {
name: "conv1"
type: CONVOLUTION
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 96
kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layers {
name: "relu1"
type: RELU
bottom: "conv1"
top: "conv1"
}
layers {
name: "pool1"
type: POOLING
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layers {
name: "norm1"
type: LRN
bottom: "pool1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layers {
name: "conv2"
type: CONVOLUTION
bottom: "norm1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu2"
type: RELU
bottom: "conv2"
top: "conv2"
}
layers {
name: "pool2"
type: POOLING
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layers {
name: "norm2"
type: LRN
bottom: "pool2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layers {
name: "conv3"
type: CONVOLUTION
bottom: "norm2"
top: "conv3"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layers {
name: "relu3"
type: RELU
bottom: "conv3"
top: "conv3"
}
layers {
name: "conv4"
type: CONVOLUTION
bottom: "conv3"
top: "conv4"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu4"
type: RELU
bottom: "conv4"
top: "conv4"
}
layers {
name: "conv5"
type: CONVOLUTION
bottom: "conv4"
top: "conv5"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu5"
type: RELU
bottom: "conv5"
top: "conv5"
}
layers {
name: "pool5"
type: POOLING
bottom: "conv5"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layers {
name: "fc6"
type: INNER_PRODUCT
bottom: "pool5"
top: "fc6"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu6"
type: RELU
bottom: "fc6"
top: "fc6"
}
layers {
name: "drop6"
type: DROPOUT
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
name: "fc7"
type: INNER_PRODUCT
bottom: "fc6"
top: "fc7"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 2
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu7"
type: RELU
bottom: "fc7"
top: "fc7"
}
layers {
name: "drop7"
type: DROPOUT
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
name: "conv1_p"
type: CONVOLUTION
bottom: "data_p"
top: "conv1_p"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 96
kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layers {
name: "relu1_p"
type: RELU
bottom: "conv1_p"
top: "conv1_p"
}
layers {
name: "pool1_p"
type: POOLING
bottom: "conv1_p"
top: "pool1_p"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layers {
name: "norm1_p"
type: LRN
bottom: "pool1_p"
top: "norm1_p"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layers {
name: "conv2_p"
type: CONVOLUTION
bottom: "norm1_p"
top: "conv2_p"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu2_p"
type: RELU
bottom: "conv2_p"
top: "conv2_p"
}
layers {
name: "pool2_p"
type: POOLING
bottom: "conv2_p"
top: "pool2_p"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layers {
name: "norm2_p"
type: LRN
bottom: "pool2_p"
top: "norm2_p"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layers {
name: "conv3_p"
type: CONVOLUTION
bottom: "norm2_p"
top: "conv3_p"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layers {
name: "relu3_p"
type: RELU
bottom: "conv3_p"
top: "conv3_p"
}
layers {
name: "conv4_p"
type: CONVOLUTION
bottom: "conv3_p"
top: "conv4_p"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu4_p"
type: RELU
bottom: "conv4_p"
top: "conv4_p"
}
layers {
name: "conv5_p"
type: CONVOLUTION
bottom: "conv4_p"
top: "conv5_p"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu5_p"
type: RELU
bottom: "conv5_p"
top: "conv5_p"
}
layers {
name: "pool5_p"
type: POOLING
bottom: "conv5_p"
top: "pool5_p"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layers {
name: "fc6_p"
type: INNER_PRODUCT
bottom: "pool5_p"
top: "fc6_p"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu6_p"
type: RELU
bottom: "fc6_p"
top: "fc6_p"
}
layers {
name: "drop6_p"
type: DROPOUT
bottom: "fc6_p"
top: "fc6_p"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
name: "fc7_p"
type: INNER_PRODUCT
bottom: "fc6_p"
top: "fc7_p"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 2
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 1
}
}
}
layers {
name: "relu7_p"
type: RELU
bottom: "fc7_p"
top: "fc7_p"
}
layers {
name: "drop7_p"
type: DROPOUT
bottom: "fc7_p"
top: "fc7_p"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
name: "loss"
type: CONTRASTIVE_LOSS
contrastive_loss_param {
margin: 1.0
}
bottom: "fc7"
bottom: "fc7_p"
bottom: "label"
top: "loss"
}
My training file structure — 0 means dissimilar, 1 means similar (a small script for generating such paired lists is sketched after the listings):
train1.txt:
/aer/img1_1.jpg 0
/aer/img1_2.jpg 1
/aer/img1_3.jpg 1
train2.txt:
/tpd/img2_1.jpg 0
/tpd/img2_2.jpg 1
/tpd/img2_3.jpg 1
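Below is a minimal Python sketch (not part of the original setup) of how such paired list files could be generated; the pairs list simply mirrors the example paths above, and in practice you would enumerate your own similar (1) and dissimilar (0) pairs:

# Line i of train1.txt and line i of train2.txt form one input pair.
# Both files carry the pair label, but only "label" (from train1.txt)
# is actually consumed by the contrastive loss layer in the prototxt above.
pairs = [
    ('/aer/img1_1.jpg', '/tpd/img2_1.jpg', 0),  # dissimilar pair
    ('/aer/img1_2.jpg', '/tpd/img2_2.jpg', 1),  # similar pair
    ('/aer/img1_3.jpg', '/tpd/img2_3.jpg', 1),  # similar pair
]

with open('train1.txt', 'w') as f1, open('train2.txt', 'w') as f2:
    for left, right, label in pairs:
        f1.write('%s %d\n' % (left, label))
        f2.write('%s %d\n' % (right, label))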
Answer 0 (score: 7)
How should I set my "num_output"?
Before deciding what value to give num_output, let's first explain what it means. You can view the two branches of the Siamese network, data -> fc7 and data_p -> fc7_p, as two feature extractors: each one extracts a feature vector, e.g. fc7 and fc7_p, from the images supplied by its data layer. num_output therefore defines the dimension of the extracted feature vector.
During training, the ContrastiveLoss layer tries to minimize the distance between the two extracted feature vectors when the images they represent are similar (label == 1) and to maximize it when they are dissimilar (label == 0). In other words, the smaller the distance between the feature vectors, the more similar the images.
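For reference, the loss that Caffe's ContrastiveLoss layer implements (in its default, non-legacy form, following Hadsell, Chopra and LeCun) is roughly

L = \frac{1}{2N}\sum_{i=1}^{N}\Big[\, y_i\, d_i^{2} + (1 - y_i)\,\max(\text{margin} - d_i,\ 0)^{2} \Big], \qquad d_i = \lVert \text{fc7}_i - \text{fc7\_p}_i \rVert_2

so similar pairs (y = 1) are pulled together, while dissimilar pairs (y = 0) are pushed apart until their distance reaches the margin.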
So what is the optimal feature-vector dimension to best capture the information that indicates similarity, i.e. what should you set num_output to? There is probably no exact value; it depends on the encoding quality of the feature extractor (you can view the feature as a code for the image) and on how hard it is to recognize the similarity of the images. Basically, if the network (feature extractor) is deep and recognizing similarity is not too hard, you can choose a relatively small num_output, e.g. 200, because a larger network can encode the feature well and make it more discriminative. If not, try a larger value, e.g. 500 or 1000, or try a more complex network.
If you want to try MultinomialLogisticLoss instead of the ContrastiveLoss layer, you should first fuse the two feature vectors fc7 and fc7_p into one using a CONCAT layer and then feed the result into a SOFTMAX_LOSS layer, like this:
... # original layers
layers {
name: "concat"
type: CONCAT
bottom: "fc7"
bottom: "fc7_p"
top: "fc_concat" # concatenate fc7 and fc7_p along channel axis
}
layer {
name: "fc_cls"
type: INNER_PRODUCT
bottom: "fc_concat"
top: "fc_cls"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 2 # a binary classification problem in this case
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: ACCURACY
bottom: "fc_cls"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: SOFTMAX_LOSS
bottom: "fc_cls"
bottom: "label"
top: "loss"
}
To compare similarity and use the model for deployment, which one is better to implement, Contrastive Loss or Softmax Loss?
Softmax Loss is simple and easy to deploy, but it can only give you a binary prediction, i.e. similar or dissimilar. The probability distribution over the two classes (similar, dissimilar) that it produces is often too hard (non-uniform), e.g. [0.9*, 0.0*], ...; in many cases it will not reflect the true degree of similarity of the input well.
With Contrastive Loss, on the other hand, you obtain a discriminative feature vector for each image, and you can use these vectors to compute a similarity probability, as the CVPR 2005 paper Learning a Similarity Metric Discriminatively, with Application to Face Verification does in Section 4.1 (the key point is to compute a multivariate normal density using the feature vectors generated from images belonging to the same subject). You can also apply a threshold to trade off the false positive rate and the false negative rate of the model and obtain an ROC curve to evaluate it better.
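As a concrete illustration of that deployment idea, here is a minimal pycaffe sketch that extracts the two fc7 features and thresholds their Euclidean distance. The file names siamese_deploy.prototxt and siamese.caffemodel are assumptions (you would need a deploy prototxt that replaces the IMAGE_DATA layers with 1x3x256x256 input blobs "data" and "data_p"), and the threshold value is only a placeholder to be tuned on a validation set:

import numpy as np
import caffe

net = caffe.Net('siamese_deploy.prototxt', 'siamese.caffemodel', caffe.TEST)

def load_image(path):
    # Load as H x W x C float in [0, 1] (RGB), resize to the network's input
    # size, then convert to 0-255 BGR, C x H x W to roughly match what the
    # ImageData layer did during training (adjust if your preprocessing differs).
    img = caffe.io.load_image(path)
    img = caffe.io.resize_image(img, (256, 256))
    img = img[:, :, ::-1] * 255.0
    return img.transpose(2, 0, 1)

net.blobs['data'].data[0, ...] = load_image('/aer/img1_1.jpg')
net.blobs['data_p'].data[0, ...] = load_image('/tpd/img2_1.jpg')
net.forward()

# fc7 / fc7_p are the feature vectors the contrastive loss compared in training.
feat_a = net.blobs['fc7'].data[0].copy()
feat_b = net.blobs['fc7_p'].data[0].copy()

dist = np.linalg.norm(feat_a - feat_b)  # Euclidean distance between the features
threshold = 0.5                         # example value; pick it from an ROC curve
print('distance = %.4f -> %s' % (dist, 'similar' if dist < threshold else 'dissimilar'))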
By the way, to dig into more CNN architectures for predicting similarity, you can refer to the CVPR 2015 paper Learning to Compare Image Patches via Convolutional Neural Networks.
Answer 1 (score: 1)
Just to correct Dale's excellent answer above for Caffe's hypersensitive syntax, and for newbies who get stuck like I did, here are the fixes (layers → layer, quotes around some values, comments removed, and the correct capitalization):
layer {
name: "concat"
type: "Concat"
bottom: "fc7"
bottom: "fc7_p"
top: "fc_concat"
}
layer {
name: "fc_cls"
type: "InnerProduct"
bottom: "fc_concat"
top: "fc_cls"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 2
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "fc_cls"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc_cls"
bottom: "label"
top: "loss"
}
Answer 2 (score: 0)
I believe num_output defines the dimension of the extracted feature vector; the extracted features can then be used to compute an L2 distance. If the L2 distance is greater than 1, the images belong to different classes; if it is close to 0, the images are similar. The rest of Dale's answer is perfect.