I recently worked with Caffe on a multi-label task and ran into a variety of pain points, which I share with you here.
Step one: prepare the data. The example task is verification-code (CAPTCHA) recognition — each image contains a 4-character code drawn from the digits 0-9 plus the 26 letters (36 classes per character).
Step two: modify the Caffe source code. The files involved are:
caffe.proto,
convert_imageset.cpp,
data_layer.cpp,
io.cpp,
data_layer.hpp,
io.hpp
The specific changes are not detailed here; download the modified files from the address below and replace the corresponding originals in the Caffe source tree.
File download address: https://pan.baidu.com/s/1eSP1RUi
Some background: Caffe does not natively support multi-label classification, so the main goal of these changes is to add that support. Caffe's original label is a single integer — reading caffe.proto, the Datum message (the record type consumed by the data layer) declares its label field as int32, which restricts each sample's label to exactly one integer. The natural starting point is therefore the proto itself: add a repeated (array-type) `labels` field to Datum.
After that, update the parts of the Caffe code that consume Datum so they read the new `labels` field.
Finally, modify convert_imageset.cpp so that it accepts list-file lines with multiple labels, for example:
imgs/abc.jpg 1 2 3 4 5
i.e., support for this multi-label input format.
After finishing these changes, recompile Caffe.
Step three: produce the data labels.
Each list-file line is the image path followed by its labels, e.g.: samples/myl1.bmp 22 34 21 1
Step four: write the multi-label classification network.
The structure is as follows:
# Multi-label CAPTCHA network (LeNet-style).
# Reconstructed from the garbled HTML extraction: removed &nbsp; residue,
# restored the missing closing braces, and normalized prototxt casing
# (enum literals TRAIN/TEST/MAX/LMDB, layer types "ReLU"/"InnerProduct"/
# "SoftmaxWithLoss", filler types "xavier"/"constant", lowercase blob names).
# All hyperparameters visible in the original (num_output, kernel_size,
# stride, lr_mult, scale, batch_size, slice points, loss_weight) are kept.
name: "LeNet"
layer {
  name: "mnist"
  type: "Data"
  top: "data"
  top: "label"
  include { phase: TRAIN }
  # 1/255 input scaling
  transform_param { scale: 0.003921568627451 }
  data_param {
    source: "train_lmdb"
    batch_size: 64
    backend: LMDB
  }
}
layer {
  name: "mnist"
  type: "Data"
  top: "data"
  top: "label"
  include { phase: TEST }
  transform_param { scale: 0.003921568627451 }
  data_param {
    source: "val_lmdb"
    batch_size: 64
    backend: LMDB
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param { lr_mult: 1 }
  param { lr_mult: 2 }
  convolution_param {
    num_output: 128
    kernel_size: 7
    stride: 1
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param { lr_mult: 1 }
  param { lr_mult: 2 }
  convolution_param {
    num_output: 128
    kernel_size: 5
    stride: 1
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "conv2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 2
    # stride 1 as in the original text — TODO confirm this was intended
    stride: 1
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "pool2"
  top: "pool2"
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3"
  param { lr_mult: 1 }
  param { lr_mult: 2 }
  convolution_param {
    num_output: 128
    kernel_size: 3
    stride: 1
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param { lr_mult: 1 }
  param { lr_mult: 2 }
  convolution_param {
    num_output: 128
    kernel_size: 3
    stride: 1
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param { lr_mult: 1 }
  param { lr_mult: 2 }
  convolution_param {
    num_output: 128
    kernel_size: 3
    stride: 1
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
# Four classifier heads, one per CAPTCHA character position.
# num_output: 36 = 10 digits + 26 letters.
layer {
  name: "fc81"
  type: "InnerProduct"
  bottom: "conv5"
  top: "fc81"
  param { lr_mult: 1 }
  param { lr_mult: 2 }
  inner_product_param {
    num_output: 36
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer {
  name: "fc82"
  type: "InnerProduct"
  bottom: "conv5"
  top: "fc82"
  param { lr_mult: 1 }
  param { lr_mult: 2 }
  inner_product_param {
    num_output: 36
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer {
  name: "fc83"
  type: "InnerProduct"
  bottom: "conv5"
  top: "fc83"
  param { lr_mult: 1 }
  param { lr_mult: 2 }
  inner_product_param {
    num_output: 36
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
layer {
  name: "fc84"
  type: "InnerProduct"
  bottom: "conv5"
  top: "fc84"
  param { lr_mult: 1 }
  param { lr_mult: 2 }
  inner_product_param {
    num_output: 36
    weight_filler { type: "xavier" }
    bias_filler { type: "constant" }
  }
}
# Split the 4-element label vector into one scalar label per head.
layer {
  name: "slice2"
  type: "Slice"
  bottom: "label"
  top: "L1"
  top: "L2"
  top: "L3"
  top: "L4"
  slice_param {
    axis: 1
    slice_point: 1
    slice_point: 2
    slice_point: 3
  }
}
layer {
  name: "accuracy1"
  type: "Accuracy"
  bottom: "fc81"
  bottom: "L1"
  top: "accuracy1"
  include { phase: TEST }
}
layer {
  name: "accuracy2"
  type: "Accuracy"
  bottom: "fc82"
  bottom: "L2"
  top: "accuracy2"
  include { phase: TEST }
}
layer {
  name: "accuracy3"
  type: "Accuracy"
  bottom: "fc83"
  bottom: "L3"
  top: "accuracy3"
  include { phase: TEST }
}
layer {
  name: "accuracy4"
  type: "Accuracy"
  bottom: "fc84"
  bottom: "L4"
  top: "accuracy4"
  include { phase: TEST }
}
# One softmax loss per character position; weights as in the original text.
layer {
  name: "loss1"
  type: "SoftmaxWithLoss"
  bottom: "fc81"
  bottom: "L1"
  top: "loss1"
  loss_weight: 0.1
}
layer {
  name: "loss2"
  type: "SoftmaxWithLoss"
  bottom: "fc82"
  bottom: "L2"
  top: "loss2"
  loss_weight: 0.1
}
layer {
  name: "loss3"
  type: "SoftmaxWithLoss"
  bottom: "fc83"
  bottom: "L3"
  top: "loss3"
  loss_weight: 0.1
}
layer {
  name: "loss4"
  type: "SoftmaxWithLoss"
  bottom: "fc84"
  bottom: "L4"
  top: "loss4"
  loss_weight: 0.1
}
And then we can start training.
The results are as follows: