Using a Dropout layer in Caffe to improve CIFAR-10 accuracy by 10 points (0.62 to 0.72)



# The train/test net protocol buffer definition
net: "D:\\caffeinfo\\d_trainval\\cifar10_full_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# With a test batch size of 50, 200 iterations cover the full 10,000
# CIFAR-10 test images (50 x 200 = 10,000).
test_iter: 200
# Carry out testing every 200 training iterations.
test_interval: 200
# The base learning rate, momentum and weight decay of the network.
base_lr: 0.01
momentum: 0.9
weight_decay: 0.004
# The learning rate policy
lr_policy: "step"
gamma: 0.1
stepsize: 10000
# Display every 200 iterations
display: 200
# The maximum number of iterations
max_iter: 100000
# Snapshot intermediate results
snapshot: 10000
snapshot_format: HDF5
snapshot_prefix: "D:\\caffeinfo\\d_trainval\\cifar10_full"
# Solver mode: CPU or GPU
solver_mode: GPU
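
The solver above can be driven from the caffe command-line tool (caffe train --solver=<solver file>) or from pycaffe. A minimal pycaffe sketch, assuming the solver text is saved as cifar10_full_solver.prototxt (the article never names the file, so that filename is illustrative):

import caffe

caffe.set_mode_gpu()                                       # matches solver_mode: GPU
solver = caffe.SGDSolver("cifar10_full_solver.prototxt")   # hypothetical filename
solver.solve()                                             # trains for max_iter iterations,
                                                           # testing every test_interval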


/////////////////////////////////////////////////////////////////////////////////////////////////

With dropout. The network below widens ip1 to 256 outputs and appends fc7, a ReLU, and a Dropout layer with dropout_ratio: 0.1 ahead of the accuracy and loss layers:

Name: "Cifar10_full" Layer {name: "Cifar" type: "Data" Top: "Data" Top: "label" include {Phase:train} Transform_param {mean_file: "D:\\caffeinfo\\b_datacreate\\mean.binaryproto"} data_param {source: "D:\\Caf feinfo\\b_datacreate\\train_db "Batch_size:50 Backend:lmdb}}" layer {name: "Cifar" type: "Data" Top: " Data "Top:" label "include {phase:test} transform_param {mean_file:" D:\\caffeinfo\\b_datacreate\\mean
  . Binaryproto "} data_param {Source:" d:\\caffeinfo\\b_datacreate\\val_db "batch_size:50 Backend:lmdb
    }} layer {name: "CONV1" type: "Convolution" bottom: "Data" Top: "Conv1" param {lr_mult:1} param { 
      Lr_mult:2} convolution_param {num_output:32 pad:2 kernel_size:5 stride:1 Weight_filler { Type: "Gaussian" std:0.0001} bias_filler {type: "Constant"}}} "layer {name:" Pool 1 "type:" Pooling "bOttom: "Conv1" Top: "Pool1" Pooling_param {Pool:max kernel_size:3 stride:2}} layer {name: "Relu 1 "type:" ReLU "bottom:" pool1 "Top:" pool1 "} layer {name:" Norm1 "type:" LRN "bottom:" pool1 "Top:" Norm1 "Lrn_param {local_size:3 alpha:5e-05 beta:0.75 Norm_region:within_channel}}" layer {name: "C Onv2 "type:" Convolution "bottom:" Norm1 "Top:" Conv2 "param {lr_mult:1} param {Lr_mult:2} c Onvolution_param {num_output:32 pad:2 kernel_size:5 stride:1 weight_filler {type: "Gaussia n "std:0.01} bias_filler {type:" Constant "}}}} layer {name:" RELU2 "type:" ReLU "bot 
    Tom: "Conv2" Top: "conv2"} layer {name: "pool2" type: "Pooling" bottom: "conv2" Top: "Pool2" Pooling_param { Pool:ave kernel_size:3 Stride:2}} layer {name: "Norm2" type: "LRN" bottom: "pool2" Top: "norm 2 "Lrn_param {locAl_size:3 alpha:5e-05 beta:0.75 Norm_region:within_channel}} layer {name: "Conv3" type: "Convolut 
    Ion "bottom:" Norm2 "Top:" Conv3 "Convolution_param {num_output:64 pad:2 kernel_size:5 stride:1 Weight_filler {type: "Gaussian" std:0.01} bias_filler {type: "Constant"}}} Lay  ER {name: "RELU3" type: "ReLU" bottom: "conv3" Top: "conv3"} layer {name: "pool3" type: "Pooling" bottom: "Conv3" Top: "Pool3" Pooling_param {pool:ave kernel_size:3 stride:2}} layer {name: "ip1" Typ 
    E: "Innerproduct" bottom: "pool3" Top: "ip1" param {lr_mult:1 decay_mult:250} param {Lr_mult:2 decay_mult:0} inner_product_param {num_output:256 Weight_filler {type: "Gaussian" std:0 . Bias_filler} {type: ' Constant '}}} ' layer {name: ' fc7 ' type: ' innerproduct ' bottom: ' ip1 "Top:" Fc7 "param {
    Lr_mult:1 decay_mult:1} param {lr_mult:2 decay_mult:0} inner_product_param {NUM_OUTPU T:10 Weight_filler {type: "Gaussian" std:0.005} bias_filler {type: "Constant" Val UE:1}}} layer {name: "RELU7" type: "ReLU" bottom: "fc7" Top: "FC7"} layer {name: "DROP7" type: "D Ropout "bottom:" fc7 "Top:" Fc7 "Dropout_param {dropout_ratio:0.1}} layer {name:" accuracy "type:" Ac Curacy "bottom:" fc7 "bottom:" Label "Top:" accuracy "include {phase:test}} layer {name:" Loss "Typ E: "Softmaxwithloss" bottom: "fc7" bottom: "Label" Top: "Loss"}
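
For context, Caffe's Dropout layer implements "inverted" dropout: during training each input unit is zeroed with probability dropout_ratio and the surviving units are scaled by 1 / (1 - dropout_ratio), so the expected activation is unchanged; at test time the layer is a pass-through. A minimal NumPy sketch of that behavior (illustrative, not Caffe's actual code):

import numpy as np

def dropout_forward(x, ratio=0.1, train=True):
    """Inverted dropout, as Caffe's DropoutLayer applies it."""
    if not train:
        return x                                # TEST phase: identity
    mask = np.random.rand(*x.shape) >= ratio    # keep each unit with prob 1 - ratio
    return x * mask / (1.0 - ratio)             # rescale so E[output] == x

With a ratio of 0.1 only about 10% of units are dropped, a mild regularizer; the article reports this is enough to lift test accuracy from 0.62 to 0.72.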




////////////////////////////////////////////////////////////////////////////////////////////////////////////

Without dropout. The baseline network ends in a single 10-way ip1 feeding accuracy and loss directly:

Name: "Cifar10_full" Layer {name: "Cifar" type: "Data" Top: "Data" Top: "label" include {Phase:train} Transform_param {mean_file: "D:\\caffeinfo\\b_datacreate\\mean.binaryproto"} data_param {source: "D:\\Caf feinfo\\b_datacreate\\train_db "Batch_size:50 Backend:lmdb}}" layer {name: "Cifar" type: "Data" Top: " Data "Top:" label "include {phase:test} transform_param {mean_file:" D:\\caffeinfo\\b_datacreate\\mean
  . Binaryproto "} data_param {Source:" d:\\caffeinfo\\b_datacreate\\val_db "batch_size:50 Backend:lmdb
    }} layer {name: "CONV1" type: "Convolution" bottom: "Data" Top: "Conv1" param {lr_mult:1} param { 
      Lr_mult:2} convolution_param {num_output:32 pad:2 kernel_size:5 stride:1 Weight_filler { Type: "Gaussian" std:0.0001} bias_filler {type: "Constant"}}} "layer {name:" Pool 1 "type:" Pooling "bOttom: "Conv1" Top: "Pool1" Pooling_param {Pool:max kernel_size:3 stride:2}} layer {name: "Relu 1 "type:" ReLU "bottom:" pool1 "Top:" pool1 "} layer {name:" Norm1 "type:" LRN "bottom:" pool1 "Top:" Norm1 "Lrn_param {local_size:3 alpha:5e-05 beta:0.75 Norm_region:within_channel}}" layer {name: "C Onv2 "type:" Convolution "bottom:" Norm1 "Top:" Conv2 "param {lr_mult:1} param {Lr_mult:2} c Onvolution_param {num_output:32 pad:2 kernel_size:5 stride:1 weight_filler {type: "Gaussia n "std:0.01} bias_filler {type:" Constant "}}}} layer {name:" RELU2 "type:" ReLU "bot 
    Tom: "Conv2" Top: "conv2"} layer {name: "pool2" type: "Pooling" bottom: "conv2" Top: "Pool2" Pooling_param { Pool:ave kernel_size:3 Stride:2}} layer {name: "Norm2" type: "LRN" bottom: "pool2" Top: "norm 2 "Lrn_param {locAl_size:3 alpha:5e-05 beta:0.75 Norm_region:within_channel}} layer {name: "Conv3" type: "Convolut 
    Ion "bottom:" Norm2 "Top:" Conv3 "Convolution_param {num_output:64 pad:2 kernel_size:5 stride:1 Weight_filler {type: "Gaussian" std:0.01} bias_filler {type: "Constant"}}} Lay  ER {name: "RELU3" type: "ReLU" bottom: "conv3" Top: "conv3"} layer {name: "pool3" type: "Pooling" bottom: "Conv3" Top: "Pool3" Pooling_param {pool:ave kernel_size:3 stride:2}} layer {name: "ip1" Typ 
    E: "Innerproduct" bottom: "pool3" Top: "ip1" param {lr_mult:1 decay_mult:250} param {Lr_mult:2 decay_mult:0} inner_product_param {num_output:10 Weight_filler {type: "Gaussian" std:0. Bias_filler {type: "Constant"}}}} layer {name: "accuracy" type: "Accuracy" bottom: "ip1 "Bottom:" Label "toP: "Accuracy" include {phase:test}} layer {name: "Loss" type: "Softmaxwithloss" bottom: "ip1" bottom:

 "Label" Top: "Loss"}

