Caffe C++ Usage Tutorial


by Shicai Yang (sorcerer under the Stars) on 2015/08/06

Initializing the network
#include "caffe/caffe.hpp"#include <string>#include <vector>using namespace caffe;char *proto = "H:\\Models\\Caffe\\deploy.prototxt"; /* 加载CaffeNet的配置 */Phase phase = TEST; /* or TRAIN */Caffe::set_mode(Caffe::CPU);// Caffe::set_mode(Caffe::GPU);// Caffe::SetDevice(0);//! Note: 后文所有提到的net,都是这个netboost::shared_ptr< Net<float> > net(new caffe::Net<float>(proto, phase));
Load a trained model
char *model = "H:\\Models\\Caffe\\bvlc_reference_caffenet.caffemodel";
net->CopyTrainedLayersFrom(model);
Read image mean value
char *mean_file = "H:\\Models\\Caffe\\imagenet_mean.binaryproto";
Blob<float> image_mean;
BlobProto blob_proto;
const float *mean_ptr;
unsigned int num_pixel;

bool succeed = ReadProtoFromBinaryFile(mean_file, &blob_proto);
if (succeed)
{
    image_mean.FromProto(blob_proto);
    num_pixel = image_mean.count(); /* NCHW = 1x3x256x256 = 196608 */
    mean_ptr = (const float *) image_mean.cpu_data();
}
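As a usage sketch (the buffer name raw_data and the helper below are assumptions, not part of the original code): once mean_ptr is available, the mean is typically subtracted element-wise from an input image that has already been resized to the same 3x256x256 NCHW layout as the mean blob.

/* Hypothetical helper: subtract the dataset mean from one image held in raw_data.
   Assumes raw_data holds num_pixel floats laid out exactly like image_mean (NCHW). */
void subtract_mean(float *raw_data, const float *mean_ptr, unsigned int num_pixel)
{
    for (unsigned int i = 0; i < num_pixel; ++i)
    {
        raw_data[i] -= mean_ptr[i];
    }
}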
Forward-propagate the network on specified input data
//! Note: data_ptr points to data that has already been preprocessed (mean-subtracted,
//! matching the network's input height/width and batch size)
void caffe_forward(boost::shared_ptr< Net<float> > & net, float *data_ptr)
{
    Blob<float>* input_blobs = net->input_blobs()[0];
    switch (Caffe::mode())
    {
    case Caffe::CPU:
        memcpy(input_blobs->mutable_cpu_data(), data_ptr,
            sizeof(float) * input_blobs->count());
        break;
    case Caffe::GPU:
        cudaMemcpy(input_blobs->mutable_gpu_data(), data_ptr,
            sizeof(float) * input_blobs->count(), cudaMemcpyHostToDevice);
        break;
    default:
        LOG(FATAL) << "Unknown Caffe mode.";
    }
    net->ForwardPrefilled();
}
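A minimal usage sketch (assuming data_ptr already holds a preprocessed batch, and that the single output blob is the 1000-way softmax prob, as in CaffeNet's deploy.prototxt):

caffe_forward(net, data_ptr);                    /* copy the input in and run the net */

Blob<float>* prob = net->output_blobs()[0];      /* NCHW = 10x1000x1x1 for CaffeNet */
const float *prob_data = prob->cpu_data();
int num_classes = prob->count() / prob->num();   /* scores per image in the batch */

int best_class = 0;                              /* arg-max over the first image's scores */
for (int c = 1; c < num_classes; ++c)
{
    if (prob_data[c] > prob_data[best_class])
    {
        best_class = c;
    }
}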
Get the index of a feature layer (blob) in the network from its name
//! Note: a Net blob is the output data of a layer, i.e. its feature maps
// char *query_blob_name = "conv1";
unsigned int get_blob_index(boost::shared_ptr< Net<float> > & net, char *query_blob_name)
{
    std::string str_query(query_blob_name);
    vector< string > const & blob_names = net->blob_names();
    for (unsigned int i = 0; i != blob_names.size(); ++i)
    {
        if (str_query == blob_names[i])
        {
            return i;
        }
    }
    LOG(FATAL) << "Unknown blob name: " << str_query;
}
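As an aside, Net also exposes a name-based lookup directly (has_blob / blob_by_name), so when only the blob itself is needed the index helper can be skipped; a minimal sketch:

/* Equivalent lookup through Net's own accessors instead of the helper above. */
if (net->has_blob("conv1"))
{
    boost::shared_ptr<Blob<float> > conv1_blob = net->blob_by_name("conv1");
}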
Read the data of a specified feature layer (blob) in the network
//! Note: according to CaffeNet's deploy.prototxt, this net has 15 blobs, from data all the way to prob
char *query_blob_name = "conv1"; /* data, conv1, pool1, norm1, fc6, prob, etc. */
unsigned int blob_id = get_blob_index(net, query_blob_name);

boost::shared_ptr<Blob<float> > blob = net->blobs()[blob_id];
unsigned int num_data = blob->count(); /* NCHW = 10x96x55x55 */
const float *blob_ptr = (const float *) blob->cpu_data();
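A short sketch (the indices are illustrative) of reading one specific activation from that blob via Blob::offset(), which avoids computing the NCHW offset by hand:

/* Activation of image n=0, channel c=5, row h=10, column w=10 in conv1. */
int n = 0, c = 5, h = 10, w = 10;
float activation = blob_ptr[blob->offset(n, c, h, w)];   /* offset() = ((n*C + c)*H + h)*W + w */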
Get the index of a layer in the network from its name
//! Note: Layer covers every layer of the network; for example, CaffeNet has 23 layers in total
// char *query_layer_name = "conv1";
unsigned int get_layer_index(boost::shared_ptr< Net<float> > & net, char *query_layer_name)
{
    std::string str_query(query_layer_name);
    vector< string > const & layer_names = net->layer_names();
    for (unsigned int i = 0; i != layer_names.size(); ++i)
    {
        if (str_query == layer_names[i])
        {
            return i;
        }
    }
    LOG(FATAL) << "Unknown layer name: " << str_query;
}
Read the weight data of a specified layer
//! Note: unlike Net blobs, which are feature maps, a Layer's blobs are the weights
//! and biases of Conv, FC, and similar layers
char *query_layer_name = "conv1";
const float *weight_ptr, *bias_ptr;
unsigned int layer_id = get_layer_index(net, query_layer_name);
boost::shared_ptr<Layer<float> > layer = net->layers()[layer_id];
std::vector<boost::shared_ptr<Blob<float> > > blobs = layer->blobs();
if (blobs.size() > 0)
{
    weight_ptr = (const float *) blobs[0]->cpu_data();
    bias_ptr = (const float *) blobs[1]->cpu_data();
}

//! Note: in training mode, reading a layer's gradient data works the same way; the only
//! difference is replacing cpu_data with cpu_diff
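For orientation, a small sketch of what those pointers cover for CaffeNet's conv1, whose weight blob is 96x3x11x11 and whose bias blob has 96 entries (dimensions as in the reference model):

boost::shared_ptr<Blob<float> > weights = blobs[0];    /* conv1 weights: NCHW = 96x3x11x11 */
int num_filters = weights->num();                      /* 96 output channels */
int filter_size = weights->count() / num_filters;      /* 3*11*11 = 363 weights per filter */
const float *filter0 = weight_ptr;                     /* weights of the first filter */
const float *filter1 = weight_ptr + filter_size;       /* weights of the second filter */
float bias0 = bias_ptr[0];                             /* one bias value per output channel */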
Modify weight data for a layer
const float* data_ptr;          /* pointer to the source data to be written */
float* weight_ptr = NULL;       /* pointer to the target layer's weights inside the net */
unsigned int data_size;         /* number of values to write */
char *layer_name = "conv1";     /* name of the layer to modify */

unsigned int layer_id = get_layer_index(net, layer_name);
boost::shared_ptr<Blob<float> > blob = net->layers()[layer_id]->blobs()[0];

CHECK(data_size == blob->count());
switch (Caffe::mode())
{
case Caffe::CPU:
    weight_ptr = blob->mutable_cpu_data();
    break;
case Caffe::GPU:
    weight_ptr = blob->mutable_gpu_data();
    break;
default:
    LOG(FATAL) << "Unknown Caffe mode";
}
caffe_copy(blob->count(), data_ptr, weight_ptr);

//! Note: in training mode, manually modifying a layer's gradient data works the same way:
//! replace mutable_cpu_data with mutable_cpu_diff and mutable_gpu_data with mutable_gpu_diff
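As a purely illustrative sketch of how the source buffer above might be prepared (this would run before the caffe_copy call), the existing weights can be read out, scaled, and written back through the same path:

/* Illustrative only: scale the existing conv1 weights by 0.5 before copying them back. */
std::vector<float> scaled(blob->count());
const float *current = blob->cpu_data();
for (int i = 0; i < blob->count(); ++i)
{
    scaled[i] = 0.5f * current[i];
}
data_ptr = &scaled[0];
data_size = blob->count();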
Save a new model
char* weights_file = "bvlc_reference_caffenet_new.caffemodel";
NetParameter net_param;
net->ToProto(&net_param, false);
WriteProtoToBinaryFile(net_param, weights_file);
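A quick sanity check (a sketch; check_net is a name introduced here, not part of the original code) is to load the freshly written file back into a second network built from the same prototxt:

boost::shared_ptr< Net<float> > check_net(new caffe::Net<float>(proto, TEST));
check_net->CopyTrainedLayersFrom(weights_file);   /* reload the file just written */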
