How to Use Caffe in a program for image classification and caffe image classification

Source: Internet
Author: User

How to Use Caffe in a program for image classification and caffe image classification

Caffe is an open-source library with excellent deep learning capabilities. It is implemented in C++ and CUDA, and has the advantages of fast speed and convenient model definition. After studying it for a few days, I found one inconvenient point: there is no direct interface for calling Caffe from your own program to classify an image. Caffe's data layer can read from databases (including leveldb, lmdb, and hdf5), from image files, and from memory. To use it inside a program, we naturally want to read from memory. For more information, see:

layers {
  name: "mydata"
  type: MEMORY_DATA
  top: "data"
  top: "label"
  transform_param {
    scale: 0.00390625
  }
  memory_data_param {
    batch_size: 10
    channels: 1
    height: 24
    width: 24
  }
}

Here, the four parameters in memory_data_param must all be set. For the meaning of these parameters, see the caffe.proto file in the source code. Now we can design a Classifier class to encapsulate this:

#ifndef CAFFE_CLASSIFIER_H#define CAFFE_CLASSIFIER_H#include <string>#include <vector>#include "caffe/net.hpp"#include "caffe/data_layers.hpp"#include <opencv2/core.hpp>using cv::Mat;namespace caffe {template <typename Dtype>class Classifier { public:  explicit Classifier(const string& param_file, const string& weights_file);  Dtype test(vector<Mat> &images, vector<int> &labels, int iter_num);  virtual ~Classifier() {}  inline shared_ptr<Net<Dtype> > net() { return net_; }  void predict(vector<Mat> &images, vector<int> *labels);  void predict(vector<Dtype> &data, vector<int> *labels, int num);  void extract_feature(vector<Mat> &images, vector<vector<Dtype>> *out); protected:  shared_ptr<Net<Dtype> > net_;  MemoryDataLayer<Dtype> *m_layer_;  int batch_size_;  int channels_;  int height_;  int width_;   DISABLE_COPY_AND_ASSIGN(Classifier);};}//namespace #endif //CAFFE_CLASSIFIER_H

In the constructor, we use the model definition file (.prototxt) and the trained model file (.caffemodel) to construct a Net object, and point m_layer_ at the memory data layer in the Net, so that the AddMatVector and Reset functions of MemoryDataLayer can later be called to add data.

#include <cstdio>#include <algorithm>#include <string>#include <vector>#include "caffe/net.hpp"#include "caffe/proto/caffe.pb.h"#include "caffe/util/io.hpp"#include "caffe/util/math_functions.hpp"#include "caffe/util/upgrade_proto.hpp"#include "caffe_classifier.h"namespace caffe {template <typename Dtype>Classifier<Dtype>::Classifier(const string& param_file, const string& weights_file) : net_(){  net_.reset(new Net<Dtype>(param_file, TEST));  net_->CopyTrainedLayersFrom(weights_file);  //m_layer_ = (MemoryDataLayer<Dtype>*)net_->layer_by_name("mnist").get();  m_layer_ = (MemoryDataLayer<Dtype>*)net_->layers()[0].get();  batch_size_ = m_layer_->batch_size();  channels_ = m_layer_->channels();  height_ = m_layer_->height();  width_ = m_layer_->width();}template <typename Dtype>Dtype Classifier<Dtype>::test(vector<Mat> &images, vector<int> &labels, int iter_num){    m_layer_->AddMatVector(images, labels);    //    int iterations = iter_num;    vector<Blob<Dtype>* > bottom_vec;  vector<int> test_score_output_id;  vector<Dtype> test_score;  Dtype loss = 0;  for (int i = 0; i < iterations; ++i) {    Dtype iter_loss;    const vector<Blob<Dtype>*>& result =        net_->Forward(bottom_vec, &iter_loss);    loss += iter_loss;    int idx = 0;    for (int j = 0; j < result.size(); ++j) {      const Dtype* result_vec = result[j]->cpu_data();      for (int k = 0; k < result[j]->count(); ++k, ++idx) {        const Dtype score = result_vec[k];        if (i == 0) {          test_score.push_back(score);          test_score_output_id.push_back(j);        } else {          test_score[idx] += score;        }        const std::string& output_name = net_->blob_names()[            net_->output_blob_indices()[j]];        LOG(INFO) << "Batch " << i << ", " << output_name << " = " << score;      }    }  }  loss /= iterations;  LOG(INFO) << "Loss: " << loss;  return loss;}template <typename Dtype>void Classifier<Dtype>::predict(vector<Mat> &images, vector<int> *labels){    int 
original_length = images.size();    if(original_length == 0)        return;    int valid_length = original_length / batch_size_ * batch_size_;    if(original_length != valid_length)    {        valid_length += batch_size_;        for(int i = original_length; i < valid_length; i++)        {            images.push_back(images[0].clone());        }    }    vector<int> valid_labels, predicted_labels;    valid_labels.resize(valid_length, 0);    m_layer_->AddMatVector(images, valid_labels);    vector<Blob<Dtype>* > bottom_vec;    for(int i = 0; i < valid_length / batch_size_; i++)    {        const vector<Blob<Dtype>*>& result = net_->Forward(bottom_vec);        const Dtype * result_vec = result[1]->cpu_data();        for(int j = 0; j < result[1]->count(); j++)        {            predicted_labels.push_back(result_vec[j]);        }    }    if(original_length != valid_length)    {        images.erase(images.begin()+original_length, images.end());    }    labels->resize(original_length, 0);    std::copy(predicted_labels.begin(), predicted_labels.begin() + original_length, labels->begin());}template <typename Dtype>void Classifier<Dtype>::predict(vector<Dtype> &data, vector<int> *labels, int num){    int size = channels_*height_*width_;    CHECK_EQ(data.size(), num*size);    int original_length = num;    if(original_length == 0)        return;    int valid_length = original_length / batch_size_ * batch_size_;    if(original_length != valid_length)    {        valid_length += batch_size_;        for(int i = original_length; i < valid_length; i++)        {            for(int j = 0; j < size; j++)                data.push_back(0);        }    }    vector<int> predicted_labels;    Dtype * label_ = new Dtype[valid_length];    memset(label_, 0, valid_length);    m_layer_->Reset(data.data(), label_, valid_length);    vector<Blob<Dtype>* > bottom_vec;    for(int i = 0; i < valid_length / batch_size_; i++)    {        const vector<Blob<Dtype>*>& result = net_->Forward(bottom_vec);   
     const Dtype * result_vec = result[1]->cpu_data();        for(int j = 0; j < result[1]->count(); j++)        {            predicted_labels.push_back(result_vec[j]);        }    }    if(original_length != valid_length)    {        data.erase(data.begin()+original_length*size, data.end());    }    delete [] label_;    labels->resize(original_length, 0);    std::copy(predicted_labels.begin(), predicted_labels.begin() + original_length, labels->begin());}template <typename Dtype>void Classifier<Dtype>::extract_feature(vector<Mat> &images, vector<vector<Dtype>> *out){    int original_length = images.size();    if(original_length == 0)        return;    int valid_length = original_length / batch_size_ * batch_size_;    if(original_length != valid_length)    {        valid_length += batch_size_;        for(int i = original_length; i < valid_length; i++)        {            images.push_back(images[0].clone());        }    }    vector<int> valid_labels;    valid_labels.resize(valid_length, 0);    m_layer_->AddMatVector(images, valid_labels);    vector<Blob<Dtype>* > bottom_vec;    out->clear();    for(int i = 0; i < valid_length / batch_size_; i++)    {        const vector<Blob<Dtype>*>& result = net_->Forward(bottom_vec);        const Dtype * result_vec = result[0]->cpu_data();        const int dim = result[0]->count(1);        for(int j = 0; j < result[0]->num(); j++)        {            const Dtype * ptr = result_vec + j * dim;            vector<Dtype> one_;            for(int k = 0; k < dim; ++k)                one_.push_back(ptr[k]);            out->push_back(one_);        }    }    if(original_length != valid_length)    {        images.erase(images.begin()+original_length, images.end());        out->erase(out->begin()+original_length, out->end());    }}INSTANTIATE_CLASS(Classifier);}  // namespace caffe

Since the number of samples added must be an integer multiple of batch_size (AddMatVector enforces this with the check below), we pad the input up to the next batch multiple when adding data.

CHECK_EQ(num % batch_size_, 0) << "The added data must be a multiple of the batch size.";  // check performed inside AddMatVector

At the end of the model file, we changed the loss layer during training to the argmax layer:

layers {
  name: "predicted"
  type: ARGMAX
  bottom: "prob"
  top: "predicted"
}
Author: waring Source: http://www.cnblogs.com/waring welcome to reprint or share, but please be sure to declare the source of the article.

Contact Us

The content source of this page is from Internet, which doesn't represent Alibaba Cloud's opinion; products and services mentioned on that page don't have any relationship with Alibaba Cloud. If the content of the page makes you feel confusing, please write us an email, we will handle the problem within 5 days after receiving your email.

If you find any instances of plagiarism from the community, please send an email to: info-contact@alibabacloud.com and provide relevant evidence. A staff member will contact you within 5 working days.

A Free Trial That Lets You Build Big!

Start building with 50+ products and up to 12 months usage for Elastic Compute Service

  • Sales Support

    1 on 1 presale consultation

  • After-Sales Support

    24/7 Technical Support 6 Free Tickets per Quarter Faster Response

  • Alibaba Cloud offers highly flexible support services tailored to meet your exact needs.