// Caffe feature-extraction interface (C++ wrapper around the Caffe API)

//CaffeExFeat.h
#ifndef CAFFEEXFEAT_H 
#define CAFFEEXFEAT_H
 
#include "caffe/caffe.hpp"
#include <string>
#include <vector>
#include "opencv2/opencv.hpp"
 // Source: https://blog.csdn.net/AP1005834/article/details/79820947
 
using namespace caffe;
 
// Thin wrapper around a Caffe Net<float> that extracts one layer's
// activations as a feature vector and compares two images by cosine
// similarity. CPU-only (see init()).
class CaffeExFeat
{
    public:
         // proto: deploy .prototxt path; model: .caffemodel weights path;
         // nameLayer: name of the blob whose activations form the feature;
         // meanFile: .binaryproto mean image; scale: multiplier applied
         // after mean subtraction (-1 disables scaling).
         explicit CaffeExFeat(std::string proto,std::string model,char* nameLayer,std::string meanFile,std::string meanFile,float scale=-1);
         // Same, but the mean is the constant per-channel value (v1,v2,v3).
         explicit CaffeExFeat(std::string proto,std::string model,char* nameLayer,float v1=0.0,float v2=0.0,float v3=0.0,float scale=-1);
         ~CaffeExFeat();
 
        // Runs a forward pass; returns a heap-allocated array of featNum_
        // doubles. The CALLER owns the buffer and must delete[] it.
        double*extractFeat(const cv::Mat& img);
        // Cosine similarity between the feature vectors of img1 and img2.
        double calSimilarity(const cv::Mat& img1 ,const cv::Mat& img2);
    private:
        unsigned int blob_id_;                  // index of the feature blob in net_->blobs()
        boost::shared_ptr< Net<float> > net_;   // the loaded network
        cv::Size input_geometry_;               // network input width/height
        int num_channels_;                      // network input channels (1 or 3)
        cv::Mat mean_;                          // mean image subtracted in preprocess()
        Blob<float>* input_blobs_;              // non-owning pointer to the net's input blob
        unsigned int featNum_;                  // element count of the last extracted feature
        float scale_;                           // -1 means "no scaling"
 
        void init(std::string proto,std::string model,float scale);
        void getMeanData(std::string mean_file);
        void getMeanData(float v1,float v2,float v3 );
        unsigned int get_blob_index( char *query_blob_name);
        void wrapInputLayer(std::vector<cv::Mat>* input_channels);
        void preprocess(const cv::Mat& img,std::vector<cv::Mat>* input_channels);
 
};
 
#endif
//CaffeExFeat.cpp
#include "CaffeExFeat.h"
 
// Constructor variant that loads the mean image from a .binaryproto file.
CaffeExFeat::CaffeExFeat(std::string proto,std::string model, char* nLayer,std::string meanFile,
    float scale)
{
    // Network must be up first: the helpers below read input_geometry_,
    // num_channels_ and net_, all of which init() populates.
    init(proto, model, scale);
    blob_id_ = get_blob_index(nLayer);
    getMeanData(meanFile);
}
 
// Constructor variant that uses a constant per-channel mean (v1, v2, v3)
// instead of a mean file.
CaffeExFeat::CaffeExFeat(std::string proto,std::string model, char* nLayer,float v1,float v2,float v3,
    float scale)
{
    // init() must run first — getMeanData() needs input_geometry_ and
    // get_blob_index() needs net_.
    init(proto, model, scale);
    blob_id_ = get_blob_index(nLayer);
    getMeanData(v1, v2, v3);
}
 
// Loads the network definition in TEST phase on the CPU, copies the trained
// weights, caches the input blob's geometry, and forces a batch size of one.
void CaffeExFeat::init(std::string proto,std::string model,float scale)
{
    scale_=scale;
    Phase phase = TEST;
    Caffe::set_mode(Caffe::CPU);
    net_ = boost::shared_ptr< Net<float> >(new caffe::Net<float>(proto, phase));
    net_->CopyTrainedLayersFrom(model);
 
    input_blobs_ = net_->input_blobs()[0];
    num_channels_ = input_blobs_->channels();
    input_geometry_ = cv::Size(input_blobs_->width(),input_blobs_->height());
    input_blobs_->Reshape(1, num_channels_,input_geometry_.height, input_geometry_.width); // force batch size 1 (the input blob could in principle hold several images)
    
    net_->Reshape();  // propagate the new input dimensions through every layer
    
}
 
// Nothing to release explicitly: net_ is a boost::shared_ptr, mean_ is a
// cv::Mat, and input_blobs_ is a non-owning pointer into net_.
CaffeExFeat::~CaffeExFeat()
{
    
}
 
/**
 * Runs a forward pass on one image and copies the activations of the blob
 * selected at construction time (blob_id_) into a new buffer.
 *
 * @param img  Input image; channel count is adapted in preprocess().
 * @return Heap-allocated array of featNum_ doubles. The CALLER owns the
 *         buffer and must release it with delete[].
 *
 * Side effect: featNum_ is updated to the blob's element count; it is read
 * afterwards by calSimilarity().
 */
double* CaffeExFeat::extractFeat(const cv::Mat& img)
{
    // Wrap the net's input memory in per-channel cv::Mat views, then write
    // the preprocessed image directly into that memory.
    std::vector<cv::Mat> input_channels;
    wrapInputLayer(&input_channels);
    preprocess(img, &input_channels);

    net_->ForwardPrefilled();  // deprecated alias of Forward(); kept for old Caffe builds

    boost::shared_ptr<Blob<float> > featBlob = net_->blobs()[blob_id_];
    featNum_ = featBlob->count();
    const float* featData = featBlob->cpu_data();  // cpu_data() already returns const float*; no cast needed

    double* out = new double[featNum_];
    // Fix: loop index is unsigned to match featNum_ (was a signed int,
    // causing a signed/unsigned comparison).
    for (unsigned int k = 0; k < featNum_; ++k) {
        out[k] = static_cast<double>(featData[k]);
    }

    return out;
}
 
/**
 * Cosine similarity between the feature vectors of two images.
 *
 * @return dot(f1, f2) / (|f1| * |f2|); 0.0 if either vector has zero norm
 *         (the original divided by zero and returned NaN in that case).
 */
double CaffeExFeat::calSimilarity(const cv::Mat& img1 ,const cv::Mat& img2)
{
    double* feat_1 = extractFeat(img1);
    double* feat_2 = extractFeat(img2);

    // featNum_ was set by the extractFeat calls above; both vectors have
    // the same length since they come from the same blob.
    const double dot   = cblas_ddot(featNum_, feat_1, 1, feat_2, 1);
    const double norm1 = std::sqrt(cblas_ddot(featNum_, feat_1, 1, feat_1, 1));
    const double norm2 = std::sqrt(cblas_ddot(featNum_, feat_2, 1, feat_2, 1));

    // Free before any return path so the buffers can never leak.
    delete [] feat_1;
    delete [] feat_2;

    const double denom = norm1 * norm2;
    if (denom == 0.0) {
        return 0.0;  // fix: avoid division by zero for all-zero features
    }
    return dot / denom;
}
 
void CaffeExFeat::getMeanData(std::string mean_file)
{
  BlobProto blob_proto;  
  ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);  
  
  /* Convert from BlobProto to Blob<float> */  
  Blob<float> mean_blob;  
  mean_blob.FromProto(blob_proto);  
  
  /* The format of the mean file is planar 32-bit float BGR or grayscale. */  
  std::vector<cv::Mat> channels;  
  float* data = mean_blob.mutable_cpu_data();  
  for (int i = 0; i < num_channels_; ++i) {  
    /* Extract an individual channel. */  
    cv::Mat channel(mean_blob.height(), mean_blob.width(), CV_32FC1, data);  
    channels.push_back(channel);  
    data += mean_blob.height() * mean_blob.width();  
  }  
  
  /* Merge the separate channels into a single image. */  
  cv::Mat mean;  
  cv::merge(channels, mean);  
  
  /* Compute the global mean pixel value and create a mean image 
   * filled with this value. */  
  cv::Scalar channel_mean = cv::mean(mean);  
  mean_ = cv::Mat(input_geometry_, mean.type(), channel_mean);  
}
 
void CaffeExFeat::getMeanData(float v1,float v2, float v3)
{
    cv::Scalar channel_mean(v1,v2,v3);
    mean_ = cv::Mat(input_geometry_,CV_32FC3,channel_mean );
}
 
unsigned int CaffeExFeat::get_blob_index( char *query_blob_name)
{
    std::string str_query(query_blob_name);    
    vector< string > const & blob_names = net_->blob_names();
    for( unsigned int i = 0; i != blob_names.size(); ++i ) 
    { 
        if( str_query == blob_names[i] ) 
        { 
            return i;
        } 
    }
    LOG(FATAL) << "Unknown blob name: " << str_query;
}
 
// Exposes the network's input blob as one cv::Mat per channel, so a later
// cv::split() in preprocess() writes pixel data straight into Caffe's input
// memory with no extra copy.
void CaffeExFeat::wrapInputLayer(std::vector<cv::Mat>* input_channels) {
  Blob<float>* input_layer = net_->input_blobs()[0];

  const int w = input_layer->width();
  const int h = input_layer->height();
  const int plane_size = w * h;  // floats per channel plane

  float* plane = input_layer->mutable_cpu_data();
  for (int c = 0; c < input_layer->channels(); ++c, plane += plane_size) {
    // Non-owning view over one channel plane of the blob.
    input_channels->push_back(cv::Mat(h, w, CV_32FC1, plane));
  }
}
 
// Converts img to the network's expected channel count, size, and type,
// subtracts the mean, optionally scales, and splits the result into the
// blob-backed channel Mats produced by wrapInputLayer(). Order matters:
// color conversion -> resize -> float conversion -> mean -> scale -> split.
void CaffeExFeat::preprocess(const cv::Mat& img,std::vector<cv::Mat>* input_channels)
{
  // Match the image's channel count to the network's (BGR assumed, as is
  // conventional for OpenCV/Caffe).
  cv::Mat sample;  
  if (img.channels() == 3 && num_channels_ == 1)  
    cv::cvtColor(img, sample, CV_BGR2GRAY);  
  else if (img.channels() == 4 && num_channels_ == 1)  
    cv::cvtColor(img, sample, CV_BGRA2GRAY);  
  else if (img.channels() == 4 && num_channels_ == 3)  
    cv::cvtColor(img, sample, CV_BGRA2BGR);  
  else if (img.channels() == 1 && num_channels_ == 3)  
    cv::cvtColor(img, sample, CV_GRAY2BGR);  
  else  
    sample = img; 
 
  // Resize to the network's input geometry only when needed.
  cv::Mat sample_resized;
  if (sample.size() != input_geometry_)  
    cv::resize(sample, sample_resized, input_geometry_);  
  else  
    sample_resized = sample;
 
  // Convert to 32-bit float so it matches mean_ and the blob's dtype.
  cv::Mat sample_float;
  if (num_channels_ == 3)  
    sample_resized.convertTo(sample_float, CV_32FC3);  
  else  
    sample_resized.convertTo(sample_float, CV_32FC1);
 
  // Mean subtraction; mean_ must have the same size/type as sample_float.
  cv::Mat sample_normalized;  
  cv::subtract(sample_float, mean_, sample_normalized);
  
  // Optional post-mean scaling; scale_ == -1 is the "disabled" sentinel.
  if(scale_!=-1){
      cv::multiply(scale_ ,sample_normalized,sample_normalized);  
  }
  
  // Split into the per-channel views that alias the net's input blob, so
  // this write IS the copy into Caffe's input memory.
  cv::split(sample_normalized, *input_channels);
}

 

 

参考《深度学习21天实战caffe》,P136,所用到的boost需要是boost_1_58_0版本。

编写文件 net_demo.cpp,并保存在/home/sf/demo下:

#include <vector>
#include <iostream>
#include "caffe/net.hpp"
using namespace caffe;
using namespace std;


int main(void)
{
    std::string proto("deploy.prototxt");
    Net<float> nn(proto,caffe::TEST);
    vector<string> bn=nn.blob_names();
    for(int i=0;i<bn.size();i++)
    {
        cout<<"Blob #"<<i<<" : "<<bn[i]<<endl;


    }


return 0;


}

sf@ub
--------------------- 
作者:woneil 
来源:CSDN 
原文:https://blog.csdn.net/ahbbshenfeng/article/details/52077605 
版权声明:本文为博主原创文章,转载请附上博文链接!

 

Net在Caffe中代表一个完整的CNN模型,它包含若干个Layer实例。前面看到的各类prototxt的经典网络结构如LeNet、AlexNet等都是Caffe代码实现的一个Net对象。

1、Net基本用法
#include <caffe\net.hpp>

在main()中添加

std::string proto("deploy.prototxt");
Net<float> nn(proto, caffe::TEST);
vector<string> bn = nn.blob_names();  //获取Net中所有blob对象名
vector<string> ln = nn.layer_names();
for (int i = 0; i < bn.size(); i++)
{
    cout << "Blob #" << i << ":" << bn[i] << endl;
}
for (int j = 0; j < ln.size(); j++)
{
    cout << "Layer #" << j << ":" << ln[j] << endl;
}
运行结果如下:
F0103 13:36:44.474290  5904 cudnn_conv_layer.cpp:53] Check failed: status == CUDNN_STATUS_SUCCESS (6 vs. 0)  CUDNN_STATUS_ARCH_MISMATCH
*** Check failure stack trace: ***
由于我的GPU硬件不支持cuda 3.0以上版本,关闭掉预处理器中的USE_CUDNN(修改CommonSettings.props文件相应选项)。可以看到结果如下:

I0103 13:54:55.282647 13792 net.cpp:283] Network initialization done.
Blob #0:data
Blob #1:conv1
Blob #2:pool1
Blob #3:norm1
Blob #4:conv2
Blob #5:pool2
Blob #6:norm2
Blob #7:conv3
Blob #8:conv4
Blob #9:conv5
Blob #10:pool5
Blob #11:fc6
Blob #12:fc7
Blob #13:fc8
Blob #14:prob
Layer #0:data
Layer #1:conv1
Layer #2:relu1
Layer #3:pool1
Layer #4:norm1
Layer #5:conv2
Layer #6:relu2
Layer #7:pool2
Layer #8:norm2
Layer #9:conv3
Layer #10:relu3
Layer #11:conv4
Layer #12:relu4
Layer #13:conv5
Layer #14:relu5
Layer #15:pool5
Layer #16:fc6
Layer #17:relu6
Layer #18:drop6
Layer #19:fc7
Layer #20:relu7
Layer #21:drop7
Layer #22:fc8
Layer #23:prob
--------------------- 
作者:阿尔法旺旺 
来源:CSDN 
原文:https://blog.csdn.net/yingwei13mei/article/details/53997389 
版权声明:本文为博主原创文章,转载请附上博文链接!

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值