Caffe data layers: what kinds of data input does Caffe support?


Caffe is currently one of the better open-source deep learning libraries. It is implemented in C++ and CUDA, and its strengths include speed and convenient model definition. After a few days of working with it, however, I ran into one inconvenience: there is no direct interface for calling Caffe from my own program to classify images. Caffe's data layers can read input from a database (LevelDB, LMDB, or HDF5), from image files, or from memory. To use Caffe inside a program we naturally want to read from memory, so we first define the data layer in the model definition file:

layers {
  name: "mydata"
  type: MEMORY_DATA
  top: "data"
  top: "label"
  transform_param {
    scale: 0.00390625
  }
  memory_data_param {
    batch_size: 10
    channels: 1
    height: 24
    width: 24
  }
}

All four parameters in memory_data_param must be set here; their definitions can be looked up in the caffe.proto file in the source tree. Now we can wrap this in a Classifier class:

#ifndef CAFFE_CLASSIFIER_H
#define CAFFE_CLASSIFIER_H

#include <string>
#include <vector>
#include <opencv2/core/core.hpp>

#include "caffe/net.hpp"
#include "caffe/data_layers.hpp"

using cv::Mat;

namespace caffe {

template <typename Dtype>
class Classifier {
 public:
  explicit Classifier(const string& param_file, const string& weights_file);
  Dtype test(vector<Mat> &images, vector<int> &labels, int iter_num);
  virtual ~Classifier() {}
  inline shared_ptr<Net<Dtype> > net() { return net_; }
  void predict(vector<Mat> &images, vector<int> *labels);
  void predict(vector<Dtype> &data, vector<int> *labels, int num);
  void extract_feature(vector<Mat> &images, vector<vector<Dtype> > *out);

 protected:
  shared_ptr<Net<Dtype> > net_;
  MemoryDataLayer<Dtype> *m_layer_;  // the net's memory data layer
  int batch_size_;
  int channels_;
  int height_;
  int width_;

  DISABLE_COPY_AND_ASSIGN(Classifier);
};

}  // namespace caffe

#endif  // CAFFE_CLASSIFIER_H
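Roughly, the class is meant to be used like this. The sketch below is only an illustration: the file names (lenet_mem.prototxt, lenet.caffemodel, digit.png) are placeholders, and it assumes the deploy model described in this post, i.e. a memory data layer taking 1x24x24 grayscale input and an ArgMax layer at the end producing the predicted label:

#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>

#include "caffe_classifier.h"

int main() {
  // Placeholder paths: a deploy prototxt containing the MEMORY_DATA layer
  // above, plus the weights trained for the same network.
  caffe::Classifier<float> classifier("lenet_mem.prototxt", "lenet.caffemodel");

  // Load one image as grayscale (flag 0) and resize it to the 24x24
  // geometry declared in memory_data_param.
  cv::Mat img = cv::imread("digit.png", 0);
  cv::resize(img, img, cv::Size(24, 24));

  std::vector<cv::Mat> images(1, img);
  std::vector<int> labels;
  classifier.predict(images, &labels);  // pads internally to a full batch

  std::cout << "predicted label: " << labels[0] << std::endl;
  return 0;
}

Depending on how Caffe was built, you may also want to call Caffe::set_mode(Caffe::CPU) or Caffe::set_mode(Caffe::GPU) before constructing the classifier.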
In the constructor we build a Net object from the model definition file (.prototxt) and the trained weights file (.caffemodel), and point m_layer_ at the memory data layer of the Net, so that we can later feed data in through the MemoryDataLayer functions AddMatVector and Reset.

#include <algorithm>
#include <cstring>
#include <string>
#include <vector>

#include "caffe/net.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/upgrade_proto.hpp"

#include "caffe_classifier.h"

namespace caffe {

template <typename Dtype>
Classifier<Dtype>::Classifier(const string& param_file, const string& weights_file)
    : net_() {
  net_.reset(new Net<Dtype>(param_file, TEST));
  net_->CopyTrainedLayersFrom(weights_file);
  //m_layer_ = (MemoryDataLayer<Dtype>*)net_->layer_by_name("mnist").get();
  m_layer_ = (MemoryDataLayer<Dtype>*)net_->layers()[0].get();
  batch_size_ = m_layer_->batch_size();
  channels_ = m_layer_->channels();
  height_ = m_layer_->height();
  width_ = m_layer_->width();
}

// Run iter_num forward passes over already-labelled images, accumulating
// the loss reported by the net.
template <typename Dtype>
Dtype Classifier<Dtype>::test(vector<Mat> &images, vector<int> &labels, int iter_num) {
  m_layer_->AddMatVector(images, labels);

  int iterations = iter_num;
  vector<Blob<Dtype>*> bottom_vec;
  vector<int> test_score_output_id;
  vector<Dtype> test_score;
  Dtype loss = 0;
  for (int i = 0; i < iterations; ++i) {
    Dtype iter_loss;
    const vector<Blob<Dtype>*>& result = net_->Forward(bottom_vec, &iter_loss);
    loss += iter_loss;
    int idx = 0;
    for (int j = 0; j < result.size(); ++j) {
      const Dtype* result_vec = result[j]->cpu_data();
      for (int k = 0; k < result[j]->count(); ++k, ++idx) {
        const Dtype score = result_vec[k];
        if (i == 0) {
          test_score.push_back(score);
          test_score_output_id.push_back(j);
        } else {
          test_score[idx] += score;
        }
        const std::string& output_name = net_->blob_names()[
            net_->output_blob_indices()[j]];
        LOG(INFO) << "Batch " << i << ", " << output_name << " = " << score;
      }
    }
  }
  loss /= iterations;
  LOG(INFO) << "Loss: " << loss;
  return loss;
}

// Classify a vector of images. The input is padded up to a multiple of
// batch_size_ (AddMatVector requires this), and the padding is removed
// from the images and the predictions before returning.
template <typename Dtype>
void Classifier<Dtype>::predict(vector<Mat> &images, vector<int> *labels) {
  int original_length = images.size();
  if (original_length == 0)
    return;
  int valid_length = original_length / batch_size_ * batch_size_;
  if (original_length != valid_length) {
    valid_length += batch_size_;
    for (int i = original_length; i < valid_length; i++) {
      images.push_back(images[0].clone());
    }
  }
  vector<int> valid_labels, predicted_labels;
  valid_labels.resize(valid_length, 0);
  m_layer_->AddMatVector(images, valid_labels);
  vector<Blob<Dtype>*> bottom_vec;
  for (int i = 0; i < valid_length / batch_size_; i++) {
    const vector<Blob<Dtype>*>& result = net_->Forward(bottom_vec);
    // result[1] is the output blob holding the predicted labels
    // (the "predicted" top of the ArgMax layer added below).
    const Dtype *result_vec = result[1]->cpu_data();
    for (int j = 0; j < result[1]->count(); j++) {
      predicted_labels.push_back(result_vec[j]);
    }
  }
  if (original_length != valid_length) {
    images.erase(images.begin() + original_length, images.end());
  }
  labels->resize(original_length, 0);
  std::copy(predicted_labels.begin(),
            predicted_labels.begin() + original_length, labels->begin());
}

// Same as above, but takes raw data (num * channels * height * width values)
// instead of cv::Mat objects and feeds it to the layer through Reset().
template <typename Dtype>
void Classifier<Dtype>::predict(vector<Dtype> &data, vector<int> *labels, int num) {
  int size = channels_ * height_ * width_;
  CHECK_EQ(data.size(), num * size);
  int original_length = num;
  if (original_length == 0)
    return;
  int valid_length = original_length / batch_size_ * batch_size_;
  if (original_length != valid_length) {
    valid_length += batch_size_;
    for (int i = original_length; i < valid_length; i++) {
      for (int j = 0; j < size; j++)
        data.push_back(0);
    }
  }
  vector<int> predicted_labels;
  Dtype *label_ = new Dtype[valid_length];
  memset(label_, 0, valid_length * sizeof(Dtype));
  m_layer_->Reset(data.data(), label_, valid_length);
  vector<Blob<Dtype>*> bottom_vec;
  for (int i = 0; i < valid_length / batch_size_; i++) {
    const vector<Blob<Dtype>*>& result = net_->Forward(bottom_vec);
    const Dtype *result_vec = result[1]->cpu_data();
    for (int j = 0; j < result[1]->count(); j++) {
      predicted_labels.push_back(result_vec[j]);
    }
  }
  if (original_length != valid_length) {
    data.erase(data.begin() + original_length * size, data.end());
  }
  delete[] label_;
  labels->resize(original_length, 0);
  std::copy(predicted_labels.begin(),
            predicted_labels.begin() + original_length, labels->begin());
}

// Run the images through the net and collect the first output blob of each
// forward pass as a per-image feature vector.
template <typename Dtype>
void Classifier<Dtype>::extract_feature(vector<Mat> &images, vector<vector<Dtype> > *out) {
  int original_length = images.size();
  if (original_length == 0)
    return;
  int valid_length = original_length / batch_size_ * batch_size_;
  if (original_length != valid_length) {
    valid_length += batch_size_;
    for (int i = original_length; i < valid_length; i++) {
      images.push_back(images[0].clone());
    }
  }
  vector<int> valid_labels;
  valid_labels.resize(valid_length, 0);
  m_layer_->AddMatVector(images, valid_labels);
  vector<Blob<Dtype>*> bottom_vec;
  out->clear();
  for (int i = 0; i < valid_length / batch_size_; i++) {
    const vector<Blob<Dtype>*>& result = net_->Forward(bottom_vec);
    const Dtype *result_vec = result[0]->cpu_data();
    const int dim = result[0]->count(1);
    for (int j = 0; j < result[0]->num(); j++) {
      const Dtype *ptr = result_vec + j * dim;
      vector<Dtype> one_;
      for (int k = 0; k < dim; ++k)
        one_.push_back(ptr[k]);
      out->push_back(one_);
    }
  }
  if (original_length != valid_length) {
    images.erase(images.begin() + original_length, images.end());
    out->erase(out->begin() + original_length, out->end());
  }
}

INSTANTIATE_CLASS(Classifier);

}  // namespace caffe

Because the number of samples fed in must be an integer multiple of batch_size (AddMatVector enforces this with CHECK_EQ(num % batch_size_, 0) << "The added data must be a multiple of the batch size.";), the code above pads the input when adding data and trims the extra predictions afterwards. Finally, at the end of the model file we replace the loss layer used during training with an ArgMax layer:

layers {
  name: "predicted"
  type: ARGMAX
  bottom: "prob"
  top: "predicted"
}
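The raw-data overload of predict() can be used when the pixels are already sitting in a float buffer rather than in cv::Mat objects. Below is a minimal sketch, assuming the same 1x24x24 model as above; the buffer contents are dummy values, and keep in mind that the Reset() path hands the buffer to the layer as-is, so any scaling from transform_param may need to be applied by the caller beforehand:

#include <vector>

#include "caffe_classifier.h"

// Sketch: classify images supplied as a flat float buffer via the second
// predict() overload, which feeds the net through MemoryDataLayer::Reset().
void classify_from_buffer(caffe::Classifier<float> &classifier) {
  const int num = 2;             // two images
  const int size = 1 * 24 * 24;  // channels * height * width
  // Placeholder pixel values; a real caller would copy image data in here,
  // one image after another, each laid out channel by channel, row by row.
  std::vector<float> data(num * size, 0.0f);

  std::vector<int> labels;
  classifier.predict(data, &labels, num);  // pads internally to a full batch
  // labels[0] and labels[1] now hold the predicted class indices.
}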
