Caffe中Layer注册机制

版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/xizero00/article/details/50923722

一、LayerRegistry的作用简介


LayerRegistry的功能很简单,就是将类和对应的字符串类型放入到一个map当中去,以便灵活调用。主要就是注册类的功能

二、LayerRegistry类的详细介绍


1)构造函数和析构函数

构造函数

 

  
  
  1. // 禁止实例化,因为该类都是静态函数,所以是私有的
  2. LayerRegistry() {}

2)类型定义


  
  
  1. // 函数指针Creator,返回的是Layer<Dtype>类型的指针
  2. typedef shared_ptr<Layer<Dtype> > (*Creator)( const LayerParameter&);
  3. // CreatorRegistry是字符串与对应的Creator的映射
  4. typedef std:: map< string, Creator> CreatorRegistry;

3)成员函数

3-1加入一个Creator到注册表


  
  
  1. // 给定类型,以及函数指针,加入到注册表
  2. static void AddCreator(const string& type, Creator creator) {
  3. CreatorRegistry& registry = Registry();
  4. CHECK_EQ(registry.count(type), 0)
  5. << "Layer type " << type << " already registered.";
  6. registry[type] = creator;
  7. }


3-2给定层的类型,创建层

这个创建层在net.cpp中会用到,在初始化整个网络的时候会根据参数文件中的层的类型去创建该层的实例

  
  
  1. static shared_ptr<Layer<Dtype> > CreateLayer( const LayerParameter& param) {
  2. if (Caffe::root_solver()) {
  3. LOG(INFO) << "Creating layer " << param.name();
  4. }
  5. // 从参数中获得类型字符串
  6. const string& type = param.type();
  7. // 获得注册表指针
  8. CreatorRegistry& registry = Registry();
  9. // 测试是否查找到给定type的Creator
  10. CHECK_EQ(registry.count(type), 1) << "Unknown layer type: " << type
  11. << " (known types: " << LayerTypeListString() << ")";
  12. // 调用对应的层的Creator函数
  13. return registry[type](param);
  14. }

3-3返回层的类型列表


  
  
  1. static vector< string> LayerTypeList() {
  2. // 获得注册表
  3. CreatorRegistry& registry = Registry();
  4. vector< string> layer_types;
  5. // 遍历注册表压入layer_types字符串容器
  6. for ( typename CreatorRegistry::iterator iter = registry.begin();
  7. iter != registry.end(); ++iter) {
  8. layer_types.push_back(iter->first);
  9. }
  10. return layer_types;
  11. }

3-4返回一个string,就是把所有的类型都拼起来用逗号分隔形成一个字符串


  
  
  1. static string LayerTypeListString() {
  2. vector< string> layer_types = LayerTypeList();
  3. string layer_types_str;
  4. for ( vector< string>::iterator iter = layer_types.begin();
  5. iter != layer_types.end(); ++iter) {
  6. if (iter != layer_types.begin()) {
  7. layer_types_str += ", ";
  8. }
  9. layer_types_str += *iter;
  10. }
  11. return layer_types_str;
  12. }
  13. };


3-5 获取注册表(静态的,第一次的时候才new,以后都是直接return的)


  
  
  1. // 产生一个CreatorRegistry映射的的实例赋值给g_registry_
  2. // 表示内部的注册表
  3. // 静态函数,第一次的时候会new然后return,其余时间都是return
  4. static CreatorRegistry& Registry() {
  5. static CreatorRegistry* g_registry_ = new CreatorRegistry();
  6. return *g_registry_;
  7. }


3-6此外还定义了一个层注册器


  
  
  1. // LayerRegisterer
  2. // 自己定义层的注册器
  3. // 以供后面的宏进行使用
  4. template < typename Dtype>
  5. class LayerRegisterer {
  6. public:
  7. // 层的注册器的构造函数
  8. LayerRegisterer( const string& type,
  9. shared_ptr<Layer<Dtype> > (*creator)( const LayerParameter&)) {
  10. // LOG(INFO) << "Registering layer type: " << type;
  11. // 还是调用的层注册表中的加入Creator函数加入注册表
  12. LayerRegistry<Dtype>::AddCreator(type, creator);
  13. }
  14. };

三、其他:

为了方便作者还弄了个宏便于注册自己写的层类


  
  
  1. #define REGISTER_LAYER_CREATOR(type, creator) \
  2. static LayerRegisterer<float> g_creator_f_##type(#type, creator<float>); \
  3. static LayerRegisterer<double> g_creator_d_##type(#type, creator<double>) \
  4. #define REGISTER_LAYER_CLASS(type) \
  5. template <typename Dtype> \
  6. shared_ptr<Layer<Dtype> > Creator_##type##Layer(const LayerParameter& param) \
  7. { \
  8. return shared_ptr<Layer<Dtype> >(new type##Layer<Dtype>(param)); \
  9. } \
  10. REGISTER_LAYER_CREATOR(type, Creator_##type##Layer)

下面对该宏进行详细解释:


  
  
  1. // 生成g_creator_f_type(type, creator<Dtype>)的两个函数 (double和float类型)
  2. #define REGISTER_LAYER_CREATOR(type, creator) \
  3. static LayerRegisterer<float> g_creator_f_##type(#type, creator<float>); \
  4. static LayerRegisterer<double> g_creator_d_##type(#type, creator<double>) \
  5. // 注册自己定义的类,类名为type,
  6. // 假设比如type=bias,那么生成如下的代码
  7. // 下面的函数直接调用你自己的类的构造函数生成一个类的实例并返回
  8. // Creator_biasLayer(const LayerParameter& param)
  9. // 下面的语句是为你自己的类定义了LayerRegisterer<float>类型的静态变量g_creator_f_bias(float类型,实际上就是把你自己的类的字符串类型和类的创建函数绑定到注册表)
  10. // static LayerRegisterer<float> g_creator_f_bias("bias", Creator_biasLayer<float>)
  11. // 下面的语句为你自己的类定义了LayerRegisterer<double>类型的静态变量g_creator_d_bias(double类型,实际上就是把你自己的类的字符串类型和类的创建函数绑定到注册表)
  12. // static LayerRegisterer<double> g_creator_d_bias("bias", Creator_biasLayer<double>)
  13. #define REGISTER_LAYER_CLASS(type) \
  14. template <typename Dtype> \
  15. shared_ptr<Layer<Dtype> > Creator_##type##Layer(const LayerParameter& param) \
  16. { \
  17. return shared_ptr<Layer<Dtype> >(new type##Layer<Dtype>(param)); \
  18. } \
  19. REGISTER_LAYER_CREATOR(type, Creator_##type##Layer)

四、Layer_factory.cpp中的实现

首先给出卷积层的参数

  
  
  1. message ConvolutionParameter {
  2. optional uint32 num_output = 1; // The number of outputs for the layer
  3. optional bool bias_term = 2 [ default = true]; // whether to have bias terms
  4. // Pad, kernel size, and stride are all given as a single value for equal
  5. // dimensions in all spatial dimensions, or once per spatial dimension.
  6. repeated uint32 pad = 3; // The padding size; defaults to 0
  7. repeated uint32 kernel_size = 4; // The kernel size
  8. repeated uint32 stride = 6; // The stride; defaults to 1
  9. // For 2D convolution only, the *_h and *_w versions may also be used to
  10. // specify both spatial dimensions.
  11. optional uint32 pad_h = 9 [ default = 0]; // The padding height (2D only)
  12. optional uint32 pad_w = 10 [ default = 0]; // The padding width (2D only)
  13. optional uint32 kernel_h = 11; // The kernel height (2D only)
  14. optional uint32 kernel_w = 12; // The kernel width (2D only)
  15. optional uint32 stride_h = 13; // The stride height (2D only)
  16. optional uint32 stride_w = 14; // The stride width (2D only)
  17. optional uint32 group = 5 [ default = 1]; // The group size for group conv
  18. optional FillerParameter weight_filler = 7; // The filler for the weight
  19. optional FillerParameter bias_filler = 8; // The filler for the bias
  20. enum Engine {
  21. DEFAULT = 0;
  22. CAFFE = 1;
  23. CUDNN = 2;
  24. }
  25. optional Engine engine = 15 [ default = DEFAULT];
  26. // The axis to interpret as "channels" when performing convolution.
  27. // Preceding dimensions are treated as independent inputs;
  28. // succeeding dimensions are treated as "spatial".
  29. // With (N, C, H, W) inputs, and axis == 1 (the default), we perform
  30. // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for
  31. // groups g>1) filters across the spatial axes (H, W) of the input.
  32. // With (N, C, D, H, W) inputs, and axis == 1, we perform
  33. // N independent 3D convolutions, sliding (C/g)-channels
  34. // filters across the spatial axes (D, H, W) of the input.
  35. optional int32 axis = 16 [ default = 1];
  36. // Whether to force use of the general ND convolution, even if a specific
  37. // implementation for blobs of the appropriate number of spatial dimensions
  38. // is available. (Currently, there is only a 2D-specific convolution
  39. // implementation; for input blobs with num_axes != 2, this option is
  40. // ignored and the ND implementation will be used.)
  41. optional bool force_nd_im2col = 17 [ default = false];
  42. }

注册卷积层、注册池化层、注册ReLU层、注册Sigmoid层、注册Softmax层、注册Tanh层,注册python层(如果开启python绑定的话)
代码如下:

  
  
  1. // Make sure we include Python.h before any system header
  2. // to avoid _POSIX_C_SOURCE redefinition
  3. #ifdef WITH_PYTHON_LAYER
  4. #include <boost/python.hpp>
  5. #endif
  6. #include <string>
  7. #include "caffe/layer.hpp"
  8. #include "caffe/layer_factory.hpp"
  9. #include "caffe/proto/caffe.pb.h"
  10. #include "caffe/vision_layers.hpp"
  11. #ifdef WITH_PYTHON_LAYER
  12. #include "caffe/python_layer.hpp"
  13. #endif
  14. namespace caffe {
  15. // 写一个获取卷积层实例的函数
  16. // Get convolution layer according to engine.
  17. template < typename Dtype>
  18. shared_ptr<Layer<Dtype> > GetConvolutionLayer(
  19. const LayerParameter& param) {
  20. // 从参数中获取是使用什么引擎进行计算CUDNN还是CAFFE还是DEFAULT
  21. // engine可从caffe.proto中看出是枚举类型的
  22. ConvolutionParameter_Engine engine = param.convolution_param().engine();
  23. if (engine == ConvolutionParameter_Engine_DEFAULT) {
  24. engine = ConvolutionParameter_Engine_CAFFE;
  25. #ifdef USE_CUDNN
  26. engine = ConvolutionParameter_Engine_CUDNN;
  27. #endif
  28. }
  29. if (engine == ConvolutionParameter_Engine_CAFFE) {
  30. // 直接初始化Caffe的卷积层
  31. return shared_ptr<Layer<Dtype> >( new ConvolutionLayer<Dtype>(param));
  32. #ifdef USE_CUDNN
  33. } else if (engine == ConvolutionParameter_Engine_CUDNN) {
  34. // 初始化CUDNN的卷积层
  35. return shared_ptr<Layer<Dtype> >( new CuDNNConvolutionLayer<Dtype>(param));
  36. #endif
  37. } else { // 否则就是出错了
  38. LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  39. }
  40. }
  41. // 注册该卷积层,类型名为Convolution,获取卷积层的实例为GetConvolutionLayer函数
  42. REGISTER_LAYER_CREATOR(Convolution, GetConvolutionLayer);
  43. // 获取池化层的实例,同卷积层的逻辑
  44. // Get pooling layer according to engine.
  45. template < typename Dtype>
  46. shared_ptr<Layer<Dtype> > GetPoolingLayer( const LayerParameter& param) {
  47. PoolingParameter_Engine engine = param.pooling_param().engine();
  48. if (engine == PoolingParameter_Engine_DEFAULT) {
  49. engine = PoolingParameter_Engine_CAFFE;
  50. #ifdef USE_CUDNN
  51. engine = PoolingParameter_Engine_CUDNN;
  52. #endif
  53. }
  54. if (engine == PoolingParameter_Engine_CAFFE) {
  55. return shared_ptr<Layer<Dtype> >( new PoolingLayer<Dtype>(param));
  56. #ifdef USE_CUDNN
  57. } else if (engine == PoolingParameter_Engine_CUDNN) {
  58. PoolingParameter p_param = param.pooling_param();
  59. if (p_param.pad() || p_param.pad_h() || p_param.pad_w() ||
  60. param.top_size() > 1) {
  61. LOG(INFO) << "CUDNN does not support padding or multiple tops. "
  62. << "Using Caffe's own pooling layer.";
  63. return shared_ptr<Layer<Dtype> >( new PoolingLayer<Dtype>(param));
  64. }
  65. return shared_ptr<Layer<Dtype> >( new CuDNNPoolingLayer<Dtype>(param));
  66. #endif
  67. } else {
  68. LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  69. }
  70. }
  71. // 注册池化层
  72. REGISTER_LAYER_CREATOR(Pooling, GetPoolingLayer);
  73. // 注册ReLU层
  74. // Get relu layer according to engine.
  75. template < typename Dtype>
  76. shared_ptr<Layer<Dtype> > GetReLULayer( const LayerParameter& param) {
  77. ReLUParameter_Engine engine = param.relu_param().engine();
  78. if (engine == ReLUParameter_Engine_DEFAULT) {
  79. engine = ReLUParameter_Engine_CAFFE;
  80. #ifdef USE_CUDNN
  81. engine = ReLUParameter_Engine_CUDNN;
  82. #endif
  83. }
  84. if (engine == ReLUParameter_Engine_CAFFE) {
  85. return shared_ptr<Layer<Dtype> >( new ReLULayer<Dtype>(param));
  86. #ifdef USE_CUDNN
  87. } else if (engine == ReLUParameter_Engine_CUDNN) {
  88. return shared_ptr<Layer<Dtype> >( new CuDNNReLULayer<Dtype>(param));
  89. #endif
  90. } else {
  91. LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  92. }
  93. }
  94. REGISTER_LAYER_CREATOR(ReLU, GetReLULayer);
  95. // 注册sigmoid层
  96. // Get sigmoid layer according to engine.
  97. template < typename Dtype>
  98. shared_ptr<Layer<Dtype> > GetSigmoidLayer( const LayerParameter& param) {
  99. SigmoidParameter_Engine engine = param.sigmoid_param().engine();
  100. if (engine == SigmoidParameter_Engine_DEFAULT) {
  101. engine = SigmoidParameter_Engine_CAFFE;
  102. #ifdef USE_CUDNN
  103. engine = SigmoidParameter_Engine_CUDNN;
  104. #endif
  105. }
  106. if (engine == SigmoidParameter_Engine_CAFFE) {
  107. return shared_ptr<Layer<Dtype> >( new SigmoidLayer<Dtype>(param));
  108. #ifdef USE_CUDNN
  109. } else if (engine == SigmoidParameter_Engine_CUDNN) {
  110. return shared_ptr<Layer<Dtype> >( new CuDNNSigmoidLayer<Dtype>(param));
  111. #endif
  112. } else {
  113. LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  114. }
  115. }
  116. REGISTER_LAYER_CREATOR(Sigmoid, GetSigmoidLayer);
  117. // 注册softmax层
  118. // Get softmax layer according to engine.
  119. template < typename Dtype>
  120. shared_ptr<Layer<Dtype> > GetSoftmaxLayer( const LayerParameter& param) {
  121. SoftmaxParameter_Engine engine = param.softmax_param().engine();
  122. if (engine == SoftmaxParameter_Engine_DEFAULT) {
  123. engine = SoftmaxParameter_Engine_CAFFE;
  124. #ifdef USE_CUDNN
  125. engine = SoftmaxParameter_Engine_CUDNN;
  126. #endif
  127. }
  128. if (engine == SoftmaxParameter_Engine_CAFFE) {
  129. return shared_ptr<Layer<Dtype> >( new SoftmaxLayer<Dtype>(param));
  130. #ifdef USE_CUDNN
  131. } else if (engine == SoftmaxParameter_Engine_CUDNN) {
  132. return shared_ptr<Layer<Dtype> >( new CuDNNSoftmaxLayer<Dtype>(param));
  133. #endif
  134. } else {
  135. LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  136. }
  137. }
  138. REGISTER_LAYER_CREATOR(Softmax, GetSoftmaxLayer);
  139. // 注册tanh层
  140. // Get tanh layer according to engine.
  141. template < typename Dtype>
  142. shared_ptr<Layer<Dtype> > GetTanHLayer( const LayerParameter& param) {
  143. TanHParameter_Engine engine = param.tanh_param().engine();
  144. if (engine == TanHParameter_Engine_DEFAULT) {
  145. engine = TanHParameter_Engine_CAFFE;
  146. #ifdef USE_CUDNN
  147. engine = TanHParameter_Engine_CUDNN;
  148. #endif
  149. }
  150. if (engine == TanHParameter_Engine_CAFFE) {
  151. return shared_ptr<Layer<Dtype> >( new TanHLayer<Dtype>(param));
  152. #ifdef USE_CUDNN
  153. } else if (engine == TanHParameter_Engine_CUDNN) {
  154. return shared_ptr<Layer<Dtype> >( new CuDNNTanHLayer<Dtype>(param));
  155. #endif
  156. } else {
  157. LOG(FATAL) << "Layer " << param.name() << " has unknown engine.";
  158. }
  159. }
  160. REGISTER_LAYER_CREATOR(TanH, GetTanHLayer);
  161. // 注册PYTHON层
  162. #ifdef WITH_PYTHON_LAYER
  163. template < typename Dtype>
  164. shared_ptr<Layer<Dtype> > GetPythonLayer( const LayerParameter& param) {
  165. Py_Initialize();
  166. try {
  167. bp::object module = bp:: import(param.python_param(). module().c_str());
  168. bp::object layer = module.attr(param.python_param().layer().c_str())(param);
  169. return bp::extract< shared_ptr<PythonLayer<Dtype> > >(layer)();
  170. } catch (bp::error_already_set) {
  171. PyErr_Print();
  172. throw;
  173. }
  174. }
  175. REGISTER_LAYER_CREATOR(Python, GetPythonLayer);
  176. #endif
  177. // Layers that use their constructor as their default creator should be
  178. // registered in their corresponding cpp files. Do not register them here.
  179. } // namespace caffe

五、总结

作者还真是煞费苦心,弄了个宏,一下子就注册了类。

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值