Layer is the basic unit of computation in a deep network; understanding how its source is organized helps when writing custom Caffe layers. Reading the source shows how a Layer connects to what is above and below it: not just what its inputs and outputs are, but also exactly which parts must be overridden in a subclass.
#ifndef CAFFE_LAYER_H_
#define CAFFE_LAYER_H_
#include <algorithm>
#include <string>
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/math_functions.hpp"
/**
Forward declare boost::thread instead of including boost/thread.hpp
to avoid a boost/NVCC issues (#1009, #1010) on OSX.
*/
namespace boost { class mutex; }
namespace caffe {
template <typename Dtype> class Net;  // Forward declaration. Not in upstream layer.hpp; added here for the parent_net_ accessors below.
/**
* @brief An interface for the units of computation which can be composed into a
* Net.
*
* Layer%s must implement a Forward function, in which they take their input
* (bottom) Blob%s (if any) and compute their output Blob%s (if any).
* They may also implement a Backward function, in which they compute the error
* gradients with respect to their input Blob%s, given the error gradients with
* their output Blob%s.
*/
// A Layer must implement a Forward function, which takes the bottom blobs as input and computes the output (top) blobs.
// A Layer may also implement a Backward function: given the top blobs and their error gradients, it computes the error gradients of the bottom blobs.
template <typename Dtype>
class Layer {
public:
/**
* You should not implement your own constructor. Any set up code should go
* to SetUp(), where the dimensions of the bottom blobs are provided to the
* layer.
*/
// Do not write your own constructor; any setup code belongs in SetUp(), where the dimensions of the bottom blobs are provided to the layer.
// Below is the explicit constructor. Its argument is an instance of the class protoc generates from message LayerParameter in caffe.proto, and it is assigned to layer_param_,
// so the parameters arrive already parsed from the protocol buffer. See the protected section below for what each member means.
explicit Layer(const LayerParameter& param)
: layer_param_(param), parent_net_() {
// Set phase and copy blobs (if there are any).
// phase_ is set to TRAIN or TEST
phase_ = param.phase();
// If the parameter message carries blobs, copy them into blobs_
if (layer_param_.blobs_size() > 0) {
// blobs_ is a vector of shared_ptr to Blob, so resize it first and then fill the entries one by one.
blobs_.resize(layer_param_.blobs_size());
for (int i = 0; i < layer_param_.blobs_size(); ++i) {
// reset() drops whatever Blob each shared_ptr previously owned and allocates a fresh one.
blobs_[i].reset(new Blob<Dtype>());
// FromProto parses the blob's fields (shape, data, diff) out of the protocol buffer;
// see the earlier post on Blob for the details.
blobs_[i]->FromProto(layer_param_.blobs(i));
}
}
}
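// Example (a sketch with hypothetical values): user code rarely calls this
// constructor directly; the layer factory does, after parsing the prototxt:
//   LayerParameter param;
//   param.set_name("my_relu");
//   param.set_type("ReLU");  // must be a registered layer type
//   param.set_phase(caffe::TEST);
//   shared_ptr<Layer<float> > layer = LayerRegistry<float>::CreateLayer(param);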
virtual ~Layer() {}
/**
* @brief Implements common layer setup functionality.
*
* @param bottom the preshaped input blobs
* @param top
* the allocated but unshaped output blobs, to be shaped by Reshape
*
* Checks that the number of bottom and top blobs is correct.
* Calls LayerSetUp to do special layer setup for individual layer types,
* followed by Reshape to set up sizes of top blobs and internal buffers.
* Sets up the loss weight multiplier blobs for any non-zero loss weights.
* This method may not be overridden.
*/
// SetUp performs the common layer setup in four steps. Inputs: the preshaped bottom blobs, and top blobs that are allocated but unshaped (Reshape gives them their shape).
void SetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Check that the numbers of bottom and top blobs meet this layer's requirements; a requirement is only enforced if the corresponding query function is overridden.
CheckBlobCounts(bottom, top);
// Layer-specific setup; virtual, override it with your layer's own setup.
LayerSetUp(bottom, top);
// Shape the top blobs from the bottom blobs; virtual, override it with the concrete logic.
Reshape(bottom, top);
// For every top blob with a non-zero loss weight, set up the loss-weight multiplier, i.e. how much this layer contributes to the final loss.
SetLossWeights(top);
}
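// Example (sketch): when wiring a layer by hand rather than through Net::Init,
// SetUp is called once with preshaped bottoms and allocated-but-unshaped tops:
//   Blob<float> b(1, 3, 32, 32), t;
//   vector<Blob<float>*> bottom(1, &b), top(1, &t);
//   layer->SetUp(bottom, top);  // CheckBlobCounts -> LayerSetUp -> Reshape -> SetLossWeights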
/**
* @brief Does layer-specific setup: your layer should implement this function
* as well as Reshape.
*
* @param bottom
* the preshaped input blobs, whose data fields store the input data for
* this layer
* @param top
* the allocated but unshaped output blobs
*
* This method should do one-time layer specific setup. This includes reading
* and processing relevent parameters from the <code>layer_param_</code>.
* Setting up the shapes of top blobs and internal buffers should be done in
* <code>Reshape</code>, which will be called before the forward pass to
* adjust the top blob sizes.
*/
// bottom: the input blobs, whose shapes are set in advance and whose data fields hold this layer's input
// top: the output blobs, allocated but unshaped (Reshape gives them their shape)
// This method performs one-time layer-specific setup, including reading and
// processing the relevant parameters from layer_param_.
// Shaping the top blobs and internal buffers belongs in Reshape instead,
// which is called before every forward pass to adjust the top blob sizes.
// This function is virtual: each layer type overrides it with its own setup.
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {}
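// Example (a sketch for a hypothetical MyScaleLayer computing y = scale * x;
// a real layer would read its own message from this->layer_param_):
//   template <typename Dtype>
//   void MyScaleLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
//       const vector<Blob<Dtype>*>& top) {
//     scale_ = Dtype(2);  // one-time setup only; shaping belongs in Reshape
//   }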
/**
* @brief Adjust the shapes of top blobs and internal buffers to accommodate
* the shapes of the bottom blobs.
*
* @param bottom the input blobs, with the requested input shapes
* @param top the top blobs, which should be reshaped as needed
*
* This method should reshape top blobs as needed according to the shapes
* of the bottom (input) blobs, as well as reshaping any internal buffers
* and making any other necessary adjustments so that the layer can
* accommodate the bottom blobs.
*/
// Reshape the top blobs as needed according to the bottom blob shapes, reshape any internal buffers,
// and make any other adjustments so that the layer can accommodate the bottom blobs.
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) = 0;
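// Example (sketch): the matching Reshape for the hypothetical MyScaleLayer is
// trivial because the layer is elementwise; the top mirrors the bottom:
//   template <typename Dtype>
//   void MyScaleLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
//       const vector<Blob<Dtype>*>& top) {
//     top[0]->ReshapeLike(*bottom[0]);
//   }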
/**
* @brief Given the bottom blobs, compute the top blobs and the loss.
*
* @param bottom
* the input blobs, whose data fields store the input data for this layer
* @param top
* the preshaped output blobs, whose data fields will store this layers'
* outputs
* \return The total loss from the layer.
*
* The Forward wrapper calls the relevant device wrapper function
* (Forward_cpu or Forward_gpu) to compute the top blob values given the
* bottom blobs. If the layer has any non-zero loss_weights, the wrapper
* then computes and returns the loss.
*
* Your layer should implement Forward_cpu and (optionally) Forward_gpu.
*/
// The Forward wrapper dispatches to Forward_cpu or Forward_gpu to compute the top blobs from the bottom blobs.
// If the layer has any non-zero loss_weights, the wrapper then computes and returns the loss.
inline Dtype Forward(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
/**
* @brief Given the top blob error gradients, compute the bottom blob error
* gradients.
*
* @param top
* the output blobs, whose diff fields store the gradient of the error
* with respect to themselves
* @param propagate_down
* a vector with equal length to bottom, with each index indicating
* whether to propagate the error gradients down to the bottom blob at
* the corresponding index
* @param bottom
* the input blobs, whose diff fields will store the gradient of the error
* with respect to themselves after Backward is run
*
* The Backward wrapper calls the relevant device wrapper function
* (Backward_cpu or Backward_gpu) to compute the bottom blob diffs given the
* top blob diffs.
*
* Your layer should implement Backward_cpu and (optionally) Backward_gpu.
*/
// Backward: given the top blob error gradients, compute the bottom blob error gradients (backpropagation).
// top: the output blobs, whose diff fields hold the gradient of the error w.r.t. themselves (the function's input)
// propagate_down: a bool vector as long as bottom; each entry says whether to propagate the error gradient down to the bottom blob at that index
// bottom: the input blobs; after Backward runs, their diff fields hold the gradient of the error w.r.t. themselves (the function's output)
// A custom layer implements Backward_cpu and (optionally) Backward_gpu.
inline void Backward(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom);
/**
* @brief Returns the vector of learnable parameter blobs.
*/
// Return the vector of learnable parameter blobs.
vector<shared_ptr<Blob<Dtype> > >& blobs() {
return blobs_;
}
/**
* @brief Returns the layer parameter.
*/
// Return the layer's parameters as a const LayerParameter, i.e. the protobuf message.
const LayerParameter& layer_param() const { return layer_param_; }
/**
* @brief Writes the layer parameter to a protocol buffer
*/
// Write the layer parameters to a protocol buffer.
virtual void ToProto(LayerParameter* param, bool write_diff = false);
/**
* @brief Returns the scalar loss associated with a top blob at a given index.
*/
// Return the scalar loss associated with the top blob at the given index, or 0 if the index is out of range.
inline Dtype loss(const int top_index) const {
return (loss_.size() > top_index) ? loss_[top_index] : Dtype(0);
}
/**
* @brief Sets the loss associated with a top blob at a given index.
*/
// Counterpart of loss() above: set the loss associated with the top blob at the given index; if the index is out of range, loss_ is grown and the new entry set to value.
inline void set_loss(const int top_index, const Dtype value) {
if (loss_.size() <= top_index) {
loss_.resize(top_index + 1, Dtype(0));
}
loss_[top_index] = value;
}
/**
* @brief Returns the layer type.
*/
// Return the layer type as a string.
virtual inline const char* type() const { return ""; }
/**
* @brief Returns the exact number of bottom blobs required by the layer,
* or -1 if no exact number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some exact number of bottom blobs.
*/
// Should return the exact number of bottom blobs this layer requires; override it if your layer expects an exact count. While it returns -1, this requirement is not checked.
virtual inline int ExactNumBottomBlobs() const { return -1; }
/**
* @brief Returns the minimum number of bottom blobs required by the layer,
* or -1 if no minimum number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some minimum number of bottom blobs.
*/
virtual inline int MinBottomBlobs() const { return -1; }
/**
* @brief Returns the maximum number of bottom blobs required by the layer,
* or -1 if no maximum number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some maximum number of bottom blobs.
*/
virtual inline int MaxBottomBlobs() const { return -1; }
/**
* @brief Returns the exact number of top blobs required by the layer,
* or -1 if no exact number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some exact number of top blobs.
*/
// Should return the exact number of top blobs this layer requires; override it if your layer expects an exact count. While it returns -1, this requirement is not checked.
virtual inline int ExactNumTopBlobs() const { return -1; }
/**
* @brief Returns the minimum number of top blobs required by the layer,
* or -1 if no minimum number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some minimum number of top blobs.
*/
virtual inline int MinTopBlobs() const { return -1; }
/**
* @brief Returns the maximum number of top blobs required by the layer,
* or -1 if no maximum number is required.
*
* This method should be overridden to return a non-negative value if your
* layer expects some maximum number of top blobs.
*/
virtual inline int MaxTopBlobs() const { return -1; }
/**
* @brief Returns true if the layer requires an equal number of bottom and
* top blobs.
*
* This method should be overridden to return true if your layer expects an
* equal number of bottom and top blobs.
*/
// Override this to return true if your layer expects equal numbers of bottom and top blobs.
virtual inline bool EqualNumBottomTopBlobs() const { return false; }
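// Example (sketch): the hypothetical MyScaleLayer takes exactly one bottom and
// produces exactly one top blob, so its header would override the pair of queries:
//   virtual inline int ExactNumBottomBlobs() const { return 1; }
//   virtual inline int ExactNumTopBlobs() const { return 1; }
// CheckBlobCounts (called from SetUp) then fails fast on a malformed prototxt.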
/**
* @brief Return whether "anonymous" top blobs are created automatically
* by the layer.
*
* If this method returns true, Net::Init will create enough "anonymous" top
* blobs to fulfill the requirement specified by ExactNumTopBlobs() or
* MinTopBlobs().
*/
// Return whether "anonymous" top blobs should be created automatically to satisfy this layer's requirements (an exact or minimum top blob count).
// If overridden to return true, Net::Init creates enough anonymous top blobs to satisfy ExactNumTopBlobs() or MinTopBlobs().
virtual inline bool AutoTopBlobs() const { return false; }
/**
* @brief Return whether to allow force_backward for a given bottom blob
* index.
*
* If AllowForceBackward(i) == false, we will ignore the force_backward
* setting and backpropagate to blob i only if it needs gradient information
* (as is done when force_backward == false).
*/
// Return whether force_backward may be applied to the bottom blob at the given index.
// If AllowForceBackward(i) returns false, the force_backward setting is ignored,
// and the gradient is backpropagated to blob i only if it actually needs gradient information (as when force_backward == false).
virtual inline bool AllowForceBackward(const int bottom_index) const {
return true;
}
/**
* @brief Specifies whether the layer should compute gradients w.r.t. a
* parameter at a particular index given by param_id.
*
* You can safely ignore false values and always compute gradients
* for all parameters, but possibly with wasteful computation.
*/
// Return whether this layer should compute gradients w.r.t. the parameter at index param_id (false for out-of-range indices).
inline bool param_propagate_down(const int param_id) {
return (param_propagate_down_.size() > param_id) ?
param_propagate_down_[param_id] : false;
}
/**
* @brief Sets whether the layer should compute gradients w.r.t. a
* parameter at a particular index given by param_id.
*/
// Set whether this layer should compute gradients w.r.t. the parameter at index param_id.
inline void set_param_propagate_down(const int param_id, const bool value) {
if (param_propagate_down_.size() <= param_id) {
param_propagate_down_.resize(param_id + 1, true);
}
param_propagate_down_[param_id] = value;
}
/**
* @brief get the pointer to the parent network that holds this layer
* (needed by apply_deformation_layer)
*/
inline const Net<Dtype>* parent_net() const {
CHECK_NOTNULL(parent_net_);
return parent_net_;
}
/**
* @brief set the pointer to the parent network that holds this layer
* (needed by apply_deformation_layer)
*/
inline void set_parent_net(const Net<Dtype>* net) {
parent_net_ = net;
}
protected:
/** The protobuf that stores the layer parameters */
// layer_param_: the protobuf message holding this layer's parameters
LayerParameter layer_param_;
/** The phase: TRAIN or TEST */
// phase_: the TRAIN or TEST phase
Phase phase_;
/** The vector that stores the learnable parameters as a set of blobs. */
// blobs_: a vector whose elements are shared_ptr to Blob; the learnable parameters are stored in this set of Blobs
vector<shared_ptr<Blob<Dtype> > > blobs_;
/** Vector indicating whether to compute the diff of each param blob. */
// param_propagate_down_: a vector of bools indicating whether the diff of each param blob should be computed
vector<bool> param_propagate_down_;
const Net<Dtype>* parent_net_;
/** The vector that indicates whether each top blob has a non-zero weight in
* the objective function. */
// loss_: indicates whether each top blob has a non-zero weight in the objective, i.e. whether it takes part in the loss computation
vector<Dtype> loss_;
/** @brief Using the CPU device, compute the layer output. */
// CPU Forward; pure virtual, every subclass must implement it
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) = 0;
/**
* @brief Using the GPU device, compute the layer output.
* Fall back to Forward_cpu() if unavailable.
*/
// GPU Forward; if not overridden, it falls back to Forward_cpu
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// LOG(WARNING) << "Using CPU code as backup.";
return Forward_cpu(bottom, top);
}
/**
* @brief Using the CPU device, compute the gradients for any parameters and
* for the bottom blobs if propagate_down is true.
*/
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) = 0;
/**
* @brief Using the GPU device, compute the gradients for any parameters and
* for the bottom blobs if propagate_down is true.
* Fall back to Backward_cpu() if unavailable.
*/
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
// LOG(WARNING) << "Using CPU code as backup.";
Backward_cpu(top, propagate_down, bottom);
}
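// Example (a sketch of the CPU pair for the hypothetical MyScaleLayer; with no
// GPU versions defined, the fallbacks above dispatch to these):
//   template <typename Dtype>
//   void MyScaleLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
//       const vector<Blob<Dtype>*>& top) {
//     // y = scale_ * x
//     caffe_cpu_scale(bottom[0]->count(), scale_,
//                     bottom[0]->cpu_data(), top[0]->mutable_cpu_data());
//   }
//   template <typename Dtype>
//   void MyScaleLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
//       const vector<bool>& propagate_down,
//       const vector<Blob<Dtype>*>& bottom) {
//     if (propagate_down[0]) {
//       // dL/dx = scale_ * dL/dy
//       caffe_cpu_scale(top[0]->count(), scale_,
//                       top[0]->cpu_diff(), bottom[0]->mutable_cpu_diff());
//     }
//   }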
/**
* Called by the parent Layer's SetUp to check that the number of bottom
* and top Blobs provided as input match the expected numbers specified by
* the {ExactNum,Min,Max}{Bottom,Top}Blobs() functions.
*/
// Called from SetUp to check that the numbers of bottom and top blobs meet this layer's requirements; a requirement is only enforced if the corresponding query function is overridden.
virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (ExactNumBottomBlobs() >= 0) {
CHECK_EQ(ExactNumBottomBlobs(), bottom.size())
<< type() << " Layer takes " << ExactNumBottomBlobs()
<< " bottom blob(s) as input.";
}
if (MinBottomBlobs() >= 0) {
CHECK_LE(MinBottomBlobs(), bottom.size())
<< type() << " Layer takes at least " << MinBottomBlobs()
<< " bottom blob(s) as input.";
}
if (MaxBottomBlobs() >= 0) {
CHECK_GE(MaxBottomBlobs(), bottom.size())
<< type() << " Layer takes at most " << MaxBottomBlobs()
<< " bottom blob(s) as input.";
}
if (ExactNumTopBlobs() >= 0) {
CHECK_EQ(ExactNumTopBlobs(), top.size())
<< type() << " Layer produces " << ExactNumTopBlobs()
<< " top blob(s) as output.";
}
if (MinTopBlobs() >= 0) {
CHECK_LE(MinTopBlobs(), top.size())
<< type() << " Layer produces at least " << MinTopBlobs()
<< " top blob(s) as output.";
}
if (MaxTopBlobs() >= 0) {
CHECK_GE(MaxTopBlobs(), top.size())
<< type() << " Layer produces at most " << MaxTopBlobs()
<< " top blob(s) as output.";
}
if (EqualNumBottomTopBlobs()) {
CHECK_EQ(bottom.size(), top.size())
<< type() << " Layer produces one top blob as output for each "
<< "bottom blob input.";
}
}
/**
* Called by SetUp to initialize the weights associated with any top blobs in
* the loss function. Store non-zero loss weights in the diff blob.
*/
// Called from SetUp to initialize the loss weights of the top blobs that appear in the loss function; non-zero loss weights are stored in the top blob's diff.
inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
const int num_loss_weights = layer_param_.loss_weight_size();
if (num_loss_weights) {
// If any loss_weight is specified, there must be exactly one per top blob
CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
"unspecified or specified once per top blob.";
for (int top_id = 0; top_id < top.size(); ++top_id) {
const Dtype loss_weight = layer_param_.loss_weight(top_id);
if (loss_weight == Dtype(0)) { continue; }
this->set_loss(top_id, loss_weight);
const int count = top[top_id]->count();
Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
caffe_set(count, loss_weight, loss_multiplier);
}
}
}
private:
DISABLE_COPY_AND_ASSIGN(Layer);
}; // class Layer
// Forward and backward wrappers. You should implement the cpu and
// gpu specific implementations instead, and should not change these
// functions.
// Forward and Backward wrappers: implement your own CPU- and GPU-specific
// versions instead; these wrappers dispatch to them and should not be modified.
template <typename Dtype>
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype loss = 0;
Reshape(bottom, top);
switch (Caffe::mode()) {
case Caffe::CPU:
Forward_cpu(bottom, top);
// Accumulate the total loss over all loss-bearing top blobs;
// each contribution is the dot product of the blob's data with its loss weights (held in diff).
for (int top_id = 0; top_id < top.size(); ++top_id) {
if (!this->loss(top_id)) { continue; }
const int count = top[top_id]->count();
const Dtype* data = top[top_id]->cpu_data();
const Dtype* loss_weights = top[top_id]->cpu_diff();
loss += caffe_cpu_dot(count, data, loss_weights);
}
break;
case Caffe::GPU:
Forward_gpu(bottom, top);
#ifndef CPU_ONLY
for (int top_id = 0; top_id < top.size(); ++top_id) {
if (!this->loss(top_id)) { continue; }
const int count = top[top_id]->count();
const Dtype* data = top[top_id]->gpu_data();
const Dtype* loss_weights = top[top_id]->gpu_diff();
Dtype blob_loss = 0;
caffe_gpu_dot(count, data, loss_weights, &blob_loss);
loss += blob_loss;
}
#endif
break;
default:
LOG(FATAL) << "Unknown caffe mode.";
}
return loss;
}
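// Usage sketch: after SetUp, the wrapper runs the device implementation and
// returns the loss. Because SetLossWeights stored the constant loss_weight in
// each loss-bearing top diff, the dot product above reduces to
// loss_weight * sum(top data); for layers with all-zero loss weights it returns 0.
//   Caffe::set_mode(Caffe::CPU);
//   float loss = layer->Forward(bottom, top);  // dispatches to Forward_cpu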
template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
switch (Caffe::mode()) {
case Caffe::CPU:
Backward_cpu(top, propagate_down, bottom);
break;
case Caffe::GPU:
Backward_gpu(top, propagate_down, bottom);
break;
default:
LOG(FATAL) << "Unknown caffe mode.";
}
}
// Serialize LayerParameter to protocol buffer
// Write layer_param_ and the Blob data into the protocol buffer.
// message LayerParameter declares repeated BlobProto blobs = 7;
// so the blobs field carries the numerical data, while the rest of layer_param_ carries the layer's settings.
template <typename Dtype>
void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff) {
param->Clear();
param->CopyFrom(layer_param_);
param->clear_blobs();
for (int i = 0; i < blobs_.size(); ++i) {
blobs_[i]->ToProto(param->add_blobs(), write_diff);
}
}
} // namespace caffe
#endif // CAFFE_LAYER_H_
Reading the layer.hpp source, we learn that a custom Caffe layer needs to:
1. Override, according to the layer's specifics, the steps that SetUp calls (typically LayerSetUp and Reshape, plus the blob-count queries consulted by CheckBlobCounts).
2. Override Forward_cpu/gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) and
Backward_cpu/gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
3. Set up the required member variables (a header skeleton assembling the earlier sketches follows below).
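Putting the sketches together: a minimal header for the hypothetical MyScaleLayer used in the comments above could look like the following (the method bodies shown earlier would live in a matching my_scale_layer.cpp; all names are illustrative, not part of upstream Caffe):
#ifndef CAFFE_MY_SCALE_LAYER_HPP_
#define CAFFE_MY_SCALE_LAYER_HPP_
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
namespace caffe {
template <typename Dtype>
class MyScaleLayer : public Layer<Dtype> {
 public:
  explicit MyScaleLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual inline const char* type() const { return "MyScale"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }
 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom);
  Dtype scale_;  // constant set once in LayerSetUp
};
}  // namespace caffe
#endif  // CAFFE_MY_SCALE_LAYER_HPP_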
The layer.cpp in the current Caffe source contains just one statement:
INSTANTIATE_CLASS(Layer); // instantiate the Layer template for float and double
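For a custom layer to be constructible from a prototxt by its type name, its .cpp must also instantiate the template and register it with the layer factory (a sketch, assuming the MyScaleLayer above):
INSTANTIATE_CLASS(MyScaleLayer);
REGISTER_LAYER_CLASS(MyScale); // registers the type string "MyScale" with LayerRegistry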