1. Outline
Layer is the parent class of all layers; it mainly defines the member variables and functions that every layer shares.
LayerParameter layer_param_; // the layer's parameters
Phase phase_; // the layer's phase, Train or Test
vector<shared_ptr<Blob<Dtype> > > blobs_; // stores the learnable parameters, e.g. a convolution layer's w and b
vector<bool> param_propagate_down_; // used together with blobs_; marks whether each parameter needs its gradient computed
vector<Dtype> loss_; // the weight with which each top blob contributes to the loss
bool is_shared_; // flag: the layer is shared by multiple nets
shared_ptr<boost::mutex> forward_mutex_; // mutex used when the layer is shared
void InitMutex(); // initialize the mutex
void Lock(); // acquire the mutex
void Unlock(); // release the mutex
Important functions:
Layer(param); // construct the layer
SetUp(bottom, top); // preparation before the layer's forward/backward passes
Forward(bottom, top); // forward pass
Backward(bottom, top); // backward pass
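To make the interface concrete before reading the source, here is a minimal sketch of a custom layer. IdentityLayer is hypothetical (not part of Caffe); it simply copies bottom to top, and shows which hooks a subclass overrides; Forward()/Backward() themselves are never overridden.
template <typename Dtype>
class IdentityLayer : public Layer<Dtype> {  // hypothetical example layer
 public:
  explicit IdentityLayer(const LayerParameter& param) : Layer<Dtype>(param) {}
  virtual inline const char* type() const { return "Identity"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
                       const vector<Blob<Dtype>*>& top) {
    top[0]->ReshapeLike(*bottom[0]);  // output has the same shape as the input
  }
 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
                           const vector<Blob<Dtype>*>& top) {
    // top = bottom
    caffe_copy(bottom[0]->count(), bottom[0]->cpu_data(),
               top[0]->mutable_cpu_data());
  }
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
                            const vector<bool>& propagate_down,
                            const vector<Blob<Dtype>*>& bottom) {
    if (propagate_down[0]) {  // the gradient of the identity is the identity
      caffe_copy(top[0]->count(), top[0]->cpu_diff(),
                 bottom[0]->mutable_cpu_diff());
    }
  }
};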
Below is the annotated source code:
template <typename Dtype>
class Layer {
public:
/**
* You should not implement your own constructor. Any set up code should go
* to SetUp(), where the dimensions of the bottom blobs are provided to the
* layer.
*/
explicit Layer(const LayerParameter& param): layer_param_(param), is_shared_(false)
{
// Set phase and copy blobs (if there are any).
phase_ = param.phase(); // the layer's phase, TRAIN or TEST
if (layer_param_.blobs_size() > 0)
{
// blobs_ stores the learnable parameters; rebuild each one from the proto
blobs_.resize(layer_param_.blobs_size());
for (int i = 0; i < layer_param_.blobs_size(); ++i)
{
blobs_[i].reset(new Blob<Dtype>());
blobs_[i]->FromProto(layer_param_.blobs(i));
}
}
}
virtual ~Layer() {}
/**************************************************************
Function: SetUp
Description: common setup run once before the forward/backward
             passes: checks the blob counts, does layer-specific
             setup, reshapes the top blobs, sets the loss weights
Calls: 1. InitMutex
       2. CheckBlobCounts
       3. LayerSetUp
       4. Reshape
       5. SetLossWeights
Called By: Net::Init
Input: 1. bottom: the input blobs
       2. top: the output blobs
Return: none
***************************************************************/
void SetUp(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top)
{
// initialize the mutex
InitMutex();
// check that the numbers of bottom/top blobs match this layer's requirements
CheckBlobCounts(bottom, top);
// layer-specific setup, implemented by subclasses
LayerSetUp(bottom, top);
// shape the top blobs and allocate memory, implemented by subclasses
Reshape(bottom, top);
// set the loss weights
SetLossWeights(top);
}
/**************************************************************
Function: LayerSetUp
Description: layer-specific one-time setup; each subclass provides
             its own (reshaping the top blobs is left to Reshape)
Calls: none
Called By: 1. SetUp
Input: 1. bottom: the input blobs
       2. top: the output blobs
Return: none
***************************************************************/
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top) {}
// parallel-sharing flag; by default only data layers can be shared in parallel
virtual inline bool ShareInParallel() const { return false; }
// whether this layer is shared by other nets
inline bool IsShared() const { return is_shared_; }
// mark the layer as shared
inline void SetShared(bool is_shared)
{
CHECK(ShareInParallel() || !is_shared)
<< type() << "Layer does not support sharing.";
is_shared_ = is_shared;
}
/**************************************************************
Function: Reshape
Description: shape the top blobs (and any internal buffers) to fit
             the bottom blobs; pure virtual, implemented per layer
Calls: none
Called By: 1. SetUp
Input: 1. bottom: the input blobs
       2. top: the output blobs
Return: none
***************************************************************/
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top) = 0;
// Forward wrapper, defined after the class; it dispatches to Forward_cpu/Forward_gpu, which subclasses implement
inline Dtype Forward(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top);
// Backward wrapper, defined after the class; it dispatches to Backward_cpu/Backward_gpu, which subclasses implement
inline void Backward(const vector<Blob<Dtype>*>& top,const vector<bool>& propagate_down,const vector<Blob<Dtype>*>& bottom);
// return the blobs holding the learnable parameters
vector<shared_ptr<Blob<Dtype> > >& blobs()
{
return blobs_;
}
// return the layer's parameters
const LayerParameter& layer_param() const { return layer_param_; }
// serialize the layer, i.e. save its parameters into a proto
virtual void ToProto(LayerParameter* param, bool write_diff = false);
// get the loss weight associated with a top blob
inline Dtype loss(const int top_index) const
{
return (loss_.size() > top_index) ? loss_[top_index] : Dtype(0);
}
// set the loss weight associated with a top blob
inline void set_loss(const int top_index, const Dtype value)
{
if (loss_.size() <= top_index)
{
loss_.resize(top_index + 1, Dtype(0));
}
loss_[top_index] = value;
}
=================================================================
=================================================================
// whether force_backward is allowed for a given bottom blob
virtual inline bool AllowForceBackward(const int bottom_index) const
{
return true;
}
// get the flag saying whether a parameter blob needs its gradient computed
inline bool param_propagate_down(const int param_id)
{
return (param_propagate_down_.size() > param_id) ?
param_propagate_down_[param_id] : false;
}
// set the flag saying whether a parameter blob needs its gradient computed
inline void set_param_propagate_down(const int param_id, const bool value)
{
if (param_propagate_down_.size() <= param_id)
{
param_propagate_down_.resize(param_id + 1, true);
}
param_propagate_down_[param_id] = value;
}
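// Usage sketch (hedged): a caller can freeze a parameter blob so that Backward
// skips its gradient; blobs_[0] = weights, blobs_[1] = bias is the usual
// convention in convolution/inner-product layers and is assumed here:
//   layer->set_param_propagate_down(0, true);   // weights: gradient needed
//   layer->set_param_propagate_down(1, false);  // bias: frozen
// A subclass's Backward_cpu then guards its gradient code with
// this->param_propagate_down(1) before writing blobs_[1]->mutable_cpu_diff().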
===============================================================
===============================================================
// CPU forward pass; pure virtual, every subclass must implement it
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top)=0;
// GPU forward pass; falls back to the CPU implementation if no GPU version exists
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top)
{
// LOG(WARNING) << "Using CPU code as backup.";
return Forward_cpu(bottom, top);
}
// CPU backward pass; pure virtual, every subclass must implement it
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,const vector<bool>& propagate_down,const vector<Blob<Dtype>*>& bottom) = 0;
// GPU backward pass; falls back to the CPU implementation if no GPU version exists
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,const vector<bool>& propagate_down,const vector<Blob<Dtype>*>& bottom)
{
// LOG(WARNING) << "Using CPU code as backup.";
Backward_cpu(top, propagate_down, bottom);
}
==============================================================
==============================================================
/**
 * @brief Returns the layer type.
 */
virtual inline const char* type() const { return ""; }
// exact number of bottom blobs required (-1 means no requirement)
virtual inline int ExactNumBottomBlobs() const { return -1; }
// minimum number of bottom blobs required
virtual inline int MinBottomBlobs() const { return -1; }
// maximum number of bottom blobs allowed
virtual inline int MaxBottomBlobs() const { return -1; }
// exact number of top blobs required
virtual inline int ExactNumTopBlobs() const { return -1; }
// minimum number of top blobs required
virtual inline int MinTopBlobs() const { return -1; }
// maximum number of top blobs allowed
virtual inline int MaxTopBlobs() const { return -1; }
// whether the layer requires equal numbers of bottom and top blobs
virtual inline bool EqualNumBottomTopBlobs() const { return false; }
// whether Net should auto-create top blobs to satisfy ExactNumTopBlobs() or MinTopBlobs()
virtual inline bool AutoTopBlobs() const { return false; }
/**************************************************************
Function: CheckBlobCounts
Description: check that the numbers of bottom/top blobs satisfy
             the rules this layer declares
Calls: the count hints above (ExactNum*/Min*/Max*BottomBlobs/TopBlobs)
Called By: 1. SetUp
Input: 1. bottom: the input blobs
       2. top: the output blobs
Return: none (aborts via CHECK_* if a rule is violated)
***************************************************************/
virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top)
{
// check that bottom.size() satisfies the exact/min/max constraints
if (ExactNumBottomBlobs() >= 0)
{
CHECK_EQ(ExactNumBottomBlobs(), bottom.size())
<< type() << " Layer takes " << ExactNumBottomBlobs()
<< " bottom blob(s) as input.";
}
if (MinBottomBlobs() >= 0)
{
CHECK_LE(MinBottomBlobs(), bottom.size())
<< type() << " Layer takes at least " << MinBottomBlobs()
<< " bottom blob(s) as input.";
}
if (MaxBottomBlobs() >= 0)
{
CHECK_GE(MaxBottomBlobs(), bottom.size())
<< type() << " Layer takes at most " << MaxBottomBlobs()
<< " bottom blob(s) as input.";
}
// check that top.size() satisfies the exact/min/max constraints
if (ExactNumTopBlobs() >= 0)
{
CHECK_EQ(ExactNumTopBlobs(), top.size())
<< type() << " Layer produces " << ExactNumTopBlobs()
<< " top blob(s) as output.";
}
if (MinTopBlobs() >= 0)
{
CHECK_LE(MinTopBlobs(), top.size())
<< type() << " Layer produces at least " << MinTopBlobs()
<< " top blob(s) as output.";
}
if (MaxTopBlobs() >= 0)
{
CHECK_GE(MaxTopBlobs(), top.size())
<< type() << " Layer produces at most " << MaxTopBlobs()
<< " top blob(s) as output.";
}
// check that the bottom and top counts are equal
if (EqualNumBottomTopBlobs())
{
CHECK_EQ(bottom.size(), top.size())
<< type() << " Layer produces one top blob as output for each "
<< "bottom blob input.";
}
}
/**************************************************************
Function: SetLossWeights
Description: initialize each top blob's cpu_diff from loss_weight;
             loss_weight is the weight of that blob's contribution
             to the loss, one weight per top blob
Calls: 1. set_loss
       2. caffe_set
Called By: 1. SetUp
Input: 1. top: the output blobs
Return: none
***************************************************************/
inline void SetLossWeights(const vector<Blob<Dtype>*>& top)
{
const int num_loss_weights = layer_param_.loss_weight_size();
// one loss_weight per top blob
if (num_loss_weights)
{
CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
"unspecified or specified once per top blob.";
for (int top_id = 0; top_id < top.size(); ++top_id)
{
const Dtype loss_weight = layer_param_.loss_weight(top_id);
if (loss_weight == Dtype(0)) { continue; }
this->set_loss(top_id, loss_weight);
const int count = top[top_id]->count();
Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
caffe_set(count, loss_weight, loss_multiplier);
}
}
}
}; // class Layer
/*********************************************************************************
End of the Layer class declaration
**********************************************************************************/
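Before the wrapper definitions, a rough usage sketch of how a net builds a layer and runs the setup sequence described above. This is hedged: LayerRegistry is Caffe's real layer factory, but layer_param, input_blob and output_blob are hypothetical names for objects the caller prepares.
shared_ptr<Layer<float> > layer = LayerRegistry<float>::CreateLayer(layer_param);
vector<Blob<float>*> bottom(1, &input_blob);  // the input blobs
vector<Blob<float>*> top(1, &output_blob);    // the output blobs
// runs InitMutex -> CheckBlobCounts -> LayerSetUp -> Reshape -> SetLossWeights
layer->SetUp(bottom, top);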
// Forward and backward wrappers. You should implement the cpu and
// gpu specific implementations instead, and should not change these
// functions.
/**************************************************************
Function: Forward - the wrapper each layer actually runs for the forward pass
Description: run the forward pass and compute this layer's loss
Call: 1. Reshape
      2. Forward_cpu / Forward_gpu
      3. caffe_cpu_dot / caffe_gpu_dot
Called: Net (during the forward pass)
Input: 1. bottom: the input blobs
       2. top: the output blobs
Return: the computed loss
***************************************************************/
template <typename Dtype>inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top)
{
// Lock during forward to ensure sequential forward
Lock(); // lock so nets sharing this layer run their forward passes sequentially
Dtype loss = 0; // the loss value
Reshape(bottom, top); // adjust the top blob shapes
switch (Caffe::mode()) // pick the implementation for the current mode
{
case Caffe::CPU:
Forward_cpu(bottom, top); // forward pass
for (int top_id = 0; top_id < top.size(); ++top_id)
{
if (!this->loss(top_id))
{ continue; }
const int count = top[top_id]->count();
const Dtype* data = top[top_id]->cpu_data();
const Dtype* loss_weights = top[top_id]->cpu_diff();
// accumulate the loss: dot(data, loss_weights)
loss += caffe_cpu_dot(count, data, loss_weights);
}
break;
case Caffe::GPU:
Forward_gpu(bottom, top);
#ifndef CPU_ONLY
for (int top_id = 0; top_id < top.size(); ++top_id)
{
if (!this->loss(top_id))
{ continue; }
const int count = top[top_id]->count();
const Dtype* data = top[top_id]->gpu_data();
const Dtype* loss_weights = top[top_id]->gpu_diff();
Dtype blob_loss = 0;
caffe_gpu_dot(count, data, loss_weights, &blob_loss);
loss += blob_loss;
}
#endif
break;
default:
LOG(FATAL) << "Unknown caffe mode.";
}
Unlock(); // done, release the lock
return loss; // return this layer's loss
}
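The dot product above is the loss_weight trick from SetLossWeights at work. The toy program below (a self-contained sketch in plain C++, with caffe_set and caffe_cpu_dot replaced by explicit loops) demonstrates why filling the diff with loss_weight makes the dot product equal the weighted loss.
#include <cstdio>
int main() {
  const int count = 4;
  float data[count] = {0.5f, 1.5f, -0.5f, 2.5f};  // the top blob's data
  float diff[count];                              // the top blob's diff
  const float loss_weight = 2.0f;                 // layer_param_.loss_weight(0)
  for (int i = 0; i < count; ++i) diff[i] = loss_weight;      // SetLossWeights: caffe_set
  float loss = 0.0f;
  for (int i = 0; i < count; ++i) loss += data[i] * diff[i];  // Forward: caffe_cpu_dot
  std::printf("loss = %.2f\n", loss);  // prints 8.00 = 2.0 * (0.5 + 1.5 - 0.5 + 2.5)
  return 0;
}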
/**************************************************************
Function: Backward - the wrapper each layer actually runs for the backward pass
Description: run the backward pass
Call: 1. Backward_cpu / Backward_gpu
Called: Net (during the backward pass)
Input: 1. top: the output blobs
       2. propagate_down: whether to backpropagate into each bottom blob
       3. bottom: the input blobs
Return: none
***************************************************************/
template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,const vector<bool>& propagate_down,const vector<Blob<Dtype>*>& bottom)
{
switch (Caffe::mode())
{
case Caffe::CPU:
Backward_cpu(top, propagate_down, bottom);
break;
case Caffe::GPU:
Backward_gpu(top, propagate_down, bottom);
break;
default:
LOG(FATAL) << "Unknown caffe mode.";
}
}
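Putting the two wrappers together, one training-style round trip looks roughly like this (a hedged sketch; layer, bottom and top as in the setup sketch earlier):
Caffe::set_mode(Caffe::CPU);  // or Caffe::GPU; decides which *_cpu/*_gpu runs
float loss = layer->Forward(bottom, top);  // returns this layer's loss
vector<bool> propagate_down(bottom.size(), true);
layer->Backward(top, propagate_down, bottom);  // gradients land in the bottom diffs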
/**************************************************************
Function: ToProto - save the layer's parameters
Input: 1. param: the destination LayerParameter
       2. write_diff: whether to also write the gradients
Return: none
***************************************************************/
// Serialize LayerParameter to protocol buffer
template <typename Dtype>
void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff)
{
param->Clear();
param->CopyFrom(layer_param_);
param->clear_blobs();
for (int i = 0; i < blobs_.size(); ++i)
{
blobs_[i]->ToProto(param->add_blobs(), write_diff);
}
}
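A short usage sketch of the serialization path (hedged; it mirrors what Net does per layer when snapshotting):
LayerParameter saved;
layer->ToProto(&saved, false);  // copies layer_param_ plus every blob in blobs_
// saved.blobs(i) now holds the weights that were in layer->blobs()[i]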
} // namespace caffe
#endif // CAFFE_LAYER_H_
If you spot any mistakes, please point them out; anyone interested is welcome to discuss and learn together!
MOE-Microsoft Key Laboratory of Multimedia Computing and Communication, University of Science and Technology of China
MultiMedia Computing Group