Caffe (2): Header Analysis

Header file analysis

Blob member variables
shared_ptr<SyncedMemory> data_; where the data itself is stored
shared_ptr<SyncedMemory> diff_; where the gradients (diffs) are stored
shared_ptr<SyncedMemory> shape_data_; the shape mirrored into SyncedMemory (so it can also be read on the GPU)
vector<int> shape_; size of each dimension
int count_; the blob is one flat array and count_ is its total number of elements; for a batch of multi-channel images this is n*c*h*w
int capacity_; upper bound on the array size (the currently allocated capacity)
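A small usage sketch (assuming the standard Blob API from blob.hpp; the helper function name is made up) showing that count() is simply the product of the dimensions:

#include <vector>
#include "caffe/blob.hpp"

void blob_count_example() {  // illustrative helper, not part of Caffe
  caffe::Blob<float> blob;
  std::vector<int> shape(4);
  shape[0] = 2; shape[1] = 3; shape[2] = 4; shape[3] = 5;  // n, c, h, w
  blob.Reshape(shape);
  // blob.count()  == 2 * 3 * 4 * 5 == 120   total number of elements
  // blob.count(1) == 3 * 4 * 5     == 60    elements per image (c * h * w)
}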
Reshape
Changes the blob's dimensions, reallocating storage when needed
void Reshape(const int num, const int channels, const int height, const int width);
void Reshape(const vector<int>& shape);
If shape_data_ is missing or too small for the new shape it is reallocated, and if the new count_ exceeds capacity_ then data_ and diff_ are reallocated as well.
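A slightly simplified paraphrase of the implementation in blob.cpp (the CHECK macros are omitted and details may differ slightly; consult the source for the exact code):

template <typename Dtype>
void Blob<Dtype>::Reshape(const vector<int>& shape) {
  count_ = 1;
  shape_.resize(shape.size());
  // keep a SyncedMemory copy of the shape so it can also be read on the GPU
  if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) {
    shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int)));
  }
  int* shape_data = static_cast<int*>(shape_data_->mutable_cpu_data());
  for (int i = 0; i < shape.size(); ++i) {
    count_ *= shape[i];
    shape_[i] = shape[i];
    shape_data[i] = shape[i];
  }
  // only reallocate when the blob grows; shrinking keeps the old storage
  if (count_ > capacity_) {
    capacity_ = count_;
    data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
    diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
  }
}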
SetLossWeights(const vector<Blob<Dtype>*>& top)
// Set up the loss weights used during backpropagation
inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
    const int num_loss_weights = layer_param_.loss_weight_size();
    if (num_loss_weights) {
      CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
          "unspecified or specified once per top blob.";
      for (int top_id = 0; top_id < top.size(); ++top_id) {
        const Dtype loss_weight = layer_param_.loss_weight(top_id);
        if (loss_weight == Dtype(0)) { continue; }
        this->set_loss(top_id, loss_weight); // initialize loss_
        const int count = top[top_id]->count();
        Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff(); // pointer to the memory holding the diff
        caffe_set(count, loss_weight, loss_multiplier); // fill that memory with loss_weight
      }
    }
  }
Forward

Forward first runs the layer's forward pass and then computes the loss, if there is one (only loss layers produce a loss, and SetLossWeights has already placed the loss weights loss_weight into the top blobs' diff).
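Roughly, the loss accumulation inside Layer<Dtype>::Forward in layer.hpp looks like the following (paraphrased; the exact code may differ slightly):

Dtype loss = 0;
Reshape(bottom, top);
Forward_cpu(bottom, top);            // or Forward_gpu, depending on the mode
for (int top_id = 0; top_id < top.size(); ++top_id) {
  if (!this->loss(top_id)) { continue; }               // skip tops that carry no loss
  const int count = top[top_id]->count();
  const Dtype* data = top[top_id]->cpu_data();
  const Dtype* loss_weights = top[top_id]->cpu_diff(); // filled by SetLossWeights
  loss += caffe_cpu_dot(count, data, loss_weights);    // weighted sum of the loss blob
}
return loss;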

SetUp

Preparation before the layer can be used

void SetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  CheckBlobCounts(bottom, top); // check the numbers of bottom and top blobs
  LayerSetUp(bottom, top);      // layer-specific setup; differs per layer type
  Reshape(bottom, top);         // size the top blobs (allocate their storage)
  SetLossWeights(top);          // set loss weights, i.e. which top blobs feed into the loss / backprop
}
ConvolutionLayer

include/caffe/layers/conv_layer.hpp

Forward_cpu
void ConvolutionLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)

The two figures below (not reproduced here) describe the forward pass fairly clearly: the first shows the overall structure of the algorithm, the second the processing of a single bottom blob.

[Figure: forward propagation]

[Figure: one step of the forward propagation]

Backward_cpu

See the document < deeplearning基本知识梳理.docx > for how the gradients are computed.

template <typename Dtype>
void ConvolutionLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
        const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
    const Dtype* weight = this->blobs_[0]->cpu_data();
    Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff();
    for (int i = 0; i < top.size(); ++i) {
        const Dtype* top_diff = top[i]->cpu_diff();
        const Dtype* bottom_data = bottom[i]->cpu_data();
        Dtype* bottom_diff = bottom[i]->mutable_cpu_diff();
        // Bias gradient, if necessary.
        if (this->bias_term_ && this->param_propagate_down_[1]) {
          Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff();
          for (int n = 0; n < this->num_; ++n) {
            this->backward_cpu_bias(bias_diff, top_diff + n * this->top_dim_); // base_conv_layer.cpp
          }
        }
        if (this->param_propagate_down_[0] || propagate_down[i]) {
          for (int n = 0; n < this->num_; ++n) {
            // gradient w.r.t. weight. Note that we will accumulate diffs.
            if (this->param_propagate_down_[0]) {
             // base_conv_layer.cpp
              this->weight_cpu_gemm(bottom_data + n * this->bottom_dim_, 
                  top_diff + n * this->top_dim_, weight_diff);
            }
            // gradient w.r.t. bottom data, if necessary.
            if (propagate_down[i]) {
             // base_conv_layer.cpp
              this->backward_cpu_gemm(top_diff + n * this->top_dim_, weight,
                  bottom_diff + n * this->bottom_dim_);
            }
          }
        }
    }
}
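For reference, with the im2col buffer X_n = im2col(x_n) for sample n, the three helper calls above compute the following (a standard derivation, stated here without proof):

\begin{aligned}
\text{forward:}\qquad & y_n = W\,X_n \\
\text{backward\_cpu\_bias:}\qquad & \frac{\partial L}{\partial b} \mathrel{+}= \frac{\partial L}{\partial y_n}\,\mathbf{1} \\
\text{weight\_cpu\_gemm:}\qquad & \frac{\partial L}{\partial W} \mathrel{+}= \frac{\partial L}{\partial y_n}\,X_n^{\top} \\
\text{backward\_cpu\_gemm:}\qquad & \frac{\partial L}{\partial x_n} = \operatorname{col2im}\!\left(W^{\top}\,\frac{\partial L}{\partial y_n}\right)
\end{aligned}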
BaseConvolutionLayer

include/caffe/layers/base_conv_layer.hpp

DataLayer

class BaseDataLayer : public Layer<Dtype>
        TransformationParameter transform_param_; // parameters for the data-preprocessing transformer
        shared_ptr<DataTransformer<Dtype> > data_transformer_; // the transformer itself
        bool output_labels_; // whether label data is produced

template <typename Dtype>
class Batch {
public:
    Blob<Dtype> data_, label_;
};

class BasePrefetchingDataLayer : public BaseDataLayer<Dtype>, public InternalThread; 
    vector<shared_ptr<Batch<Dtype> > > prefetch_; // the prefetched batches

BlockingQueue<Batch<Dtype>*> prefetch_free_; // queue of free (empty) batches
BlockingQueue<Batch<Dtype>*> prefetch_full_; // queue of loaded (full) batches
Batch<Dtype>* prefetch_current_; // the batch currently being consumed by Forward

Blob<Dtype> transformed_data_; // the transformed data
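The two queues implement a simple producer/consumer hand-off between the prefetch thread and Forward; roughly (paraphrased from base_data_layer.cpp, details may differ):

// prefetch thread (InternalThreadEntry): take an empty batch, fill it, publish it
Batch<Dtype>* batch = prefetch_free_.pop();
load_batch(batch);
prefetch_full_.push(batch);

// Forward_cpu: recycle the previously consumed batch, then block until a full one is ready
if (prefetch_current_) { prefetch_free_.push(prefetch_current_); }
prefetch_current_ = prefetch_full_.pop("Waiting for data");
// copy prefetch_current_->data_ (and label_, if output_labels_) into the top blobs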

Net

include/caffe/net.hpp

Member variables

vector<shared_ptr<Layer<Dtype> > > layers_; // the individual layers
map<string, int> layer_names_index_; // map from layer name to layer index
vector<shared_ptr<Blob<Dtype> > > blobs_; // the blobs that carry data between layers
vector<vector<Blob<Dtype>*> > bottom_vecs_; // pointers to each layer's input (bottom) blobs
vector<vector<Blob<Dtype>*> > top_vecs_; // pointers to each layer's output (top) blobs
vector<Dtype> blob_loss_weights_; // each blob's weight in the global loss
vector<Blob<Dtype>*> learnable_params_; // the trainable parameters
vector<int> learnable_param_ids_; // presumably the indices of the learnable params
size_t memory_used_; // memory used by the net

// Callback::run is a pure virtual method that subclasses must implement
vector<Callback*> before_forward_; // hooks run before each layer's forward pass
vector<Callback*> after_forward_; // hooks run after each layer's forward pass (e.g. to display/collect info)
vector<Callback*> before_backward_; // hooks run before each layer's backward pass
vector<Callback*> after_backward_; // hooks run after each layer's backward pass (e.g. gradient sync in multi-GPU training)
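How these members fit together can be seen from the rough shape of Net<Dtype>::ForwardFromTo in net.cpp (paraphrased from memory; the exact code differs in detail):

for (int i = start; i <= end; ++i) {
  for (int c = 0; c < before_forward_.size(); ++c) { before_forward_[c]->run(i); }
  Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]);
  loss += layer_loss;
  for (int c = 0; c < after_forward_.size(); ++c) { after_forward_[c]->run(i); }
}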

Solver

Member variables

SolverParameter param_;
int iter_;
int current_step_;
shared_ptr<Net<Dtype> > net_;
vector<shared_ptr<Net<Dtype> > > test_nets_;
vector<Callback*> callbacks_;
vector<Dtype> losses_;
Dtype smoothed_loss_;
// A function that can be set by a client of the Solver to provide indication
// that it wants a snapshot saved and/or to exit early.
typedef boost::function<SolverAction::Enum()> ActionCallback;
ActionCallback action_request_function_;

// True iff a request to stop early was received.
bool requested_early_exit_;

// Timing information, handy to tune e.g. nbr of GPUs
Timer iteration_timer_;
float iterations_last_;

SolverAction 
Determines what the solver should do: nothing special, stop training, or take a snapshot and keep training.
namespace SolverAction {
enum Enum {
  NONE = 0,  // Take no special action.
  STOP = 1,  // Stop training. snapshot_after_train controls whether a snapshot is created.
  SNAPSHOT = 2  // Take a snapshot, and keep training.
};
}
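A typical client supplies such a callback through Solver::SetActionFunction (tools/caffe.cpp does this with caffe::SignalHandler). Below is a minimal sketch; the flag g_stop_requested and the function CheckForStop are made-up names:

#include <csignal>
#include "caffe/solver.hpp"

static volatile std::sig_atomic_t g_stop_requested = 0; // hypothetical flag, set e.g. by a SIGINT handler

caffe::SolverAction::Enum CheckForStop() {
  return g_stop_requested ? caffe::SolverAction::STOP : caffe::SolverAction::NONE;
}

// after constructing the solver:
//   solver->SetActionFunction(CheckForStop);
//   solver->Solve(); // Solve() polls the callback and can stop (and optionally snapshot) early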

SgdSolver

Member variables

// history maintains the historical momentum data.
// update maintains update related data and is not needed in snapshots.
// temp maintains other information that might be needed in computation
//   of gradients/updates and is not needed in snapshots
vector<shared_ptr<Blob<Dtype> > > history_, update_, temp_; // history_ holds the accumulated momentum used to form each update
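Concretely, history_ plays the role of the momentum term v in the usual SGD-with-momentum update. A tiny standalone sketch of what ComputeUpdateValue plus Net::Update amount to for a single scalar parameter (simplified: no weight decay, no per-parameter learning-rate multipliers):

#include <cstdio>

int main() {
  float w = 1.0f, v = 0.0f;               // v corresponds to one entry of history_
  const float lr = 0.1f, momentum = 0.9f;
  for (int iter = 0; iter < 3; ++iter) {
    float grad = 2.0f * w;                // gradient of the example loss w^2
    v = momentum * v + lr * grad;         // ComputeUpdateValue: fold the gradient into the history
    w -= v;                               // Net::Update: apply the update (stored in the param's diff)
    std::printf("iter %d: w = %f\n", iter, w);
  }
  return 0;
}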

caffe.proto

./.build_debug/src/caffe/proto/caffe.pb.h
LayerParameter
NetParameter

factory

Registers creator functions in a global registry

layer_factory
typedef shared_ptr<Layer<Dtype> > (*Creator)(const LayerParameter&); // a function-pointer type whose call returns a layer
typedef std::map<string, Creator> CreatorRegistry; // the global layer registry: simply a <string, creator function> map

static CreatorRegistry& Registry() { // creates the registry; it is a function-local static, so it is created exactly once
  static CreatorRegistry* g_registry_ = new CreatorRegistry();
  return *g_registry_;
}

// Adds a creator.
static void AddCreator(const string& type, Creator creator) { // the entry point for registering a layer type
  CreatorRegistry& registry = Registry();
  CHECK_EQ(registry.count(type), 0) << "Layer type " << type << " already registered.";
  registry[type] = creator;
}

// Get a layer using a LayerParameter.
static shared_ptr<Layer<Dtype> > CreateLayer(const LayerParameter& param) {
  if (Caffe::root_solver()) {
    LOG(INFO) << "Creating layer " << param.name();
  }
  const string& type = param.type();
  CreatorRegistry& registry = Registry();
  CHECK_EQ(registry.count(type), 1) << "Unknown layer type: " << type
      << " (known types: " << LayerTypeListString() << ")";
  return registry[type](param);
}

 private:
  // Layer registry should never be instantiated - everything is done with its
  // static variables. The private constructor makes this a classic singleton.
  LayerRegistry() {}

template <typename Dtype>
class LayerRegisterer { // a helper class whose sole job is to register a layer creator
 public:
  LayerRegisterer(const string& type,
      shared_ptr<Layer<Dtype> > (*creator)(const LayerParameter&)) {
    // LOG(INFO) << "Registering layer type: " << type;
    LayerRegistry<Dtype>::AddCreator(type, creator);
  }
};

// Registers both a float and a double creator.
#define REGISTER_LAYER_CREATOR(type, creator)                                  \
  static LayerRegisterer<float> g_creator_f_##type(#type, creator<float>);     \
  static LayerRegisterer<double> g_creator_d_##type(#type, creator<double>)    \

// Defines a default creator function for type##Layer and then registers it.
#define REGISTER_LAYER_CLASS(type)                                             \
  template <typename Dtype>                                                    \
  shared_ptr<Layer<Dtype> > Creator_##type##Layer(const LayerParameter& param) \
  {                                                                            \
    return shared_ptr<Layer<Dtype> >(new type##Layer<Dtype>(param));           \
  }                                                                            \
  REGISTER_LAYER_CREATOR(type, Creator_##type##Layer)

}  // namespace caffe
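As a usage example, a hypothetical layer class MyNewLayer would register itself at the bottom of its .cpp file as follows (MyNewLayer is a made-up name; the real layers under src/caffe/layers/ end their .cpp files the same way):

INSTANTIATE_CLASS(MyNewLayer);
REGISTER_LAYER_CLASS(MyNew); // registers type "MyNew"; its creator returns a new MyNewLayer<Dtype>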
solver_factory

Similar to layer_factory; here I only note my own understanding. solver_factory ends by defining REGISTER_SOLVER_CLASS, which first defines a creator function for the solver class and then invokes REGISTER_SOLVER_CREATOR. That macro instantiates the static objects g_creator_f_##type and g_creator_d_##type; the key point is that each of them is a SolverRegisterer object whose constructor calls SolverRegistry::AddCreator. AddCreator calls Registry(), which builds the registry the first time it is used, and then inserts the new <type, creator> entry; every registration (i.e. every solver type that registers itself) adds one more entry. Because the registry may only be created once, it is never rebuilt after that.
By the time main() in the caffe binary starts running, all the concrete solvers have already been registered through these static objects. This is a textbook singleton and a rather elegant design.
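For example, sgd_solver.cpp registers the SGD solver at the bottom of the file:

REGISTER_SOLVER_CLASS(SGD); // registers type "SGD"; its creator returns a new SGDSolver<Dtype>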
