Caffe Learning (3)
A Caffe Code Walkthrough
- In layer.hpp: Forward and Backward implement the forward computation and the backward update. Inputs are uniformly called bottom and outputs top, and Backward takes a propagate_down argument indicating whether the layer back-propagates gradients. The concrete implementations of Forward and Backward dispatch on Caffe::mode(), i.e. whether to compute on the CPU or the GPU; for this, each layer implements the corresponding interfaces Forward_cpu/Forward_gpu and Backward_cpu/Backward_gpu. These interfaces are virtual, and the actual computation depends on the layer type (note: some layers have no GPU implementation, so the wrappers fall back to the CPU computation). A layer also implements the ToProto interface, which writes the layer's parameters to a protocol buffer file. (A minimal sketch of this interface follows this list.)
- After walking through the Net.cpp code, we will explain in detail how propagate_down is used, and how it differs from the other parameters that decide whether gradients are back-propagated during finetuning.
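To make the virtual-interface story concrete, here is a minimal sketch of a custom layer overriding only the CPU entry points. This is not a real Caffe layer: the class name MySimpleLayer and its identity pass-through math are hypothetical, and the usual caffe/layer.hpp and caffe/util/math_functions.hpp includes plus the caffe namespace are assumed.

template <typename Dtype>
class MySimpleLayer : public Layer<Dtype> {  // hypothetical example layer
 public:
  explicit MySimpleLayer(const LayerParameter& param) : Layer<Dtype>(param) {}
  virtual inline const char* type() const { return "MySimple"; }
 protected:
  // Reshape the top blob to match the bottom blob.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    top[0]->ReshapeLike(*bottom[0]);
  }
  // Forward_cpu: identity mapping, just to show where the math goes.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    caffe_copy(bottom[0]->count(), bottom[0]->cpu_data(),
               top[0]->mutable_cpu_data());
  }
  // Backward_cpu: propagate the gradient only if propagate_down[0] is true.
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    if (propagate_down[0]) {
      caffe_copy(top[0]->count(), top[0]->cpu_diff(),
                 bottom[0]->mutable_cpu_diff());
    }
  }
  // No Forward_gpu/Backward_gpu overrides: the base class wrappers fall back
  // to the CPU implementations, as noted above.
};

Registering such a class with REGISTER_LAYER_CLASS(MySimple) would then let LayerRegistry create it by type name, which is exactly what the Init code below does.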
Net::Init
Net.cpp: the Init function
- Code walkthrough
#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "hdf5.h"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/net.hpp"
#include "caffe/parallel.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/hdf5.hpp"
#include "caffe/util/insert_splits.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/upgrade_proto.hpp"
namespace caffe {
template <typename Dtype>
Net<Dtype>::Net(const NetParameter& param) {
Init(param);
}
template <typename Dtype>
Net<Dtype>::Net(const string& param_file, Phase phase,
const int level, const vector<string>* stages) {
NetParameter param;
ReadNetParamsFromTextFileOrDie(param_file, &param);
// Set phase, stages and level
param.mutable_state()->set_phase(phase);
if (stages != NULL) {
for (int i = 0; i < stages->size(); i++) {
param.mutable_state()->add_stage((*stages)[i]);
}
}
param.mutable_state()->set_level(level);
Init(param);
}
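// Usage sketch (the file name and stage string below are made up, not from
// the Caffe sources): the phase, level and stages passed here together form
// the NetState that FilterNet consults in Init:
//   vector<string> stages;
//   stages.push_back("deploy");
//   Net<float> net("example.prototxt", caffe::TEST, 0, &stages);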
template <typename Dtype>
void Net<Dtype>::Init(const NetParameter& in_param) {
// Set phase from the state.
phase_ = in_param.state().phase();
// Filter layers based on their include/exclude rules and
// the current NetState.
// FilterNet
NetParameter filtered_param;
FilterNet(in_param, &filtered_param);
LOG_IF(INFO, Caffe::root_solver())
<< "Initializing net from parameters: " << std::endl
<< filtered_param.DebugString();
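// Example (sketch): FilterNet keeps a layer only when its include/exclude
// rules match the current NetState. A typical prototxt rule for a
// training-only data layer looks like:
//   layer { name: "mnist" type: "Data" include { phase: TRAIN } ... }
// Such a layer is filtered out here whenever phase_ == TEST.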
// Create a copy of filtered_param with splits added where necessary.
// Second pass over the net's param. Purpose: when one output blob of a lower
// layer feeds several higher layers, a Split layer is inserted to form a new
// network. The main reason is that the gradients those layers send back to
// that blob must be accumulated.
// E.g. the label blob feeds both the accuracy layer and the loss layer, so a
// layer is inserted here -- the label_mnist_1_split layer printed in Caffe's
// log -- with two top blobs, label_mnist_1_split_0 and label_mnist_1_split_1,
// feeding the accuracy layer and the loss layer respectively.
NetParameter param;
InsertSplits(filtered_param, &param);
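// Example (sketch, following the naming in the comment above): if blob
// "label" produced by layer "mnist" feeds both an Accuracy and a
// SoftmaxWithLoss layer, InsertSplits inserts roughly:
//   layer { name: "label_mnist_1_split" type: "Split" bottom: "label"
//           top: "label_mnist_1_split_0" top: "label_mnist_1_split_1" }
// Each consumer then reads its own top, and the Split layer's Backward sums
// the two incoming gradients into "label"'s diff.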
// Basically, build all the layers and set up their connections.
// Below, the layers and the inter-layer blobs are created.
name_ = param.name();
map<string, int> blob_name_to_idx;
set<string> available_blobs;
memory_used_ = 0;
// For each layer, set up its input and output
bottom_vecs_.resize(param.layer_size());  // pointers to each layer's input (bottom) blobs
top_vecs_.resize(param.layer_size());  // pointers to each layer's output (top) blobs
bottom_id_vecs_.resize(param.layer_size());  // ids of each layer's input (bottom) blobs
param_id_vecs_.resize(param.layer_size());  // ids of each layer's parameter blobs
top_id_vecs_.resize(param.layer_size());  // ids of each layer's output (top) blobs
bottom_need_backward_.resize(param.layer_size());  // per-bottom bool: does this blob need backward
// The for loop below processes each layer in turn.
for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) {
// Inherit phase from net if unset.
// If the current layer does not set a phase, inherit the net's phase.
if (!param.layer(layer_id).has_phase()) {
param.mutable_layer(layer_id)->set_phase(phase_);
}
// Setup layer.
const LayerParameter& layer_param = param.layer(layer_id);  // the current layer's parameters
// Check the number of propagate_down entries in the LayerParameter, i.e. the
// "propagate_down: xx" lines written in the prototxt. The count must be either
// 0 (nothing written) or equal to the number of bottom blobs; each entry says
// whether the layer back-propagates gradients to the corresponding bottom blob
// (explained in detail later).
if (layer_param.propagate_down_size() > 0) {
CHECK_EQ(layer_param.propagate_down_size(),
layer_param.bottom_size())
<< "propagate_down param must be specified "
<< "either 0 or bottom_size times ";
}
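// Example (sketch, layer names made up): one propagate_down flag per bottom
// blob stops the gradient at chosen inputs only, e.g.
//   layer { name: "fuse" type: "Eltwise" bottom: "a" bottom: "b"
//           propagate_down: true propagate_down: false ... }
// lets gradients flow into "a" but not into "b"; writing no propagate_down
// at all leaves the decision to Caffe's automatic bookkeeping.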
// Create a concrete layer and push it onto layers_.
layers_.push_back(LayerRegistry<Dtype>::CreateLayer(layer_param));
layer_names_.push_back(layer_param.name());
LOG_IF(INFO, Caffe::root_solver())
<< "Creating Layer " << layer_param.name();
bool need_backward = false;
// Figure out this layer's input and output
// Handle the inputs (bottom blobs) and the outputs (top blobs) separately.
for (int bottom_id = 0; bottom_id < layer_param.bottom_size();
++bottom_id) {
const int blob_id = AppendBottom(param, layer_id, bottom_id,
&available_blobs, &blob_name_to_idx);
// AppendBottom() hooks up the bottom blobs for this layer.
// If a blob needs backward, this layer should provide it.
need_backward |= blob_need_backward_[blob_id];
}
int num_top = layer_param.top_size();
for (int top_id = 0; top_id < num_top; ++top_id) {
AppendTop(param, layer_id, top_id, &available_blobs, &blob_name_to_idx);
// Create the top blob and push it into top_vecs_.
// Collect Input layer tops as Net inputs.
if (layer_param.type() == "Input") {
const int blob_id = blobs_.size() - 1;
net_input_blob_indices_.push_back(blob_id);
net_input_blobs_.push_back(blobs_[blob_id].get());
}
}
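// Example (sketch): a deploy-style Input layer whose top blob becomes a net
// input reachable via net_input_blobs_:
//   layer { name: "data" type: "Input" top: "data"
//           input_param { shape: { dim: 1 dim: 3 dim: 224 dim: 224 } } }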
// If the layer specifies that AutoTopBlobs() -> true and the LayerParameter
// specified fewer than the required number (as specified by
// ExactNumTopBlobs() or MinTopBlobs()), allocate them here.
Layer<Dtype>* layer = layers_[layer_id].get();
if (layer->AutoTopBlobs()) {
const int needed_num_top =
std::max(layer->MinTopBlobs(), layer->ExactNumTopBlobs());
for (; num_top < needed_num_top; ++num_top) {
// Add "anonymous" top blobs -- do not modify available_blobs or
// blob_name_to_idx as we don't want these blobs to be usable as input
// to other layers.
AppendTop(param, layer_id, num_top, NULL, NULL);
}
}
// After this layer is connected, set it up.
// SetUp allocates memory for the blobs created in AppendTop().
layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]);
LOG_IF(INFO, Caffe::root_solver())
<< "Setting up " << layer_names_[layer_id];
// The following is updated on every loop iteration.
for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) {
if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) {
blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0));
}
// For each layer visited, blob_loss_weights_ is resized as needed, and the
// templated Layer's loss() accessor is called to fetch the loss_weight.
blob_loss_weights_[top_id_vecs_[layer_id][top_id]] = layer->loss(top_id);
LOG_IF(INFO, Caffe::root_solver())
<< "Top shape: " << top_vecs_[layer_id][top_id]->shape_string();
if (layer->loss(top_id)) {
LOG_IF(INFO, Caffe::root_solver())
<< " with loss weight " << layer->loss(top_id);
}
memory_used_ += top_vecs_[layer_id][top_id]->count();  // accumulate the memory required
}
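// Example (sketch): loss layers get a default loss weight of 1 on their first
// top, while other layers default to 0; any top can be pulled into the
// objective explicitly from the prototxt, e.g.
//   layer { name: "aux_loss" type: "EuclideanLoss" loss_weight: 0.5 ... }
// and layer->loss(top_id) above then returns that 0.5.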
LOG_IF(INFO, Caffe::root_solver())
<< "Memory required for data: " << memory_used_ * sizeof(Dtype);
// Handle each layer's param blobs.
const int param_size = layer_param.param_size();
// param_size is the number of ParamSpec "param" entries in the LayerParameter
// layer_param, i.e. how many of this layer's weight blobs are given an
// explicit spec. For example, Convolution and InnerProduct layers each have
// two parameter blobs, corresponding to the weights w and the bias b.
const int num_param_blobs = layers_[layer_id]->blobs().size();
// num_param_blobs is the number of learnable parameter blobs in a Layer;
// note that param_size <= num_param_blobs.
CHECK_LE(param_size, num_param_blobs)
<< "Too many params specified for layer " << layer_param.name();
ParamSpec default_param_spec;
for (int param_id = 0; param_id < num_param_blobs; ++param_id) {
const ParamSpec* param_spec = (param_id < param_size) ?
&layer_param.param(param_id) : &default_param_spec;
const bool param_need_backward = param_spec->lr_mult() != 0;
// This shows that setting lr_mult to 0 in the prototxt -- switching it off --
// stops that layer parameter from being updated.
need_backward |= param_need_backward;
// param_need_backward determines whether need_backward becomes true; if any
// iteration sets need_backward to true, it remains true after this for loop.
layers_[layer_id]->set_param_propagate_down(param_id,
param_need_backward);
}
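// Example (sketch): a common finetuning trick is to freeze a layer by zeroing
// the learning-rate multipliers of both its parameter blobs in the prototxt:
//   layer { name: "conv1" type: "Convolution"
//           param { lr_mult: 0 } param { lr_mult: 0 } ... }
// Then param_need_backward is false for both blobs here, and unless some
// bottom blob still needs gradients, the whole layer can skip Backward.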
// Add the parameter blobs. If the current layer has no parameter blob
// (num_param_blobs == 0), e.g. ReLU, the loop is skipped and nothing is added.
// AppendParam only performs the bookkeeping for adding the current layer's
// parameter blobs; it does not modify any backward-related attributes.
for (int param_id = 0; param_id < num_param_blobs; ++param_id) {
AppendParam(param, layer_id, param_id);
// Append the param blob and its id to params_, param_id_vecs_, etc.
}
// Finally, set the backward flag
// Earlier, while iterating over each top blob of the current layer, AppendTop
// already pushed a false (the default) into blob_need_backward_ for each one.
// Below, if this layer needs backward, blob_need_backward_ is updated.
layer_need_backward_.push_back(need_backward);
if (need_backward) {
for (int top_id = 0; top_id < top_id_vecs_[layer_id].size(); ++top_id) {
blob_need_backward_[top_id_vecs_[layer_id][top_id]] = true;
}
}
}
// At this point all layers have been created and set up; the part below
// revisits the backward settings in reverse order and corrects them.
// Go through the net backwards to determine which blobs contribute to the
// loss. We can skip backward computation for blobs that don't contribute
// to the loss.
// Also checks if all bottom blobs don't need backward computation (possible
// because of the skip_propagate_down param) and so we can skip backward
// computation for the entire layer
// Note that the backward settings made in the code above were made in forward
// order, while the code below walks the net in reverse order and corrects the
// results of that forward pass.
// Whether a layer needs backward computation depends mainly on two things:
// (1) whether the layer's top blobs take part in computing the loss;
// (2) whether the layer's bottom blobs need backward computation
//     (a Data layer, for example, generally needs no backward computation).
set<string> blobs_under_loss;