Net's job is essentially this: given the prototxt network definition we write, build the connections between the layers and initialize them.
This section first introduces the underlying data structure, NetParameter, and then walks through the source code. For the protobuf prerequisites, see the earlier post.
message NetParameter {
optional string name = 1; // consider giving the network a name
// DEPRECATED. See InputParameter. The input blobs to the network.
repeated string input = 3;
// DEPRECATED. See InputParameter. The shape of the input blobs, using the nested BlobShape message described below.
repeated BlobShape input_shape = 8;
// 4D input dimensions -- deprecated. Use "input_shape" instead.
// If specified, for each input blob there should be four
// values specifying the num, channels, height and width of the input blob.
// Thus, there should be a total of (4 * #input) numbers.
repeated int32 input_dim = 4;
// Whether the network will force every layer to carry out backward operation.
// If set False, then whether to carry out backward is determined
// automatically according to the net structure and learning rates.
// I.e., whether every layer is forced to run backward.
optional bool force_backward = 5 [default = false];
// The current "state" of the network, including the phase, level, and stage.
// Some layers may be included/excluded depending on this state and the states
// specified in the layers' include and exclude fields. (The nested NetState message.)
optional NetState state = 6;
// Print debugging information about results while running Net::Forward,
// Net::Backward, and Net::Update. I.e., whether to print debugging info while running the net.
optional bool debug_info = 7 [default = false];
// The layers that make up the net. Each of their configurations, including
// connectivity and behavior, is specified as a LayerParameter. (Nested layer parameters, covering both the connections and the layer's operation.)
repeated LayerParameter layer = 100; // ID 100 so layers are printed last.
// DEPRECATED: use 'layer' instead. The old-style layer definition, superseded by the new 'layer' field and presumably kept only for backward compatibility.
repeated V1LayerParameter layers = 2;
}
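To make the schema concrete, here is a minimal sketch, assuming only the protobuf-generated API from caffe.pb.h, that builds a NetParameter in C++ instead of parsing a prototxt file (the net and layer names are hypothetical):
#include "caffe/proto/caffe.pb.h"

caffe::NetParameter MakeToyNet() {
  caffe::NetParameter net;
  net.set_name("toy_net");                       // optional string name = 1
  net.set_force_backward(false);                 // optional bool force_backward = 5
  net.mutable_state()->set_phase(caffe::TRAIN);  // the nested NetState
  caffe::LayerParameter* ip = net.add_layer();   // repeated LayerParameter layer = 100
  ip->set_name("ip1");
  ip->set_type("InnerProduct");
  ip->add_bottom("data");
  ip->add_top("ip1");
  return net;
}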
The network is initialized by a call from the solver, through the chain:
Solver() constructor -> Init(param) -> InitTrainNet() -> net_.reset(new Net<Dtype>(net_param))
Initialization consists of the following parts:
1. Network structure preprocessing:
FilterNet(in_param, &filtered_param): converts the protobuf-described network structure, according to the network state and other requirements, into the structure the network actually runs in that state.
InsertSplits(filtered_param, &param): when one bottom blob is shared by several layers, inserts a Split layer that fans the blob out into separate copies. The main reason is that the gradients those layers propagate back to the blob must be accumulated.
2. Layer by layer, append the bottom blobs, top blobs, and param blobs (a usage sketch follows this list):
Net<Dtype>::AppendBottom():
creates each layer's bottom blobs. Since the current layer's input blobs are the previous layer's output blobs, this function does not actually create any blob; it only pushes pointers to the previous layer's tops into bottom_vecs_.
Net<Dtype>::AppendTop():
creates each layer's top blobs. This is the function that actually allocates the blob objects, pushing their pointers into top_vecs_.
SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]):
allocates memory for the created parameter blobs and, if necessary, reshapes the layer's bottom and top blobs.
Net<Dtype>::AppendParam():
updates the parameter-related bookkeeping; the actual parameter blobs were already created in the SetUp() call above. For example, it pushes pointers to the layer's parameter blobs into params_.
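Before diving into the source, a minimal sketch of triggering this whole pipeline directly, without a solver, via the second Net constructor shown below (the prototxt file name is hypothetical):
#include "caffe/net.hpp"

void BuildToyNet() {
  // Reads the prototxt, fills in the NetState, and calls Init().
  caffe::Net<float> net("train_val.prototxt", caffe::TRAIN,
                        /*level=*/0, /*stages=*/NULL);
}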
Below is the source for Net's Init() initialization:
#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "hdf5.h"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/net.hpp"
#include "caffe/parallel.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/hdf5.hpp"
#include "caffe/util/insert_splits.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/upgrade_proto.hpp"
namespace caffe {
// The constructor receives the already-parsed protobuf network parameters and calls Init to initialize.
template <typename Dtype>
Net<Dtype>::Net(const NetParameter& param) {
Init(param);
}
// See the earlier post for how the NetState parameters are interpreted.
template <typename Dtype>
Net<Dtype>::Net(const string& param_file, Phase phase,
const int level, const vector<string>* stages) {
NetParameter param;
// Read the network definition from the prototxt file.
ReadNetParamsFromTextFileOrDie(param_file, &param);
// Store the network state in the NetState message nested inside
// NetParameter, using the protobuf-generated setter functions.
param.mutable_state()->set_phase(phase);
if (stages != NULL) {
for (int i = 0; i < stages->size(); i++) {
param.mutable_state()->add_stage((*stages)[i]);
}
}
param.mutable_state()->set_level(level);
Init(param);
}
template <typename Dtype>
void Net<Dtype>::Init(const NetParameter& in_param) {
// Set phase from the state: copy the phase stored in the input NetParameter into the class member.
phase_ = in_param.state().phase();
// Filter layers based on their include/exclude rules and
// the current NetState.
// From the input NetParameter, apply its state and the include/exclude rules to generate the network parameters for the current network state.
NetParameter filtered_param;
FilterNet(in_param, &filtered_param);
LOG_IF(INFO, Caffe::root_solver())
<< "Initializing net from parameters: " << std::endl
<< filtered_param.DebugString();
// Create a copy of filtered_param with splits added where necessary.
// InsertSplits comes from caffe/util/insert_splits.hpp; its comment reads:
// Copy NetParameters with SplitLayers added to replace any shared bottom
// blobs with unique bottom blobs provided by the SplitLayer.
// I.e., where needed, insert SplitLayers so that a bottom blob shared by multiple layers is replaced by unique bottom blobs plus a SplitLayer.
NetParameter param;
InsertSplits(filtered_param, &param);
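// For example, in a hypothetical net where layers "loss1" and "loss2" both
// read bottom "fc1", InsertSplits inserts a SplitLayer after "fc1" and
// rewires each consumer to its own split output (named by caffe's
// SplitBlobName convention); on the way back, the SplitLayer sums the two
// consumers' gradients into fc1's diff.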
// Basically, build all the layers and set up their connections.
// The code from here on builds all the layers and the connections between them.
name_ = param.name();
// A map container: maps a blob's name string to its index.
map<string, int> blob_name_to_idx;
// A set container: holds the names of all currently available blobs.
set<string> available_blobs;
memory_used_ = 0;
// For each layer, set up its input and output.
// Each of the following bookkeeping vectors gets one slot per layer:
bottom_vecs_.resize(param.layer_size());  // pointers to each layer's bottom blobs
top_vecs_.resize(param.layer_size());  // pointers to each layer's top blobs
bottom_id_vecs_.resize(param.layer_size());  // ids of each layer's bottom blobs
param_id_vecs_.resize(param.layer_size());  // ids of each layer's param blobs
top_id_vecs_.resize(param.layer_size());  // ids of each layer's top blobs
bottom_need_backward_.resize(param.layer_size());  // whether each bottom blob needs backward
// Fill in the vectors above, layer by layer.
for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) {
// Inherit phase from net if unset.
// If the layer does not set its phase, inherit the net's phase; anything that survived FilterNet must match it anyway.
if (!param.layer(layer_id).has_phase()) {
param.mutable_layer(layer_id)->set_phase(phase_);
}
// Setup layer.
// Fetch this layer's parameters.
const LayerParameter& layer_param = param.layer(layer_id);
// Check that propagate_down, if specified, has exactly bottom_size
// entries (zero entries means "unspecified").
// propagate_down: a bool vector as long as the bottom blobs; each entry
// says whether the error gradient is propagated down to the bottom blob
// at that index.
if (layer_param.propagate_down_size() > 0) {
CHECK_EQ(layer_param.propagate_down_size(),
layer_param.bottom_size())
<< "propagate_down param must be specified "
<< "either 0 or bottom_size times ";
}
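// Example (hypothetical prototxt snippet): a two-bottom layer that blocks
// backprop into its second input --
//   layer {
//     name: "concat1"  type: "Concat"
//     bottom: "a"  bottom: "b"
//     propagate_down: true  propagate_down: false
//     top: "ab"
//   }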
// layers_ is a vector of shared_ptr. Create (via the layer registry) the layer from its parameters and store the returned shared_ptr in layers_.
layers_.push_back(LayerRegistry<Dtype>::CreateLayer(layer_param));
// layer_names_ is a vector of strings; record the layer's name in it.
layer_names_.push_back(layer_param.name());
// This produces the "Creating Layer <name>" lines we so often see in the log file.
LOG_IF(INFO, Caffe::root_solver())
<< "Creating Layer " << layer_param.name();
bool need_backward = false;
// Figure out this layer's input and output
// For each bottom blob, fill in bottom_vecs_, bottom_id_vecs_,
// available_blobs, and bottom_need_backward_.
// (blob_name_to_idx was already filled for the input layers:
// blob_name_to_idx[blob_name] = i.)
for (int bottom_id = 0; bottom_id < layer_param.bottom_size();
++bottom_id) {
// This call creates the layer's bottom blob. Since the net is built by
// stacking layers, the current layer's input bottom blob is the previous
// layer's output top blob; so no blob is actually created here -- only
// the pointer to the previous layer's top is pushed into bottom_vecs_.
const int blob_id = AppendBottom(param, layer_id, bottom_id,
&available_blobs, &blob_name_to_idx);
// If a blob needs backward, this layer should provide it.
// If any of this layer's bottom blobs needs backward, the layer itself does too.
need_backward |= blob_need_backward_[blob_id];
}
// For each top blob, fill in top_vecs_, top_id_vecs_, available_blobs,
// and blob_need_backward_, and record the newly created top blob in
// blobs_ and blob_names_.
// (blob_name_to_idx was already filled for the input layers:
// blob_name_to_idx[blob_name] = i.)
int num_top = layer_param.top_size();
for (int top_id = 0; top_id < num_top; ++top_id) {
// This call creates the layer's top blob; it actually news the Blob object and pushes its pointer into top_vecs_.
AppendTop(param, layer_id, top_id, &available_blobs, &blob_name_to_idx);
// Collect Input layer tops as Net inputs.
if (layer_param.type() == "Input") {
const int blob_id = blobs_.size() - 1;
net_input_blob_indices_.push_back(blob_id);
net_input_blobs_.push_back(blobs_[blob_id].get());
}
}
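// Example (hypothetical prototxt): an Input layer whose top is collected
// above as a net input --
//   layer {
//     name: "data"  type: "Input"  top: "data"
//     input_param { shape { dim: 1 dim: 3 dim: 224 dim: 224 } }
//   }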
// If the layer specifies that AutoTopBlobs() -> true and the LayerParameter
// specified fewer than the required number (as specified by
// ExactNumTopBlobs() or MinTopBlobs()), allocate them here.
// If the layer auto-generates top blobs (see the earlier post on Layer),
// anonymous tops are created here to satisfy ExactNumTopBlobs() and
// MinTopBlobs().
Layer<Dtype>* layer = layers_[layer_id].get();
if (layer->AutoTopBlobs()) {
const int needed_num_top =
std::max(layer->MinTopBlobs(), layer->ExactNumTopBlobs());
for (; num_top < needed_num_top; ++num_top) {
// Add "anonymous" top blobs -- do not modify available_blobs or
// blob_name_to_idx as we don't want these blobs to be usable as input
// to other layers.
AppendTop(param, layer_id, num_top, NULL, NULL);
}
}
// After this layer is connected, set it up.
// The layer itself and its input bottom blobs and output top blobs have
// been created above. Once everything is connected, call the layer's
// SetUp with the bottom and top blob pointers to set it up.
// SetUp() allocates memory for the created parameter blobs and, if
// necessary, reshapes the layer's bottom and top blobs.
layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]);
LOG_IF(INFO, Caffe::root_solver())
<< "Setting up " << layer_names_[layer_id];
// blob_loss_weights_ needs as many entries as there are top blob ids.
for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) {
if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) {
blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0));
}
blob_loss_weights_[top_id_vecs_[layer_id][top_id]] = layer->loss(top_id);
// The familiar "Top shape:" log line; for shape_string(), see the earlier post on Blob.
LOG_IF(INFO, Caffe::root_solver())
<< "Top shape: " << top_vecs_[layer_id][top_id]->shape_string();
if (layer->loss(top_id)) {
LOG_IF(INFO, Caffe::root_solver())
<< " with loss weight " << layer->loss(top_id);
}
// Use the Blob class's count() to tally the space consumed.
memory_used_ += top_vecs_[layer_id][top_id]->count();
}
// The familiar "Memory required for data:" log line.
LOG_IF(INFO, Caffe::root_solver())
<< "Memory required for data: " << memory_used_ * sizeof(Dtype);
const int param_size = layer_param.param_size();
// Number of parameter blobs this layer owns, i.e. how many weight blobs the layer has; e.g. convolution and inner-product layers each have two (weights and bias).
const int num_param_blobs = layers_[layer_id]->blobs().size();
// param_size is the number of ParamSpec param entries in the
// LayerParameter object layer_param; num_param_blobs is the number of
// learnable parameter blobs in the Layer. We need
// param_size <= num_param_blobs.
CHECK_LE(param_size, num_param_blobs)
<< "Too many params specified for layer " << layer_param.name();
ParamSpec default_param_spec;
for (int param_id = 0; param_id < num_param_blobs; ++param_id) {
const ParamSpec* param_spec = (param_id < param_size) ?
&layer_param.param(param_id) : &default_param_spec;
// A nonzero learning rate multiplier means the parameter needs backward.
const bool param_need_backward = param_spec->lr_mult() != 0;
need_backward |= param_need_backward;
layers_[layer_id]->set_param_propagate_down(param_id,
param_need_backward);
}
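// Example (hypothetical prototxt): freezing a layer's weights with
//   param { lr_mult: 0 }
// makes param_need_backward false for that blob, so its gradient is not
// computed.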
for (int param_id = 0; param_id < num_param_blobs; ++param_id) {
// Register the layer's parameter blobs with the net; only layers that
// actually have parameters contribute any, e.g. convolution and fully
// connected layers have weight and bias blobs.
// This function mostly updates the parameter-related bookkeeping; the
// actual parameter blobs were created in the SetUp() call above. E.g.,
// it pushes pointers to the layer's parameter blobs into params_.
AppendParam(param, layer_id, param_id);
}
// Finally, set the backward flag for this layer.
layer_need_backward_.push_back(need_backward);
if (need_backward) {
for (int top_id = 0; top_id < top_id_vecs_[layer_id].size(); ++top_id) {
blob_need_backward_[top_id_vecs_[layer_id][top_id]] = true;
}
}
}
/* At this point all the layers have been created and set up; the part below walks backward through the net to correct the backward settings. */
// Go through the net backwards to determine which blobs contribute to the
// loss. We can skip backward computation for blobs that don't contribute
// to the loss.
// Also checks if all bottom blobs don't need backward computation
// (possible because of the skip_propagate_down param) and so we can skip
// backward computation for the entire layer.
// The flags above were set in forward order; the code below corrects them
// in backward order: the backward computation of layers that do not
// contribute to the loss can be skipped, and we also check whether all of
// a layer's bottom blobs can skip backward. Two sets store, by name, the
// blobs that do / do not need backward.
set<string> blobs_under_loss;
set<string> blobs_skip_backp;
// Iterate over the layer ids in reverse.
for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) {
bool layer_contributes_loss = false;
bool layer_skip_propagate_down = true;
for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) {
const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]];
if (layers_[layer_id]->loss(top_id) ||
(blobs_under_loss.find(blob_name) != blobs_under_loss.end())) {
layer_contributes_loss = true;
}
if (blobs_skip_backp.find(blob_name) == blobs_skip_backp.end()) {
layer_skip_propagate_down = false;
}
if (layer_contributes_loss && !layer_skip_propagate_down)
break;
}
// If this layer can skip backward computation, none of its bottom blobs
// need backpropagation either.
if (layer_need_backward_[layer_id] && layer_skip_propagate_down) {
layer_need_backward_[layer_id] = false;
for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size();
++bottom_id) {
bottom_need_backward_[layer_id][bottom_id] = false;
}
}
if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; }
if (Caffe::root_solver()) {
if (layer_need_backward_[layer_id]) {
LOG(INFO) << layer_names_[layer_id] << " needs backward computation.";
} else {
LOG(INFO) << layer_names_[layer_id]
<< " does not need backward computation.";
}
}
// Correct the backward-propagation requirements set during the forward pass above.
for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size();
++bottom_id) {
if (layer_contributes_loss) {
const string& blob_name =
blob_names_[bottom_id_vecs_[layer_id][bottom_id]];
blobs_under_loss.insert(blob_name);
} else {
bottom_need_backward_[layer_id][bottom_id] = false;
}
if (!bottom_need_backward_[layer_id][bottom_id]) {
const string& blob_name =
blob_names_[bottom_id_vecs_[layer_id][bottom_id]];
blobs_skip_backp.insert(blob_name);
}
}
}
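// Example: in a hypothetical net  data -> conv1 -> ip1 -> loss  with an
// extra dangling  conv1 -> argmax  branch, this reverse sweep finds that
// "argmax" contributes nothing to the loss and clears its
// layer_need_backward_ flag, so no backward pass is run for it.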
// Handle force_backward if needed.
if (param.force_backward()) {
for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) {
layer_need_backward_[layer_id] = true;
for (int bottom_id = 0;
bottom_id < bottom_need_backward_[layer_id].size(); ++bottom_id) {
bottom_need_backward_[layer_id][bottom_id] =
bottom_need_backward_[layer_id][bottom_id] ||
layers_[layer_id]->AllowForceBackward(bottom_id);
blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] =
blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] ||
bottom_need_backward_[layer_id][bottom_id];
}
for (int param_id = 0; param_id < layers_[layer_id]->blobs().size();
++param_id) {
layers_[layer_id]->set_param_propagate_down(param_id, true);
}
}
}
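// Example (hypothetical prototxt): a top-level  force_backward: true  in
// the net definition re-enables these flags wherever the layers allow it,
// which is useful when input gradients are needed (e.g. for
// visualization).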
// In the end, all blobs still left in available_blobs are considered output blobs.
for (set<string>::iterator it = available_blobs.begin();
it != available_blobs.end(); ++it) {
LOG_IF(INFO, Caffe::root_solver())
<< "This network produces output " << *it;
net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get());
net_output_blob_indices_.push_back(blob_name_to_idx[*it]);
}
for (size_t blob_id = 0; blob_id < blob_names_.size(); ++blob_id) {
blob_names_index_[blob_names_[blob_id]] = blob_id;
}
for (size_t layer_id = 0; layer_id < layer_names_.size(); ++layer_id) {
layer_names_index_[layer_names_[layer_id]] = layer_id;
}
ShareWeights();
debug_info_ = param.debug_info();
LOG_IF(INFO, Caffe::root_solver()) << "Network initialization done.";
}
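// A minimal sketch of inspecting the structures Init() just built (the
// prototxt file name is hypothetical):
//
//   caffe::Net<float> net("deploy.prototxt", caffe::TEST);
//   LOG(INFO) << "num layers: " << net.layer_names().size();
//   for (int i = 0; i < net.num_outputs(); ++i) {
//     LOG(INFO) << "output: " << net.output_blobs()[i]->shape_string();
//   }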
// Filter out layers as required, based on the current network state and the include/exclude rules.
template <typename Dtype>
void Net<Dtype>::FilterNet(const NetParameter& param,
NetParameter* param_filtered) {
NetState net_state(param.state());
// Using the protobuf-generated API, copy the input network parameters param into param_filtered.
param_filtered->CopyFrom(param);
// Using the protobuf-generated API, clear the layer field of param_filtered.
param_filtered->clear_layer();
// For each layer, check whether it has include/exclude rules and, if so, prune by those rules; see the earlier post on stage/level for the details.
for (int i = 0; i < param.layer_size(); ++i) {
const LayerParameter& layer_param = param.layer(i);
const string& layer_name = layer_param.name();
// A layer may specify include rules or exclude rules, but not both.
CHECK(layer_param.include_size() == 0 || layer_param.exclude_size() == 0)
<< "Specify either include rules or exclude rules; not both.";
// If no include rules are specified, the layer is included by default and
// only excluded if it meets one of the exclude rules.
// If it does have include rules, it is included only when one of them matches.
bool layer_included = (layer_param.include_size() == 0);
for (int j = 0; layer_included && j < layer_param.exclude_size(); ++j) {
if (StateMeetsRule(net_state, layer_param.exclude(j), layer_name)) {
layer_included = false;
}
}
for (int j = 0; !layer_included && j < layer_param.include_size(); ++j) {
if (StateMeetsRule(net_state, layer_param.include(j), layer_name)) {
layer_included = true;
}
}
if (layer_included) {
param_filtered->add_layer()->CopyFrom(layer_param);
}
}
}
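// Example (hypothetical prototxt): a data layer kept only during TRAIN --
//   layer { name: "data" type: "Data" include { phase: TRAIN } ... }
// When the net is built with phase == TEST, FilterNet drops this layer
// from param_filtered.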
// Check whether the state (phase/level/stage) meets the rule; the returned bool decides whether the layer is included.
template <typename Dtype>
bool Net<Dtype>::StateMeetsRule(const NetState& state,
const NetStateRule& rule, const string& layer_name) {
// Check whether the rule is broken due to phase.
if (rule.has_phase()) {
if (rule.phase() != state.phase()) {
LOG_IF(INFO, Caffe::root_solver())
<< "The NetState phase (" << state.phase()
<< ") differed from the phase (" << rule.phase()
<< ") specified by a rule in layer " << layer_name;
return false;
}
}
// Check whether the rule is broken due to min level.
if (rule.has_min_level()) {
if (state.level() < rule.min_level()) {
LOG_IF(INFO, Caffe::root_solver())
<< "The NetState level (" << state.level()
<< ") is above the min_level (" << rule.min_level()
<< ") specified by a rule in layer " << layer_name;
return false;
}
}
// Check whether the rule is broken due to max level.
if (rule.has_max_level()) {
if (state.level() > rule.max_level()) {
LOG_IF(INFO, Caffe::root_solver())
<< "The NetState level (" << state.level()
<< ") is above the max_level (" << rule.max_level()
<< ") specified by a rule in layer " << layer_name;
return false;
}
}
// Check whether the rule is broken due to stage. The NetState must
// contain ALL of the rule's stages to meet it.
for (int i = 0; i < rule.stage_size(); ++i) {
// Check that the NetState contains the rule's ith stage.
bool has_stage = false;
for (int j = 0; !has_stage && j < state.stage_size(); ++j) {
if (rule.stage(i) == state.stage(j)) { has_stage = true; }
}
if (!has_stage) {
LOG_IF(INFO, Caffe::root_solver())
<< "The NetState did not contain stage '" << rule.stage(i)
<< "' specified by a rule in layer " << layer_name;
return false;
}
}
// Check whether the rule is broken due to not_stage. The NetState must
// contain NONE of the rule's not_stages to meet it.
for (int i = 0; i < rule.not_stage_size(); ++i) {
// Check that the NetState contains the rule's ith not_stage.
bool has_stage = false;
for (int j = 0; !has_stage && j < state.stage_size(); ++j) {
if (rule.not_stage(i) == state.stage(j)) { has_stage = true; }
}
if (has_stage) {
LOG_IF(INFO, Caffe::root_solver())
<< "The NetState contained a not_stage '" << rule.not_stage(i)
<< "' specified by a rule in layer " << layer_name;
return false;
}
}
return true;
}
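To see the rule matching that StateMeetsRule() implements in isolation, here is a minimal sketch, assuming only the protobuf-generated NetState and NetStateRule API (all values hypothetical):
#include "caffe/proto/caffe.pb.h"

void ToyRuleDemo() {
  caffe::NetState state;
  state.set_phase(caffe::TRAIN);
  state.set_level(1);
  state.add_stage("finetune");

  caffe::NetStateRule rule;
  rule.set_phase(caffe::TRAIN);  // same phase: ok
  rule.set_min_level(0);         // 0 <= 1: ok
  rule.set_max_level(2);         // 1 <= 2: ok
  rule.add_stage("finetune");    // the state carries the stage: ok
  // With these values StateMeetsRule(state, rule, "some_layer") returns
  // true; adding rule.add_not_stage("finetune") would make it false.
}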