Tracing the Caffe source code: Net

Let's start with caffe/include/caffe/net.hpp:

#ifndef CAFFE_NET_HPP_
#define CAFFE_NET_HPP_

#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {
template <typename Dtype>
class Net {
 public:
  explicit Net(const NetParameter& param, const Net* root_net = NULL);  // constructor
  explicit Net(const string& param_file, Phase phase,
      const int level = 0, const vector<string>* stages = NULL,
      const Net* root_net = NULL);
  virtual ~Net() {}  // virtual destructor
  void Init(const NetParameter& param);  // initialize the network from the given parameters
  const vector<Blob<Dtype>*>& Forward(Dtype* loss = NULL);  // forward pass
  const vector<Blob<Dtype>*>& ForwardPrefilled(Dtype* loss = NULL) {  // deprecated; use Forward() above
    LOG_EVERY_N(WARNING, 1000) << "DEPRECATED: ForwardPrefilled() "
        << "will be removed in a future version. Use Forward().";
    return Forward(loss);
  }
  Dtype ForwardFromTo(int start, int end);
  Dtype ForwardFrom(int start);
  Dtype ForwardTo(int end);
  const vector<Blob<Dtype>*>& Forward(const vector<Blob<Dtype>* > & bottom,
      Dtype* loss = NULL);  // deprecated; use Forward() instead
  void ClearParamDiffs();  // zero out all parameter gradients; a necessary step before Backward()
  void Backward();  // the following functions run the backward pass
  void BackwardFromTo(int start, int end);
  void BackwardFrom(int start);
  void BackwardTo(int end);
  void Reshape();  // reshape every layer from input to output
  Dtype ForwardBackward() {
    Dtype loss;
    Forward(&loss);
    Backward();
    return loss;
  }
  void Update();  // apply one update to the network weights
  void ShareWeights();  // called during initialization to set up weight sharing
  void ShareTrainedLayersWith(const Net* other);  // share layers with another network
  void CopyTrainedLayersFrom(const NetParameter& param);
  void CopyTrainedLayersFrom(const string trained_filename);
  void CopyTrainedLayersFromBinaryProto(const string trained_filename);
  void CopyTrainedLayersFromHDF5(const string trained_filename);
  void ToProto(NetParameter* param, bool write_diff = false) const;  // serialize the network to a protobuf
  void ToHDF5(const string& filename, bool write_diff = false) const;  // write the network to an HDF5 file
  inline const string& name() const { return name_; }  // network name
  inline const vector<string>& layer_names() const { return layer_names_; }  // layer names
  inline const vector<string>& blob_names() const { return blob_names_; }  // blob names
  inline const vector<shared_ptr<Blob<Dtype> > >& blobs() const {  // pointers to the blobs
    return blobs_;
  }
  inline const vector<shared_ptr<Layer<Dtype> > >& layers() const {  // pointers to the layers
    return layers_;
  }
  inline Phase phase() const { return phase_; }  // current phase
  inline const vector<vector<Blob<Dtype>*> >& bottom_vecs() const {  // bottom vectors of each layer; usually not needed except for pre-training checks
    return bottom_vecs_;
  }
  inline const vector<vector<Blob<Dtype>*> >& top_vecs() const {  // top vectors of each layer; usually not needed except for pre-training checks
    return top_vecs_;
  }
  inline const vector<int> & top_ids(int i) const {  // ids of the top blobs of layer i
    CHECK_GE(i, 0) << "Invalid layer id";
    CHECK_LT(i, top_id_vecs_.size()) << "Invalid layer id";
    return top_id_vecs_[i];
  }
  inline const vector<int> & bottom_ids(int i) const {  // ids of the bottom blobs of layer i
    CHECK_GE(i, 0) << "Invalid layer id";
    CHECK_LT(i, bottom_id_vecs_.size()) << "Invalid layer id";
    return bottom_id_vecs_[i];
  }
  inline const vector<vector<bool> >& bottom_need_backward() const {  // whether each bottom blob needs backpropagation
    return bottom_need_backward_;
  }
  inline const vector<Dtype>& blob_loss_weights() const {  // loss weights of the blobs
    return blob_loss_weights_;
  }
  inline const vector<bool>& layer_need_backward() const {  // whether each layer needs backpropagation
    return layer_need_backward_;
  }
  inline const vector<shared_ptr<Blob<Dtype> > >& params() const {  // network parameters
    return params_;
  }
  inline const vector<Blob<Dtype>*>& learnable_params() const {  // learnable parameters
    return learnable_params_;
  }
  inline const vector<float>& params_lr() const { return params_lr_; }  // learning-rate multipliers
  inline const vector<bool>& has_params_lr() const { return has_params_lr_; }  // whether a learning-rate multiplier was specified
  inline const vector<float>& params_weight_decay() const {  // weight-decay multipliers of the learnable parameters
    return params_weight_decay_;
  }
  inline const vector<bool>& has_params_decay() const {
    return has_params_decay_;
  }
  const map<string, int>& param_names_index() const {
    return param_names_index_;
  }
  inline const vector<int>& param_owners() const { return param_owners_; }
  inline const vector<string>& param_display_names() const {
    return param_display_names_;
  }
  // The following accessors return the network inputs/outputs and their counts
  inline int num_inputs() const { return net_input_blobs_.size(); }
  inline int num_outputs() const { return net_output_blobs_.size(); }
  inline const vector<Blob<Dtype>*>& input_blobs() const {
    return net_input_blobs_;
  }
  inline const vector<Blob<Dtype>*>& output_blobs() const {
    return net_output_blobs_;
  }
  inline const vector<int>& input_blob_indices() const {
    return net_input_blob_indices_;
  }
  inline const vector<int>& output_blob_indices() const {
    return net_output_blob_indices_;
  }
  bool has_blob(const string& blob_name) const;
  const shared_ptr<Blob<Dtype> > blob_by_name(const string& blob_name) const;
  bool has_layer(const string& layer_name) const;
  const shared_ptr<Layer<Dtype> > layer_by_name(const string& layer_name) const;
  void set_debug_info(const bool value) { debug_info_ = value; }
  static void FilterNet(const NetParameter& param,  // remove layers that the current state rules out; see the .cpp file for details
      NetParameter* param_filtered);
  static bool StateMeetsRule(const NetState& state, const NetStateRule& rule,
      const string& layer_name);  // check whether a state satisfies a rule

 protected:
  void AppendTop(const NetParameter& param, const int layer_id,  // append a new top (output) blob to the net
                 const int top_id, set<string>* available_blobs,
                 map<string, int>* blob_name_to_idx);
  int AppendBottom(const NetParameter& param, const int layer_id,  // append a new bottom (input) blob to the net
                   const int bottom_id, set<string>* available_blobs,
                   map<string, int>* blob_name_to_idx);
  void AppendParam(const NetParameter& param, const int layer_id,  // append a new parameter blob to the net
                   const int param_id);
  void ForwardDebugInfo(const int layer_id);  // print debug info during Forward
  void BackwardDebugInfo(const int layer_id);  // print debug info during Backward
  void UpdateDebugInfo(const int param_id);  // print debug info during Update
  string name_;  // network name
  Phase phase_;  // phase
  vector<shared_ptr<Layer<Dtype> > > layers_;  // all layers
  vector<string> layer_names_;  // name of each layer
  map<string, int> layer_names_index_;  // layer names and their indices
  vector<bool> layer_need_backward_;  // whether each layer needs backward
  // The following containers store the intermediate results between layers
  vector<shared_ptr<Blob<Dtype> > > blobs_;
  vector<string> blob_names_;  // blob names
  map<string, int> blob_names_index_;
  vector<bool> blob_need_backward_;  // whether each blob needs backward
  vector<vector<Blob<Dtype>*> > bottom_vecs_;  // the actual data lives in blobs_; these only hold pointers: with n layers, bottom_vecs_ holds n vectors of pointers, each pointing to that layer's bottom data
  vector<vector<int> > bottom_id_vecs_;
  vector<vector<bool> > bottom_need_backward_;  // whether each bottom blob of each layer needs backward
  vector<vector<Blob<Dtype>*> > top_vecs_;
  vector<vector<int> > top_id_vecs_;
  vector<Dtype> blob_loss_weights_;
  vector<vector<int> > param_id_vecs_;
  vector<int> param_owners_;
  vector<string> param_display_names_;
  vector<pair<int, int> > param_layer_indices_;
  map<string, int> param_names_index_;
  vector<int> net_input_blob_indices_;
  vector<int> net_output_blob_indices_;
  vector<Blob<Dtype>*> net_input_blobs_;
  vector<Blob<Dtype>*> net_output_blobs_;
  vector<shared_ptr<Blob<Dtype> > > params_;
  vector<Blob<Dtype>*> learnable_params_;
  vector<int> learnable_param_ids_;
  vector<float> params_lr_;
  vector<bool> has_params_lr_;
  vector<float> params_weight_decay_;
  vector<bool> has_params_decay_;
  size_t memory_used_;
  bool debug_info_;
  const Net* const root_net_;
  DISABLE_COPY_AND_ASSIGN(Net);
};
}  // namespace caffe

#endif  // CAFFE_NET_HPP_
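
Before moving on to the implementation, here is a minimal sketch of how this public interface is typically driven. The prototxt path and iteration count below are hypothetical, and real training normally goes through a Solver rather than calling these methods directly:

#include "caffe/net.hpp"

void toy_training_loop() {
  caffe::Net<float> net("examples/toy/train.prototxt", caffe::TRAIN);  // build a TRAIN-phase net
  for (int iter = 0; iter < 100; ++iter) {
    net.ClearParamDiffs();                // zero all parameter gradients before backward
    float loss = net.ForwardBackward();   // one forward pass plus one backward pass
    net.Update();                         // apply the raw diffs (no solver-side learning-rate handling)
    LOG(INFO) << "iter " << iter << " loss " << loss;
  }
}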

The header only declares these functions; now let's focus on their implementations, in caffe/src/caffe/net.cpp:

#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "hdf5.h"

#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/net.hpp"
#include "caffe/parallel.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/hdf5.hpp"
#include "caffe/util/insert_splits.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/upgrade_proto.hpp"

#include "caffe/test/test_caffe_main.hpp"

namespace caffe {
template <typename Dtype>
Net<Dtype>::Net(const NetParameter& param, const Net* root_net)  // root_net is for multi-GPU parallel training and can be ignored for now; this constructor builds the net directly from a NetParameter
    : root_net_(root_net) {
  Init(param);
}
template <typename Dtype>
Net<Dtype>::Net(const string& param_file, Phase phase,  // this constructor takes a prototxt file as input
    const int level, const vector<string>* stages,
    const Net* root_net)
    : root_net_(root_net) {
  NetParameter param;
  ReadNetParamsFromTextFileOrDie(param_file, &param);
  param.mutable_state()->set_phase(phase);
  if (stages != NULL) {
    for (int i = 0; i < stages->size(); i++) {
      param.mutable_state()->add_stage((*stages)[i]);
    }
  }
  param.mutable_state()->set_level(level);
  Init(param);
}
template <typename Dtype>
void Net<Dtype>::Init(const NetParameter& in_param) {
  CHECK(Caffe::root_solver() || root_net_)
      << "root_net_ needs to be set for all non-root solvers";
  phase_ = in_param.state().phase();
  NetParameter filtered_param;
  FilterNet(in_param, &filtered_param);  // filter the net; filtered_param holds the result
  LOG_IF(INFO, Caffe::root_solver())
      << "Initializing net from parameters: " << std::endl
      << filtered_param.DebugString();
  NetParameter param;
  InsertSplits(filtered_param, &param);  // insert split layers for blobs that feed several layers, so that their gradients are accumulated during backpropagation; declared in caffe/util/insert_splits.hpp
  name_ = param.name();  // network name
  map<string, int> blob_name_to_idx;  // maps each blob name in the net to its id; names are unique (in-place computation is allowed)
  set<string> available_blobs;  // names of blobs that have not yet been consumed; a name is erased once the blob is used as a bottom
  memory_used_ = 0;  // initialize memory usage to 0
  bottom_vecs_.resize(param.layer_size());  // with n layers, bottom_vecs_ holds n vectors of pointers to the bottom data, so its size equals the number of layers
  top_vecs_.resize(param.layer_size());  // likewise
  bottom_id_vecs_.resize(param.layer_size());  // likewise
  param_id_vecs_.resize(param.layer_size());  // likewise
  top_id_vecs_.resize(param.layer_size());  // likewise
  bottom_need_backward_.resize(param.layer_size());  // likewise
  for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) {  // process the layers one by one
    bool share_from_root = !Caffe::root_solver()
        && root_net_->layers_[layer_id]->ShareInParallel();
    if (!param.layer(layer_id).has_phase()) {
      param.mutable_layer(layer_id)->set_phase(phase_);
    }
    const LayerParameter& layer_param = param.layer(layer_id);  // parameters of the current layer
    if (layer_param.propagate_down_size() > 0) {
      CHECK_EQ(layer_param.propagate_down_size(),  // either unspecified (size 0) or specified once per bottom blob
          layer_param.bottom_size())
          << "propagate_down param must be specified "
          << "either 0 or bottom_size times ";
    }
    if (share_from_root) {
      LOG(INFO) << "Sharing layer " << layer_param.name() << " from root net";
      layers_.push_back(root_net_->layers_[layer_id]);
      layers_[layer_id]->SetShared(true);
    } else {
      layers_.push_back(LayerRegistry<Dtype>::CreateLayer(layer_param));  // create and append the layer
    }
    layer_names_.push_back(layer_param.name());  // record the layer name in layer_names_
    LOG_IF(INFO, Caffe::root_solver())
        << "Creating Layer " << layer_param.name();
    bool need_backward = false;
    for (int bottom_id = 0; bottom_id < layer_param.bottom_size();
         ++bottom_id) {  // append every bottom blob that belongs to this layer
      const int blob_id = AppendBottom(param, layer_id, bottom_id,
                                       &available_blobs, &blob_name_to_idx);
      need_backward |= blob_need_backward_[blob_id];  // if any bottom needs backward, the whole layer needs backward
    }
    int num_top = layer_param.top_size();  // number of top blobs of this layer
    for (int top_id = 0; top_id < num_top; ++top_id) {  // append every top blob that belongs to this layer; the initial data layer has no bottoms, so in practice AppendTop runs first
      AppendTop(param, layer_id, top_id, &available_blobs, &blob_name_to_idx);
      if (layer_param.type() == "Input") {  // if the layer type is Input, treat its tops as network inputs
        const int blob_id = blobs_.size() - 1;
        net_input_blob_indices_.push_back(blob_id);  // record the top blob ids of this layer in net_input_blob_indices_
        net_input_blobs_.push_back(blobs_[blob_id].get());  // record the pointers to this layer's top data in net_input_blobs_
      }
    }
    Layer<Dtype>* layer = layers_[layer_id].get();  // layer points to the current layer
    if (layer->AutoTopBlobs()) {  // if the layer allows automatic top blobs and the prototxt specifies fewer tops than required (by ExactNumTopBlobs(), MinTopBlobs(), etc.), allocate the missing ones
      const int needed_num_top =
          std::max(layer->MinTopBlobs(), layer->ExactNumTopBlobs());
      for (; num_top < needed_num_top; ++num_top) {
        AppendTop(param, layer_id, num_top, NULL, NULL);  // add anonymous top blobs without updating available_blobs or blob_name_to_idx, since we do not want other layers to use them
      }
    }
    if (share_from_root) {
      // Set up size of top blobs using root_net_
      const vector<Blob<Dtype>*>& base_top = root_net_->top_vecs_[layer_id];
      const vector<Blob<Dtype>*>& this_top = this->top_vecs_[layer_id];
      for (int top_id = 0; top_id < base_top.size(); ++top_id) {
        this_top[top_id]->ReshapeLike(*base_top[top_id]);
        LOG(INFO) << "Created top blob " << top_id << " (shape: "
            << this_top[top_id]->shape_string() <<  ") for shared layer "
            << layer_param.name();
      }
    } else {
      layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]);  // the layers are now connected; go into each layer's own SetUp
    }
    LOG_IF(INFO, Caffe::root_solver())
        << "Setting up " << layer_names_[layer_id];
    for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) {
      if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) {  // grow blob_loss_weights_ as needed, initializing new entries to 0
        blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0));
      }
      blob_loss_weights_[top_id_vecs_[layer_id][top_id]] = layer->loss(top_id);  // store each top blob's loss weight in blob_loss_weights_; every blob has one
      LOG_IF(INFO, Caffe::root_solver())
          << "Top shape: " << top_vecs_[layer_id][top_id]->shape_string();
      if (layer->loss(top_id)) {
        LOG_IF(INFO, Caffe::root_solver())
            << "    with loss weight " << layer->loss(top_id);
      }
      memory_used_ += top_vecs_[layer_id][top_id]->count();  // memory used is the total count of all top blob data
    }
    LOG_IF(INFO, Caffe::root_solver())
        << "Memory required for data: " << memory_used_ * sizeof(Dtype);
    const int param_size = layer_param.param_size();  // number of ParamSpec entries of this layer (most commonly lr_mult and decay_mult)
    const int num_param_blobs = layers_[layer_id]->blobs().size();  // number of learnable parameter blobs of this layer (weights, biases, ...); param_size <= num_param_blobs
    CHECK_LE(param_size, num_param_blobs)
        << "Too many params specified for layer " << layer_param.name();
    ParamSpec default_param_spec;
    for (int param_id = 0; param_id < num_param_blobs; ++param_id) {
      const ParamSpec* param_spec = (param_id < param_size) ?
          &layer_param.param(param_id) : &default_param_spec;
      const bool param_need_backward = param_spec->lr_mult() != 0;  // a nonzero learning-rate multiplier means this parameter needs backpropagation
      need_backward |= param_need_backward;  // if any parameter needs backpropagation, the layer needs backward
      layers_[layer_id]->set_param_propagate_down(param_id,
                                                  param_need_backward);  // record whether this parameter blob of this layer needs backpropagation
    }
    for (int param_id = 0; param_id < num_param_blobs; ++param_id) {
      AppendParam(param, layer_id, param_id);  // append each parameter blob of this layer
    }
    layer_need_backward_.push_back(need_backward);  // finally record whether this layer needs backward
    if (need_backward) {  // if backpropagation is needed,
      for (int top_id = 0; top_id < top_id_vecs_[layer_id].size(); ++top_id) {
        blob_need_backward_[top_id_vecs_[layer_id][top_id]] = true;  // update blob_need_backward_ (which was pushed with the default false earlier)
      }
    }
  }
  set<string> blobs_under_loss;  // names of blobs that contribute to the loss
  set<string> blobs_skip_backp;  // names of blobs that do not need backpropagation
  for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) {  // walk the layers backwards
    bool layer_contributes_loss = false;  // whether this layer contributes to the loss, initialized to false
    bool layer_skip_propagate_down = true;  // whether this layer can skip backpropagation, initialized to true
    for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) {  // for each top blob of this layer
      const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]];  // name of this top blob
      if (layers_[layer_id]->loss(top_id) ||
          (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) {  // if the layer produces a loss on this top, or the top feeds into the loss, mark the layer as contributing to the loss
        layer_contributes_loss = true;
      }
      if (blobs_skip_backp.find(blob_name) == blobs_skip_backp.end()) {  // if this top needs backpropagation, the layer cannot skip it
        layer_skip_propagate_down = false;
      }
      if (layer_contributes_loss && !layer_skip_propagate_down)  // once the layer both contributes to the loss and needs backpropagation, stop checking the remaining tops
        break;
    }
    if (layer_need_backward_[layer_id] && layer_skip_propagate_down) {  // once the layer is marked as skipping propagation (layer_skip_propagate_down), mark the layer and all its bottoms as not needing backpropagation
      layer_need_backward_[layer_id] = false;
      for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size();
               ++bottom_id) {
        bottom_need_backward_[layer_id][bottom_id] = false;
      }
    }
    if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; }  // if the layer does not contribute to the loss, it likewise does not need backpropagation
    if (Caffe::root_solver()) {
      if (layer_need_backward_[layer_id]) {
        LOG(INFO) << layer_names_[layer_id] << " needs backward computation.";
      } else {
        LOG(INFO) << layer_names_[layer_id]
            << " does not need backward computation.";
      }
    }
    for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size();
         ++bottom_id) {
      if (layer_contributes_loss) {
        const string& blob_name =
            blob_names_[bottom_id_vecs_[layer_id][bottom_id]];
        blobs_under_loss.insert(blob_name);  // record the names of bottom blobs that feed the loss in blobs_under_loss
      } else {
        bottom_need_backward_[layer_id][bottom_id] = false;
      }
      if (!bottom_need_backward_[layer_id][bottom_id]) {
        const string& blob_name =
                   blob_names_[bottom_id_vecs_[layer_id][bottom_id]];
        blobs_skip_backp.insert(blob_name);  // record the names of bottom blobs that do not need backpropagation in blobs_skip_backp
      }
    }
  }
  if (param.force_backward()) {  // if force_backward is set, update the corresponding flags
    for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) {
      layer_need_backward_[layer_id] = true;
      for (int bottom_id = 0;
           bottom_id < bottom_need_backward_[layer_id].size(); ++bottom_id) {
        bottom_need_backward_[layer_id][bottom_id] =
            bottom_need_backward_[layer_id][bottom_id] ||
            layers_[layer_id]->AllowForceBackward(bottom_id);
        blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] =
            blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] ||
            bottom_need_backward_[layer_id][bottom_id];
      }
      for (int param_id = 0; param_id < layers_[layer_id]->blobs().size();
           ++param_id) {
        layers_[layer_id]->set_param_propagate_down(param_id, true);
      }
    }
  }
  // Finally, any blob still left in available_blobs was never used as another layer's bottom, so it is treated as a network output
  for (set<string>::iterator it = available_blobs.begin();
      it != available_blobs.end(); ++it) {
    LOG_IF(INFO, Caffe::root_solver())
        << "This network produces output " << *it;
    net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get());
    net_output_blob_indices_.push_back(blob_name_to_idx[*it]);
  }
  for (size_t blob_id = 0; blob_id < blob_names_.size(); ++blob_id) {
    blob_names_index_[blob_names_[blob_id]] = blob_id;
  }
  for (size_t layer_id = 0; layer_id < layer_names_.size(); ++layer_id) {
    layer_names_index_[layer_names_[layer_id]] = layer_id;
  }
  ShareWeights();  // set up weight sharing
  debug_info_ = param.debug_info();
  LOG_IF(INFO, Caffe::root_solver()) << "Network initialization done.";
}
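
// --- Illustration added for this walkthrough (not part of net.cpp): once
// Init() has run, the bookkeeping built above can be inspected through the
// public accessors, e.g. to print the connectivity that was just set up.
// The prototxt path is hypothetical.
void example_print_topology() {
  Net<float> net("examples/toy/train.prototxt", TRAIN);
  for (int l = 0; l < net.layers().size(); ++l) {
    std::ostringstream msg;
    msg << net.layer_names()[l] << ":";
    for (int b = 0; b < net.bottom_ids(l).size(); ++b)
      msg << " <- " << net.blob_names()[net.bottom_ids(l)[b]];  // bottoms
    for (int t = 0; t < net.top_ids(l).size(); ++t)
      msg << " -> " << net.blob_names()[net.top_ids(l)[t]];     // tops
    LOG(INFO) << msg.str();
  }
}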

template <typename Dtype>
void Net<Dtype>::FilterNet(const NetParameter& param,  // remove from param the layers whose rules are not met (e.g. the include/exclude fields in a prototxt); the filtered layers are kept in param_filtered
    NetParameter* param_filtered) {
  NetState net_state(param.state());
  param_filtered->CopyFrom(param);  // initialize param_filtered from param
  param_filtered->clear_layer();  // then clear its layers
  for (int i = 0; i < param.layer_size(); ++i) {  // check each layer's include and exclude fields
    const LayerParameter& layer_param = param.layer(i);
    const string& layer_name = layer_param.name();
    CHECK(layer_param.include_size() == 0 || layer_param.exclude_size() == 0)  // a layer may specify include rules or exclude rules, but not both
          << "Specify either include rules or exclude rules; not both.";
    bool layer_included = (layer_param.include_size() == 0);
    for (int j = 0; layer_included && j < layer_param.exclude_size(); ++j) {  // if no include rules were given (so only exclude rules are possible), check each exclude rule
      if (StateMeetsRule(net_state, layer_param.exclude(j), layer_name)) {
        layer_included = false;  // an exclude rule matched, so the layer is excluded and the loop stops
      }
    }
    for (int j = 0; !layer_included && j < layer_param.include_size(); ++j) {  // if include rules were given, check each one until a match is found
      if (StateMeetsRule(net_state, layer_param.include(j), layer_name)) {
        layer_included = true;  // an include rule matched, so the layer is included and the loop stops
      }
    }
    if (layer_included) {
      param_filtered->add_layer()->CopyFrom(layer_param);  // copy the included layer into param_filtered, the filtered result
    }
  }
}
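
// --- Illustration added for this walkthrough (not part of net.cpp): a small
// sketch of FilterNet in action. The layer names are made up; the point is
// that a layer whose include rule names the TRAIN phase is dropped from a
// TEST-phase net.
void example_filter_net() {
  NetParameter param;
  param.mutable_state()->set_phase(TEST);
  LayerParameter* train_data = param.add_layer();
  train_data->set_name("train_data");
  train_data->add_include()->set_phase(TRAIN);  // TRAIN-only layer
  LayerParameter* fc = param.add_layer();
  fc->set_name("fc1");                          // no rules: kept in every phase
  NetParameter filtered;
  Net<float>::FilterNet(param, &filtered);
  CHECK_EQ(filtered.layer_size(), 1);  // only "fc1" survives in the TEST phase
}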

template <typename Dtype>
bool Net<Dtype>::StateMeetsRule(const NetState& state,
    const NetStateRule& rule, const string& layer_name) {  // check whether state satisfies rule
  if (rule.has_phase()) {  // the phases must match; e.g. a TEST-only rule fails in a train net
      if (rule.phase() != state.phase()) {
        LOG_IF(INFO, Caffe::root_solver())
            << "The NetState phase (" << state.phase()
            << ") differed from the phase (" << rule.phase()
            << ") specified by a rule in layer " << layer_name;
        return false;
      }
  }
  if (rule.has_min_level()) {  // check against min_level
    if (state.level() < rule.min_level()) {
      LOG_IF(INFO, Caffe::root_solver())
          << "The NetState level (" << state.level()
          << ") is above the min_level (" << rule.min_level()
          << ") specified by a rule in layer " << layer_name;
      return false;
    }
  }
  if (rule.has_max_level()) {  // check against max_level
    if (state.level() > rule.max_level()) {
      LOG_IF(INFO, Caffe::root_solver())
          << "The NetState level (" << state.level()
          << ") is above the max_level (" << rule.max_level()
          << ") specified by a rule in layer " << layer_name;
      return false;
    }
  }
  // Check whether the rule is broken due to stage. The NetState must
  // contain ALL of the rule's stages to meet it.
  for (int i = 0; i < rule.stage_size(); ++i) {
    // Check that the NetState contains the rule's ith stage.
    bool has_stage = false;
    for (int j = 0; !has_stage && j < state.stage_size(); ++j) {
      if (rule.stage(i) == state.stage(j)) { has_stage = true; }
    }
    if (!has_stage) {
      LOG_IF(INFO, Caffe::root_solver())
          << "The NetState did not contain stage '" << rule.stage(i)
          << "' specified by a rule in layer " << layer_name;
      return false;
    }
  }
  // Check whether the rule is broken due to not_stage. The NetState must
  // contain NONE of the rule's not_stages to meet it.
  for (int i = 0; i < rule.not_stage_size(); ++i) {
    // Check that the NetState contains the rule's ith not_stage.
    bool has_stage = false;
    for (int j = 0; !has_stage && j < state.stage_size(); ++j) {
      if (rule.not_stage(i) == state.stage(j)) { has_stage = true; }
    }
    if (has_stage) {
      LOG_IF(INFO, Caffe::root_solver())
          << "The NetState contained a not_stage '" << rule.not_stage(i)
          << "' specified by a rule in layer " << layer_name;
      return false;
    }
  }
  return true;
}
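
// --- Illustration added for this walkthrough (not part of net.cpp): a sketch
// of the stage / not_stage checks above. The stage names are hypothetical.
void example_state_meets_rule() {
  NetState state;
  state.set_phase(TEST);
  state.add_stage("deploy");
  NetStateRule rule;
  rule.set_phase(TEST);         // phase matches
  rule.add_stage("deploy");     // present in the state: ok
  rule.add_not_stage("debug");  // absent from the state: ok
  CHECK(Net<float>::StateMeetsRule(state, rule, "example_layer"));
}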

// Helper for Net::Init: add a new top blob to the net.
template <typename Dtype>
void Net<Dtype>::AppendTop(const NetParameter& param, const int layer_id,
                           const int top_id, set<string>* available_blobs,
                           map<string, int>* blob_name_to_idx) {  // add a new top blob at a given position of a given layer
  shared_ptr<LayerParameter> layer_param(
      new LayerParameter(param.layer(layer_id)));  // layer_param is a copy of the current layer's parameters
  const string& blob_name = (layer_param->top_size() > top_id) ?  // get the name; top holds the names of the top blobs
      layer_param->top(top_id) : "(automatic)";
  if (blob_name_to_idx && layer_param->bottom_size() > top_id &&
      blob_name == layer_param->bottom(top_id)) {  // in-place computation: the bottom and top are the same blob
    LOG_IF(INFO, Caffe::root_solver())
        << layer_param->name() << " -> " << blob_name << " (in-place)";
    top_vecs_[layer_id].push_back(blobs_[(*blob_name_to_idx)[blob_name]].get());
    top_id_vecs_[layer_id].push_back((*blob_name_to_idx)[blob_name]);
  } else if (blob_name_to_idx &&
             blob_name_to_idx->find(blob_name) != blob_name_to_idx->end()) {  // not an in-place computation, but a duplicate blob name: fatal error
    LOG(FATAL) << "Top blob '" << blob_name
               << "' produced by multiple sources.";
  } else {  // the normal case
    if (Caffe::root_solver()) {
      LOG(INFO) << layer_param->name() << " -> " << blob_name;
    }
    shared_ptr<Blob<Dtype> > blob_pointer(new Blob<Dtype>());  // allocate a new blob
    const int blob_id = blobs_.size();  // each new blob gets the next id, starting from 0
    blobs_.push_back(blob_pointer);  // store the pointer in blobs_
    blob_names_.push_back(blob_name);  // store the name in blob_names_
    blob_need_backward_.push_back(false);  // initialize this top blob as not needing backward; it is set to true later if the producing layer needs backward
    if (blob_name_to_idx) { (*blob_name_to_idx)[blob_name] = blob_id; }  // record the blob_name -> blob_id mapping in blob_name_to_idx
    top_id_vecs_[layer_id].push_back(blob_id);  // record the blob id in this layer's top_id_vecs_
    top_vecs_[layer_id].push_back(blob_pointer.get());  // record the pointer to the blob data in this layer's top_vecs_
  }
  if (available_blobs) { available_blobs->insert(blob_name); }  // record this blob's name in available_blobs
}
template <typename Dtype>
int Net<Dtype>::AppendBottom(const NetParameter& param, const int layer_id,  // add a new bottom blob at a given position of a given layer; the blob itself was already created in AppendTop of an earlier layer, since this layer's bottom is a previous layer's top, so AppendBottom only wires it up
    const int bottom_id, set<string>* available_blobs,
    map<string, int>* blob_name_to_idx) {
  const LayerParameter& layer_param = param.layer(layer_id);
  const string& blob_name = layer_param.bottom(bottom_id);  // name of this bottom blob
  if (available_blobs->find(blob_name) == available_blobs->end()) {  // look up the name among the available blobs; failing to find it is fatal, since this layer's bottom must be a previous layer's top
    LOG(FATAL) << "Unknown bottom blob '" << blob_name << "' (layer '"
               << layer_param.name() << "', bottom index " << bottom_id << ")";
  }
  const int blob_id = (*blob_name_to_idx)[blob_name];  // get the blob id from its name
  LOG_IF(INFO, Caffe::root_solver())
      << layer_names_[layer_id] << " <- " << blob_name;
  bottom_vecs_[layer_id].push_back(blobs_[blob_id].get());  // record the pointer to the actual data in this layer's bottom_vecs_
  bottom_id_vecs_[layer_id].push_back(blob_id);  // record the blob id in this layer's bottom_id_vecs_
  available_blobs->erase(blob_name);  // a consumed blob is removed from the available set
  bool need_backward = blob_need_backward_[blob_id];
  // Check if the backpropagation on bottom_id should be skipped
  if (layer_param.propagate_down_size() > 0) {
    need_backward = layer_param.propagate_down(bottom_id);  // the propagate_down parameter decides whether this bottom needs backward
  }
  bottom_need_backward_[layer_id].push_back(need_backward);  // save the result in bottom_need_backward_
  return blob_id;
}

template <typename Dtype>
void Net<Dtype>::AppendParam(const NetParameter& param, const int layer_id,
                             const int param_id) {  // append a layer parameter blob
  const LayerParameter& layer_param = layers_[layer_id]->layer_param();
  const int param_size = layer_param.param_size();
  string param_name =
      (param_size > param_id) ? layer_param.param(param_id).name() : "";  // the name of this parameter, if any (defined in message ParamSpec)
  if (param_name.size()) {
    param_display_names_.push_back(param_name);  // a non-empty name is pushed into param_display_names_ as-is
  } else {
    ostringstream param_display_name;  // an empty name is replaced by the param_id converted to a string
    param_display_name << param_id;
    param_display_names_.push_back(param_display_name.str());
  }
  const int net_param_id = params_.size();  // net_param_id is the global parameter id, starting from 0 and incrementing; param_id is the local id within the layer
  params_.push_back(layers_[layer_id]->blobs()[param_id]);  // push the pointer to this layer's param_id-th parameter blob into params_ (so params_.size() grows by 1); blobs() holds each layer's learnable parameters
  param_id_vecs_[layer_id].push_back(net_param_id);  // record the global parameter id in param_id_vecs_
  param_layer_indices_.push_back(make_pair(layer_id, param_id));  // pair the current layer id with the local parameter id and store it in param_layer_indices_, indexed by net_param_id
  ParamSpec default_param_spec;
  const ParamSpec* param_spec = (layer_param.param_size() > param_id) ?
      &layer_param.param(param_id) : &default_param_spec;
  if (!param_size || !param_name.size() || (param_name.size() &&
      param_names_index_.find(param_name) == param_names_index_.end())) {  // the non-shared case; here learnable_params_ and params_ hold the same entries
    param_owners_.push_back(-1);  // -1 means the current layer is this parameter's "owner"
    if (param_name.size()) {
      param_names_index_[param_name] = net_param_id;  // if param_name is non-empty, record the name and its owner id (net_param_id) in param_names_index_ so that later layers can share the parameter by looking up the name
    }
    const int learnable_param_id = learnable_params_.size();  // learnable_params_ holds the learnable parameters
    learnable_params_.push_back(params_[net_param_id].get());  // pointer to the learnable parameter data (seen from the whole net, not per layer)
    learnable_param_ids_.push_back(learnable_param_id);  // global learnable-parameter id
    has_params_lr_.push_back(param_spec->has_lr_mult());  // whether a learning-rate multiplier was specified
    has_params_decay_.push_back(param_spec->has_decay_mult());  // whether a decay multiplier was specified
    params_lr_.push_back(param_spec->lr_mult());  // store the learning-rate multiplier
    params_weight_decay_.push_back(param_spec->decay_mult());  // store the decay multiplier
  } else {  // the shared case, keyed by param_name; here learnable_params_ and params_ differ: the former stores pointers only to distinct parameters (duplicates are not pushed, only their ids are recorded), while the latter stores the parameter pointers of every layer
    const int owner_net_param_id = param_names_index_[param_name];  // look up the owner id from param_name
    param_owners_.push_back(owner_net_param_id);  // record that id as this parameter's owner
    const pair<int, int>& owner_index =
        param_layer_indices_[owner_net_param_id];  // the layer id and local parameter id of the owner
    const int owner_layer_id = owner_index.first;
    const int owner_param_id = owner_index.second;
    LOG_IF(INFO, Caffe::root_solver()) << "Sharing parameters '" << param_name
        << "' owned by "
        << "layer '" << layer_names_[owner_layer_id] << "', param "
        << "index " << owner_param_id;
    Blob<Dtype>* this_blob = layers_[layer_id]->blobs()[param_id].get();  // this layer's learnable parameter blob for this param_id
    Blob<Dtype>* owner_blob =
        layers_[owner_layer_id]->blobs()[owner_param_id].get();  // the owner's learnable parameter blob
    const int param_size = layer_param.param_size();
    if (param_size > param_id && (layer_param.param(param_id).share_mode() ==
                                  ParamSpec_DimCheckMode_PERMISSIVE)) {  // PERMISSIVE mode only checks that the total counts match before sharing
      CHECK_EQ(this_blob->count(), owner_blob->count())
          << "Cannot share param '" << param_name << "' owned by layer '"
          << layer_names_[owner_layer_id] << "' with layer '"
          << layer_names_[layer_id] << "'; count mismatch.  Owner layer param "
          << "shape is " << owner_blob->shape_string() << "; sharing layer "
          << "shape is " << this_blob->shape_string();
    } else {
      // STRICT mode checks that every dimension matches before sharing
      CHECK(this_blob->shape() == owner_blob->shape())
          << "Cannot share param '" << param_name << "' owned by layer '"
          << layer_names_[owner_layer_id] << "' with layer '"
          << layer_names_[layer_id] << "'; shape mismatch.  Owner layer param "
          << "shape is " << owner_blob->shape_string() << "; sharing layer "
          << "expects shape " << this_blob->shape_string();
    }
    const int learnable_param_id = learnable_param_ids_[owner_net_param_id];
    learnable_param_ids_.push_back(learnable_param_id);  // push the owner's learnable-parameter id
    if (param_spec->has_lr_mult()) {  // this parameter specifies a learning-rate multiplier
      if (has_params_lr_[learnable_param_id]) {  // the owner also specifies one; the two must match
        CHECK_EQ(param_spec->lr_mult(), params_lr_[learnable_param_id])
            << "Shared param '" << param_name << "' has mismatched lr_mult.";
      } else {
        has_params_lr_[learnable_param_id] = true;
        params_lr_[learnable_param_id] = param_spec->lr_mult();  // otherwise take this layer's value
      }
    }
    if (param_spec->has_decay_mult()) {  // same handling for the decay multiplier
      if (has_params_decay_[learnable_param_id]) {
        CHECK_EQ(param_spec->decay_mult(),
                 params_weight_decay_[learnable_param_id])
            << "Shared param '" << param_name << "' has mismatched decay_mult.";
      } else {
        has_params_decay_[learnable_param_id] = true;
        params_weight_decay_[learnable_param_id] = param_spec->decay_mult();
      }
    }
  }
} 
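
// --- Illustration added for this walkthrough (not part of net.cpp): the
// prototxt-level trigger for the sharing branch above is simply giving the
// ParamSpecs of two layers the same name. A minimal sketch (bottoms, tops and
// inner_product_param are omitted, so this alone is not a complete net):
void example_shared_param_spec(NetParameter* param) {
  LayerParameter* ip1 = param->add_layer();
  ip1->set_name("ip1");
  ip1->set_type("InnerProduct");
  ip1->add_param()->set_name("shared_weights");  // weight blob, owned by ip1
  LayerParameter* ip2 = param->add_layer();
  ip2->set_name("ip2");
  ip2->set_type("InnerProduct");
  ip2->add_param()->set_name("shared_weights");  // same name: AppendParam maps ip2's weights to ip1's
}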
template <typename Dtype>
Dtype Net<Dtype>::ForwardFromTo(int start, int end) {  // forward-propagate from layer start to layer end
  CHECK_GE(start, 0);
  CHECK_LT(end, layers_.size());
  Dtype loss = 0;
  for (int i = start; i <= end; ++i) {
    // LOG(ERROR) << "Forwarding " << layer_names_[i];
    Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]);  // go into each layer's own Forward
    loss += layer_loss;  // accumulate the loss of every layer
    if (debug_info_) { ForwardDebugInfo(i); }
  }
  return loss;
}
template <typename Dtype>
Dtype Net<Dtype>::ForwardFrom(int start) {  // forward-propagate from layer start to the last layer
  return ForwardFromTo(start, layers_.size() - 1);
}
template <typename Dtype>
Dtype Net<Dtype>::ForwardTo(int end) {  // forward-propagate from the first layer to layer end
  return ForwardFromTo(0, end);
}
template <typename Dtype>
const vector<Blob<Dtype>*>& Net<Dtype>::Forward(Dtype* loss) {  // run a full forward pass, optionally storing the loss, and return the network outputs
  if (loss != NULL) {
    *loss = ForwardFromTo(0, layers_.size() - 1);
  } else {
    ForwardFromTo(0, layers_.size() - 1);
  }
  return net_output_blobs_;
}
template <typename Dtype>
const vector<Blob<Dtype>*>& Net<Dtype>::Forward(
    const vector<Blob<Dtype>*> & bottom, Dtype* loss) {  // deprecated; use the version above
  LOG_EVERY_N(WARNING, 1000) << "DEPRECATED: Forward(bottom, loss) "
      << "will be removed in a future version. Use Forward(loss).";
  // Copy bottom to net bottoms
  for (int i = 0; i < bottom.size(); ++i) {
    net_input_blobs_[i]->CopyFrom(*bottom[i]);
  }
  return Forward(loss);
}
template <typename Dtype>
void Net<Dtype>::BackwardFromTo(int start, int end) {  // go into each layer's own Backward, from layer start down to layer end
  CHECK_GE(end, 0);
  CHECK_LT(start, layers_.size());
  for (int i = start; i >= end; --i) {
    if (layer_need_backward_[i]) {
      layers_[i]->Backward(
          top_vecs_[i], bottom_need_backward_[i], bottom_vecs_[i]);
      if (debug_info_) { BackwardDebugInfo(i); }
    }
  }
}
template <typename Dtype>
void Net<Dtype>::ForwardDebugInfo(const int layer_id) {  // print debug info for the forward pass of the given layer
  for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) {
    const Blob<Dtype>& blob = *top_vecs_[layer_id][top_id];
    const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]];
    const Dtype data_abs_val_mean = blob.asum_data() / blob.count();
    LOG_IF(INFO, Caffe::root_solver())
        << "    [Forward] "
        << "Layer " << layer_names_[layer_id]
        << ", top blob " << blob_name
        << " data: " << data_abs_val_mean;
  }
  for (int param_id = 0; param_id < layers_[layer_id]->blobs().size();
       ++param_id) {
    const Blob<Dtype>& blob = *layers_[layer_id]->blobs()[param_id];
    const int net_param_id = param_id_vecs_[layer_id][param_id];
    const string& blob_name = param_display_names_[net_param_id];
    const Dtype data_abs_val_mean = blob.asum_data() / blob.count();
    LOG_IF(INFO, Caffe::root_solver())
        << "    [Forward] "
        << "Layer " << layer_names_[layer_id]
        << ", param blob " << blob_name
        << " data: " << data_abs_val_mean;
  }
}
template <typename Dtype>
void Net<Dtype>::BackwardDebugInfo(const int layer_id) {  // print debug info for the backward pass of the given layer
  const vector<Blob<Dtype>*>& bottom_vec = bottom_vecs_[layer_id];
  for (int bottom_id = 0; bottom_id < bottom_vec.size(); ++bottom_id) {
    if (!bottom_need_backward_[layer_id][bottom_id]) { continue; }
    const Blob<Dtype>& blob = *bottom_vec[bottom_id];
    const string& blob_name = blob_names_[bottom_id_vecs_[layer_id][bottom_id]];
    const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count();
    LOG_IF(INFO, Caffe::root_solver())
        << "    [Backward] "
        << "Layer " << layer_names_[layer_id]
        << ", bottom blob " << blob_name
        << " diff: " << diff_abs_val_mean;
  }
  for (int param_id = 0; param_id < layers_[layer_id]->blobs().size();
       ++param_id) {
    if (!layers_[layer_id]->param_propagate_down(param_id)) { continue; }
    const Blob<Dtype>& blob = *layers_[layer_id]->blobs()[param_id];
    const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count();
    LOG_IF(INFO, Caffe::root_solver())
        << "    [Backward] "
        << "Layer " << layer_names_[layer_id]
        << ", param blob " << param_id
        << " diff: " << diff_abs_val_mean;
  }
}
template <typename Dtype>
void Net<Dtype>::UpdateDebugInfo(const int param_id) {  // print debug info for the update step
  const Blob<Dtype>& blob = *params_[param_id];
  const int param_owner = param_owners_[param_id];
  const string& layer_name = layer_names_[param_layer_indices_[param_id].first];
  const string& param_display_name = param_display_names_[param_id];
  const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count();
  if (param_owner < 0) {
    const Dtype data_abs_val_mean = blob.asum_data() / blob.count();
    LOG_IF(INFO, Caffe::root_solver())
        << "    [Update] Layer " << layer_name
        << ", param " << param_display_name
        << " data: " << data_abs_val_mean
        << "; diff: " << diff_abs_val_mean;
  } else {
    const string& owner_layer_name =
        layer_names_[param_layer_indices_[param_owner].first];
    LOG_IF(INFO, Caffe::root_solver())
        << "    [Update] Layer " << layer_name
        << ", param blob " << param_display_name
        << " (owned by layer " << owner_layer_name << ", " << "param "
        << param_display_names_[param_owners_[param_id]] << ")"
        << " diff: " << diff_abs_val_mean;
  }
}
template <typename Dtype>
void Net<Dtype>::ShareTrainedLayersWith(const Net* other) {  // share the trained layer parameters of another net
  int num_source_layers = other->layers().size();
  for (int i = 0; i < num_source_layers; ++i) {
    Layer<Dtype>* source_layer = other->layers()[i].get();
    const string& source_layer_name = other->layer_names()[i];
    int target_layer_id = 0;
    while (target_layer_id != layer_names_.size() &&
        layer_names_[target_layer_id] != source_layer_name) {
      ++target_layer_id;
    }
    if (target_layer_id == layer_names_.size()) {
      LOG(INFO) << "Ignoring source layer " << source_layer_name;
      continue;
    }
    DLOG(INFO) << "Copying source layer " << source_layer_name;
    vector<shared_ptr<Blob<Dtype> > >& target_blobs =
        layers_[target_layer_id]->blobs();
    CHECK_EQ(target_blobs.size(), source_layer->blobs().size())
        << "Incompatible number of blobs for layer " << source_layer_name;
    for (int j = 0; j < target_blobs.size(); ++j) {
      Blob<Dtype>* source_blob = source_layer->blobs()[j].get();
      CHECK(target_blobs[j]->shape() == source_blob->shape())
          << "Cannot share param " << j << " weights from layer '"
          << source_layer_name << "'; shape mismatch.  Source param shape is "
          << source_blob->shape_string() << "; target param shape is "
          << target_blobs[j]->shape_string();
      target_blobs[j]->ShareData(*source_blob);
    }
  }
}
template <typename Dtype>
void Net<Dtype>::BackwardFrom(int start) {
  BackwardFromTo(start, 0);
}
template <typename Dtype>
void Net<Dtype>::BackwardTo(int end) {
  BackwardFromTo(layers_.size() - 1, end);
}
template <typename Dtype>
void Net<Dtype>::Backward() {
  BackwardFromTo(layers_.size() - 1, 0);
  if (debug_info_) {
    Dtype asum_data = 0, asum_diff = 0, sumsq_data = 0, sumsq_diff = 0;
    for (int i = 0; i < learnable_params_.size(); ++i) {
      asum_data += learnable_params_[i]->asum_data();
      asum_diff += learnable_params_[i]->asum_diff();
      sumsq_data += learnable_params_[i]->sumsq_data();
      sumsq_diff += learnable_params_[i]->sumsq_diff();
    }
    const Dtype l2norm_data = std::sqrt(sumsq_data);
    const Dtype l2norm_diff = std::sqrt(sumsq_diff);
    LOG(ERROR) << "    [Backward] All net params (data, diff): "
               << "L1 norm = (" << asum_data << ", " << asum_diff << "); "
               << "L2 norm = (" << l2norm_data << ", " << l2norm_diff << ")";
  }
}
template <typename Dtype>
void Net<Dtype>::Reshape() {  // reshape every layer
  for (int i = 0; i < layers_.size(); ++i) {
    layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]);
  }
}
template <typename Dtype>
void Net<Dtype>::CopyTrainedLayersFrom(const NetParameter& param) {  // copy parameters from the given net parameters
  int num_source_layers = param.layer_size();  // number of layers in the source net
  for (int i = 0; i < num_source_layers; ++i) {  // for each source layer
    const LayerParameter& source_layer = param.layer(i);
    const string& source_layer_name = source_layer.name();  // name of the source layer
    int target_layer_id = 0;
    while (target_layer_id != layer_names_.size() &&
        layer_names_[target_layer_id] != source_layer_name) {  // search this net for a layer with the same name; if none is found, the source layer is ignored
      ++target_layer_id;
    }
    if (target_layer_id == layer_names_.size()) {
      LOG(INFO) << "Ignoring source layer " << source_layer_name;
      continue;
    }
    DLOG(INFO) << "Copying source layer " << source_layer_name;
    vector<shared_ptr<Blob<Dtype> > >& target_blobs =
        layers_[target_layer_id]->blobs();
    CHECK_EQ(target_blobs.size(), source_layer.blobs_size())
        << "Incompatible number of blobs for layer " << source_layer_name;
    for (int j = 0; j < target_blobs.size(); ++j) {
      if (!target_blobs[j]->ShapeEquals(source_layer.blobs(j))) {
        Blob<Dtype> source_blob;
        const bool kReshape = true;
        source_blob.FromProto(source_layer.blobs(j), kReshape);
        LOG(FATAL) << "Cannot copy param " << j << " weights from layer '"
            << source_layer_name << "'; shape mismatch.  Source param shape is "
            << source_blob.shape_string() << "; target param shape is "
            << target_blobs[j]->shape_string() << ". "
            << "To learn this layer's parameters from scratch rather than "
            << "copying from a saved net, rename the layer.";
      }
      const bool kReshape = false;
      target_blobs[j]->FromProto(source_layer.blobs(j), kReshape);
    }
  }
}
template <typename Dtype>
void Net<Dtype>::CopyTrainedLayersFrom(const string trained_filename) {  // load parameters from a file, dispatching to the right loader based on the file type
  if (trained_filename.size() >= 3 &&
      trained_filename.compare(trained_filename.size() - 3, 3, ".h5") == 0) {
    CopyTrainedLayersFromHDF5(trained_filename);
  } else {
    CopyTrainedLayersFromBinaryProto(trained_filename);
  }
}
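
// --- Illustration added for this walkthrough (not part of net.cpp): the usual
// fine-tuning / deployment flow built on the copy functions above. The file
// names are hypothetical.
void example_load_pretrained() {
  Net<float> net("models/example/train_val.prototxt", TRAIN);
  net.CopyTrainedLayersFrom("models/example/pretrained.caffemodel");
  // Source layers whose names are not found in this net are skipped with an
  // "Ignoring source layer" message, which is what makes fine-tuning with
  // renamed layers possible.
}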
template <typename Dtype>
void Net<Dtype>::CopyTrainedLayersFromBinaryProto(  // load parameters from a binary protobuf file
    const string trained_filename) {
  NetParameter param;
  ReadNetParamsFromBinaryFileOrDie(trained_filename, &param);
  CopyTrainedLayersFrom(param);
}
template <typename Dtype>
void Net<Dtype>::CopyTrainedLayersFromHDF5(const string trained_filename) {  // load parameters from an HDF5 file
  hid_t file_hid = H5Fopen(trained_filename.c_str(), H5F_ACC_RDONLY,
                           H5P_DEFAULT);
  CHECK_GE(file_hid, 0) << "Couldn't open " << trained_filename;
  hid_t data_hid = H5Gopen2(file_hid, "data", H5P_DEFAULT);
  CHECK_GE(data_hid, 0) << "Error reading weights from " << trained_filename;
  int num_layers = hdf5_get_num_links(data_hid);
  for (int i = 0; i < num_layers; ++i) {
    string source_layer_name = hdf5_get_name_by_idx(data_hid, i);
    if (!layer_names_index_.count(source_layer_name)) {
      LOG(INFO) << "Ignoring source layer " << source_layer_name;
      continue;
    }
    int target_layer_id = layer_names_index_[source_layer_name];
    DLOG(INFO) << "Copying source layer " << source_layer_name;
    vector<shared_ptr<Blob<Dtype> > >& target_blobs =
        layers_[target_layer_id]->blobs();
    hid_t layer_hid = H5Gopen2(data_hid, source_layer_name.c_str(),
        H5P_DEFAULT);
    CHECK_GE(layer_hid, 0)
        << "Error reading weights from " << trained_filename;
    // Check that source layer doesn't have more params than target layer
    int num_source_params = hdf5_get_num_links(layer_hid);
    CHECK_LE(num_source_params, target_blobs.size())
        << "Incompatible number of blobs for layer " << source_layer_name;
    for (int j = 0; j < target_blobs.size(); ++j) {
      ostringstream oss;
      oss << j;
      string dataset_name = oss.str();
      int target_net_param_id = param_id_vecs_[target_layer_id][j];
      if (!H5Lexists(layer_hid, dataset_name.c_str(), H5P_DEFAULT)) {
        // Target param doesn't exist in source weights...
        if (param_owners_[target_net_param_id] != -1) {
          // ...but it's weight-shared in target, so that's fine.
          continue;
        } else {
          LOG(FATAL) << "Incompatible number of blobs for layer "
              << source_layer_name;
        }
      }
      hdf5_load_nd_dataset(layer_hid, dataset_name.c_str(), 0, kMaxBlobAxes,
          target_blobs[j].get());
    }
    H5Gclose(layer_hid);
  }
  H5Gclose(data_hid);
  H5Fclose(file_hid);
}
template <typename Dtype>
void Net<Dtype>::ToProto(NetParameter* param, bool write_diff) const {  // serialize the net to a protobuf
  param->Clear();
  param->set_name(name_);
  DLOG(INFO) << "Serializing " << layers_.size() << " layers";
  for (int i = 0; i < layers_.size(); ++i) {
    LayerParameter* layer_param = param->add_layer();
    layers_[i]->ToProto(layer_param, write_diff);
  }
}
template <typename Dtype>
void Net<Dtype>::ToHDF5(const string& filename, bool write_diff) const {  // write the net to an HDF5 file
  hid_t file_hid = H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT,
      H5P_DEFAULT);
  CHECK_GE(file_hid, 0)
      << "Couldn't open " << filename << " to save weights.";
  hid_t data_hid = H5Gcreate2(file_hid, "data", H5P_DEFAULT, H5P_DEFAULT,
      H5P_DEFAULT);
  CHECK_GE(data_hid, 0) << "Error saving weights to " << filename << ".";
  hid_t diff_hid = -1;
  if (write_diff) {
    diff_hid = H5Gcreate2(file_hid, "diff", H5P_DEFAULT, H5P_DEFAULT,
        H5P_DEFAULT);
    CHECK_GE(diff_hid, 0) << "Error saving weights to " << filename << ".";
  }
  for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) {
    const LayerParameter& layer_param = layers_[layer_id]->layer_param();
    string layer_name = layer_param.name();
    hid_t layer_data_hid = H5Gcreate2(data_hid, layer_name.c_str(),
        H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    CHECK_GE(layer_data_hid, 0)
        << "Error saving weights to " << filename << ".";
    hid_t layer_diff_hid = -1;
    if (write_diff) {
      layer_diff_hid = H5Gcreate2(diff_hid, layer_name.c_str(),
          H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
      CHECK_GE(layer_diff_hid, 0)
          << "Error saving weights to " << filename << ".";
    }
    int num_params = layers_[layer_id]->blobs().size();
    for (int param_id = 0; param_id < num_params; ++param_id) {
      ostringstream dataset_name;
      dataset_name << param_id;
      const int net_param_id = param_id_vecs_[layer_id][param_id];
      if (param_owners_[net_param_id] == -1) {
        // Only save params that own themselves
        hdf5_save_nd_dataset<Dtype>(layer_data_hid, dataset_name.str(),
            *params_[net_param_id]);
      }
      if (write_diff) {
        // Write diffs regardless of weight-sharing
        hdf5_save_nd_dataset<Dtype>(layer_diff_hid, dataset_name.str(),
            *params_[net_param_id], true);
      }
    }
    H5Gclose(layer_data_hid);
    if (write_diff) {
      H5Gclose(layer_diff_hid);
    }
  }
  H5Gclose(data_hid);
  if (write_diff) {
    H5Gclose(diff_hid);
  }
  H5Fclose(file_hid);
}
template <typename Dtype>
void Net<Dtype>::Update() {  // update the learnable parameters
  for (int i = 0; i < learnable_params_.size(); ++i) {
    learnable_params_[i]->Update();
  }
}
template <typename Dtype>
void Net<Dtype>::ClearParamDiffs() {  // zero out the parameter gradients
  for (int i = 0; i < learnable_params_.size(); ++i) {
    Blob<Dtype>* blob = learnable_params_[i];
    switch (Caffe::mode()) {
    case Caffe::CPU:
      caffe_set(blob->count(), static_cast<Dtype>(0),
                blob->mutable_cpu_diff());
      break;
    case Caffe::GPU:
#ifndef CPU_ONLY
      caffe_gpu_set(blob->count(), static_cast<Dtype>(0),
                    blob->mutable_gpu_diff());
#else
      NO_GPU;
#endif
      break;
    }
  }
}
template <typename Dtype>
void Net<Dtype>::ShareWeights() {  // set up weight sharing
  for (int i = 0; i < params_.size(); ++i) {
    if (param_owners_[i] < 0) { continue; }  // this layer owns the parameter, so there is nothing to share
    params_[i]->ShareData(*params_[param_owners_[i]]);  // share the parameter data with the owner
    params_[i]->ShareDiff(*params_[param_owners_[i]]);  // share the parameter gradient with the owner
  }
}
template <typename Dtype>
bool Net<Dtype>::has_blob(const string& blob_name) const {  // check by name whether the net contains a blob
  return blob_names_index_.find(blob_name) != blob_names_index_.end();
}
template <typename Dtype>
const shared_ptr<Blob<Dtype> > Net<Dtype>::blob_by_name(
    const string& blob_name) const {  // return a pointer to the blob if the net has it
  shared_ptr<Blob<Dtype> > blob_ptr;
  if (has_blob(blob_name)) {
    blob_ptr = blobs_[blob_names_index_.find(blob_name)->second];
  } else {
    blob_ptr.reset((Blob<Dtype>*)(NULL));
    LOG(WARNING) << "Unknown blob name " << blob_name;
  }
  return blob_ptr;
}
template <typename Dtype>
bool Net<Dtype>::has_layer(const string& layer_name) const {  // check by name whether the net contains a layer
  return layer_names_index_.find(layer_name) != layer_names_index_.end();
}
template <typename Dtype>
const shared_ptr<Layer<Dtype> > Net<Dtype>::layer_by_name(
    const string& layer_name) const {  // return a pointer to the layer if the net has it
  shared_ptr<Layer<Dtype> > layer_ptr;
  if (has_layer(layer_name)) {
    layer_ptr = layers_[layer_names_index_.find(layer_name)->second];
  } else {
    layer_ptr.reset((Layer<Dtype>*)(NULL));
    LOG(WARNING) << "Unknown layer name " << layer_name;
  }
  return layer_ptr;
}
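
// --- Illustration added for this walkthrough (not part of net.cpp): a minimal
// sketch of feature extraction using the lookup helpers above. The prototxt
// path, weights file and blob name are hypothetical.
void example_extract_features() {
  Net<float> net("models/example/deploy.prototxt", TEST);
  net.CopyTrainedLayersFrom("models/example/weights.caffemodel");
  net.Forward();  // fill every blob
  if (net.has_blob("fc7")) {
    const shared_ptr<Blob<float> > feat = net.blob_by_name("fc7");
    const float* data = feat->cpu_data();  // flattened feature values
    LOG(INFO) << "fc7 has " << feat->count() << " values, first = " << data[0];
  }
}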

INSTANTIATE_CLASS(Net);

}  // namespace caffe