caffe源码解读(4)-concat_layer.cpp以及slice_layer.cpp

一.Concate

作用:实现多个输入数据的拼接
输入:x1,x2,…,xk 输出:y
x1: N*C*H*W
x2: N*C*H*W
xk: N*C*H*W
y: kN*C*H*W(concat_dim=0)
y: N*kC*H*W(concat_dim=1)
参数:两个作用相同 ①axis ②concat_dim

(1)caffe.proto层参数定义

message ConcatParameter {
  // Axis along which to concatenate; default is 1 (the channel axis).
  // Negative indexing is supported: -1 means the last axis.
  optional int32 axis = 2 [default = 1];
  // Same effect as axis, but being uint32 it cannot take a negative index.
  optional uint32 concat_dim = 1 [default = 1];
}

(2)LayerSetUp

template <typename Dtype>
void ConcatLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // The user may set at most one of the two equivalent parameters
  // (axis / concat_dim); setting both is ambiguous and rejected.
  const ConcatParameter& concat_param = this->layer_param_.concat_param();
  CHECK(!concat_param.has_axis() || !concat_param.has_concat_dim())
      << "Either axis or concat_dim should be specified; not both.";
}

(3)Reshape

template <typename Dtype>
void ConcatLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Resolve the concatenation axis, then size the top blob: identical to
  // bottom[0] on every axis except concat_axis_, which accumulates the
  // sizes of all bottoms.
  const int num_axes = bottom[0]->num_axes();
  const ConcatParameter& concat_param = this->layer_param_.concat_param();
  if (concat_param.has_concat_dim()) {
    concat_axis_ = static_cast<int>(concat_param.concat_dim());
    // Don't allow negative indexing for concat_dim, a uint32 -- almost
    // certainly unintended.
    CHECK_GE(concat_axis_, 0) << "casting concat_dim from uint32 to int32 "
        << "produced negative result; concat_dim must satisfy "
        << "0 <= concat_dim < " << kMaxBlobAxes;
    CHECK_LT(concat_axis_, num_axes) << "concat_dim out of range.";
  } else {
    // BUG FIX: stray (non-comment) Chinese prose followed "} else {" here,
    // which did not compile. Fall back to axis (default 1: channels),
    // normalized so negative indices count from the last axis.
    concat_axis_ = bottom[0]->CanonicalAxisIndex(concat_param.axis());
  }
  // Initialize with the first blob.
  vector<int> top_shape = bottom[0]->shape();
  // Product of the axes before concat_axis_: number of outer slices.
  num_concats_ = bottom[0]->count(0, concat_axis_);
  // Product of the axes after concat_axis_: size of one inner slice.
  concat_input_size_ = bottom[0]->count(concat_axis_ + 1);
  int bottom_count_sum = bottom[0]->count();
  for (int i = 1; i < bottom.size(); ++i) {
    CHECK_EQ(num_axes, bottom[i]->num_axes())
        << "All inputs must have the same #axes.";
    for (int j = 0; j < num_axes; ++j) {
      // Every axis except the concatenation axis must agree across inputs.
      if (j == concat_axis_) { continue; }
      CHECK_EQ(top_shape[j], bottom[i]->shape(j))
          << "All inputs must have the same shape, except at concat_axis.";
    }
    bottom_count_sum += bottom[i]->count();
    top_shape[concat_axis_] += bottom[i]->shape(concat_axis_);
  }
  top[0]->Reshape(top_shape);
  // Sanity check: the output must hold exactly the sum of all inputs.
  CHECK_EQ(bottom_count_sum, top[0]->count());
  if (bottom.size() == 1) {
    // Single input: share data and diff instead of copying; Forward and
    // Backward then become no-ops.
    top[0]->ShareData(*bottom[0]);
    top[0]->ShareDiff(*bottom[0]);
  }
}

(4)Forward前向传播

template <typename Dtype>
void ConcatLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // With a single input the top already shares the bottom's data
  // (set up in Reshape), so there is nothing to copy.
  if (bottom.size() == 1) { return; }
  Dtype* out = top[0]->mutable_cpu_data();
  const int total_axis_dim = top[0]->shape(concat_axis_);
  int axis_offset = 0;  // running position along the concat axis
  for (int i = 0; i < bottom.size(); ++i) {
    const Dtype* in = bottom[i]->cpu_data();
    const int axis_dim = bottom[i]->shape(concat_axis_);
    const int chunk = axis_dim * concat_input_size_;  // elements per outer slice
    // Interleave this bottom's chunks into the output at the running offset.
    for (int n = 0; n < num_concats_; ++n) {
      caffe_copy(chunk, in + n * chunk,
          out + (n * total_axis_dim + axis_offset) * concat_input_size_);
    }
    axis_offset += axis_dim;
  }
}

(5)Backward反向传播
原理:把输出求得的梯度直接复制给输入即可。

template <typename Dtype>
void ConcatLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Single input: diff is already shared with the top (see Reshape).
  if (bottom.size() == 1) { return; }
  const Dtype* top_diff = top[0]->cpu_diff();
  const int total_axis_dim = top[0]->shape(concat_axis_);
  int axis_offset = 0;  // running position along the concat axis
  for (int i = 0; i < bottom.size(); ++i) {
    const int axis_dim = bottom[i]->shape(concat_axis_);
    if (propagate_down[i]) {
      Dtype* bottom_diff = bottom[i]->mutable_cpu_diff();
      const int chunk = axis_dim * concat_input_size_;
      // Scatter each slab of the top gradient back into this bottom's diff;
      // concatenation's gradient is a plain copy of the matching region.
      for (int n = 0; n < num_concats_; ++n) {
        caffe_copy(chunk,
            top_diff + (n * total_axis_dim + axis_offset) * concat_input_size_,
            bottom_diff + n * chunk);
      }
    }
    // Advance the offset even when not propagating, to stay aligned.
    axis_offset += axis_dim;
  }
}

(6)Usage使用

layer {
  name: "feat_all"
  type: "Concat"
  bottom: "feat"    # first input
  bottom: "feat_p"  # second input
  top: "feat_all"   # concatenated output
  concat_param {
    axis: 1  # concatenate along the channel axis
  }
}

二.Slice

作用:把输入按维度进行切片
(1)caffe.proto层参数定义
参数:①axis  ②slice_point  ③slice_dim

message SliceParameter {
  optional int32 axis = 3 [default = 1];  // axis along which to slice
  repeated uint32 slice_point = 2;  // split axis at these indices
  optional uint32 slice_dim = 1 [default = 1];  // same effect as axis
}

同样包括LayerSetUp、Reshape、Forward、Backward
(2)LayerSetUp

template <typename Dtype>
void SliceLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // The user may set at most one of the two equivalent parameters
  // (axis / slice_dim); setting both is ambiguous and rejected.
  const SliceParameter& slice_param = this->layer_param_.slice_param();
  CHECK(!slice_param.has_axis() || !slice_param.has_slice_dim())
      << "Either axis or slice_dim should be specified; not both.";
  // Cache the requested split points for use in Reshape.
  slice_point_.assign(slice_param.slice_point().begin(),
      slice_param.slice_point().end());
}

(3)Reshape

template <typename Dtype>
void SliceLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Resolve the slicing axis, then size each top blob according to
  // slice_point_ (or an even split when no slice points are given).
  const int num_axes = bottom[0]->num_axes();
  const SliceParameter& slice_param = this->layer_param_.slice_param();
  if (slice_param.has_slice_dim()) {
    slice_axis_ = static_cast<int>(slice_param.slice_dim());
    // Don't allow negative indexing for slice_dim, a uint32 -- almost
    // certainly unintended.
    CHECK_GE(slice_axis_, 0) << "casting slice_dim from uint32 to int32 "
        << "produced negative result; slice_dim must satisfy "
        << "0 <= slice_dim < " << kMaxBlobAxes;
    CHECK_LT(slice_axis_, num_axes) << "slice_dim out of range.";
  } else {
    // BUG FIX: the "} else {" was missing here, so a user-specified
    // slice_dim was unconditionally overwritten by the (default) axis.
    // Fall back to axis, normalized so negative indices count from the
    // last axis.
    slice_axis_ = bottom[0]->CanonicalAxisIndex(slice_param.axis());
  }
  vector<int> top_shape = bottom[0]->shape();
  // Extent of the input along the slicing axis.
  const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
  // Product of the axes before slice_axis_: number of outer slices.
  num_slices_ = bottom[0]->count(0, slice_axis_);
  // Product of the axes after slice_axis_: size of one inner slice.
  slice_size_ = bottom[0]->count(slice_axis_ + 1);
  int count = 0;
  if (slice_point_.size() != 0) {
    // Explicit slice points: k points split the axis into k+1 tops.
    CHECK_EQ(slice_point_.size(), top.size() - 1);
    CHECK_LE(top.size(), bottom_slice_axis);
    int prev = 0;
    vector<int> slices;
    for (int i = 0; i < slice_point_.size(); ++i) {
      // Slice points must be strictly increasing.
      CHECK_GT(slice_point_[i], prev);
      slices.push_back(slice_point_[i] - prev);
      prev = slice_point_[i];
    }
    // The last top takes whatever remains after the final slice point.
    slices.push_back(bottom_slice_axis - prev);
    for (int i = 0; i < top.size(); ++i) {
      top_shape[slice_axis_] = slices[i];
      top[i]->Reshape(top_shape);
      count += top[i]->count();
    }
  } else {
    // No slice points: the axis must divide evenly among the tops.
    CHECK_EQ(bottom_slice_axis % top.size(), 0)
        << "Number of top blobs (" << top.size() << ") should evenly "
        << "divide input slice axis (" << bottom_slice_axis << ")";
    top_shape[slice_axis_] = bottom_slice_axis / top.size();
    for (int i = 0; i < top.size(); ++i) {
      top[i]->Reshape(top_shape);
      count += top[i]->count();
    }
  }
  // Sanity check: the tops together must hold exactly the input.
  CHECK_EQ(count, bottom[0]->count());
  if (top.size() == 1) {
    // Single output: share data and diff instead of copying; Forward and
    // Backward then become no-ops.
    top[0]->ShareData(*bottom[0]);
    top[0]->ShareDiff(*bottom[0]);
  }
}

(4)Forward前向传播

template <typename Dtype>
void SliceLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Single output: the top already shares the bottom's data (see Reshape).
  if (top.size() == 1) { return; }
  const Dtype* src = bottom[0]->cpu_data();
  const int src_axis_dim = bottom[0]->shape(slice_axis_);
  int axis_offset = 0;  // running position along the slicing axis
  for (int i = 0; i < top.size(); ++i) {
    Dtype* dst = top[i]->mutable_cpu_data();
    const int dst_axis_dim = top[i]->shape(slice_axis_);
    const int chunk = dst_axis_dim * slice_size_;  // elements per outer slice
    // Gather this output's slab out of each outer slice of the input.
    for (int n = 0; n < num_slices_; ++n) {
      caffe_copy(chunk,
          src + (n * src_axis_dim + axis_offset) * slice_size_,
          dst + n * chunk);
    }
    axis_offset += dst_axis_dim;
  }
}

(5)Backward反向传播

template <typename Dtype>
void SliceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Nothing to do when no gradient is requested, or when the single top
  // already shares the bottom's diff (see Reshape).
  if (!propagate_down[0] || top.size() == 1) { return; }
  Dtype* dst = bottom[0]->mutable_cpu_diff();
  const int dst_axis_dim = bottom[0]->shape(slice_axis_);
  int axis_offset = 0;  // running position along the slicing axis
  for (int i = 0; i < top.size(); ++i) {
    const Dtype* src = top[i]->cpu_diff();
    const int src_axis_dim = top[i]->shape(slice_axis_);
    const int chunk = src_axis_dim * slice_size_;
    // Slicing's gradient is the inverse of Forward: scatter each top's
    // gradient slab back into the matching region of the bottom diff.
    for (int n = 0; n < num_slices_; ++n) {
      caffe_copy(chunk, src + n * chunk,
          dst + (n * dst_axis_dim + axis_offset) * slice_size_);
    }
    axis_offset += src_axis_dim;
  }
}

(6)Usage使用
说明:slice_point的个数等于top个数减去1

layer {
  name: "slice_pair"
  type: "Slice"
  bottom: "pair_data"
  top: "data"
  top: "data_p"
  slice_param {
    slice_dim: 1   # slice along the channel axis
    slice_point: 1
  }
}
  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值