Caffe Code Walkthrough (1): blob.hpp

Blob is a template class declared in include/caffe/blob.hpp; it wraps the SyncedMemory class.

#ifndef CAFFE_BLOB_HPP_
#define CAFFE_BLOB_HPP_

#include <algorithm>
#include <string>
#include <vector>

#include "caffe/common.hpp"         
#include "caffe/proto/caffe.pb.h"     
#include "caffe/syncedmem.hpp"        <span style="color:#cc0000;">//CPU/GPU共享内存类,用于数据同步</span>

const int kMaxBlobAxes = 32;          <span style="color:#cc0000;">//Blob最大维数目</span>

namespace caffe {
// The whole of blob.hpp declares just this one class, including its member variables and member functions
template <typename Dtype>
class Blob {
 public:
  // Default constructor
  Blob()
       : data_(), diff_(), count_(0), capacity_(0) {}

  // Explicit constructors
  explicit Blob(const int num, const int channels, const int height,
      const int width);
  explicit Blob(const vector<int>& shape);

  // Reshape functions: reset the shape of this Blob according to the given dimensions
  void Reshape(const int num, const int channels, const int height,
      const int width);
  void Reshape(const vector<int>& shape);
  void Reshape(const BlobShape& shape);
  void ReshapeLike(const Blob& other);
  // Return the shape as a human-readable string
  inline string shape_string() const {
    ostringstream stream;
    for (int i = 0; i < shape_.size(); ++i) {
      stream << shape_[i] << " ";
    }
    stream << "(" << count_ << ")";
    return stream.str();
  }
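  // Example: for a Blob of shape (2, 3, 4, 5), shape_string() returns "2 3 4 5 (120)".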
  // Return the shape of the Blob
  inline const vector<int>& shape() const { return shape_; }

  // Return the size of a single axis
  inline int shape(int index) const {
    return shape_[CanonicalAxisIndex(index)];
  }
  // Return the number of axes
  inline int num_axes() const { return shape_.size(); }
  // Return the total number of elements in the Blob
  inline int count() const { return count_; }

  // Return the number of elements over a sub-range of axes [start_axis, end_axis)
  inline int count(int start_axis, int end_axis) const {
    CHECK_LE(start_axis, end_axis);
    CHECK_GE(start_axis, 0);
    CHECK_GE(end_axis, 0);
    CHECK_LE(start_axis, num_axes());
    CHECK_LE(end_axis, num_axes());
    int count = 1;
    for (int i = start_axis; i < end_axis; ++i) {
      count *= shape(i);
    }
    return count;
  }
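  // Example: for a Blob of shape (2, 3, 4, 5), count(1, 3) == 3 * 4 == 12.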
 
  // Return the number of elements from a given axis through the last axis
  inline int count(int start_axis) const {
    return count(start_axis, num_axes());
  }

  // Map an axis index from [-N, N) to [0, N); negative indices count from the end
  inline int CanonicalAxisIndex(int axis_index) const {
    CHECK_GE(axis_index, -num_axes())
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    CHECK_LT(axis_index, num_axes())
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    if (axis_index < 0) {
      return axis_index + num_axes();
    }
    return axis_index;
  }
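  // Example: for a 4-D Blob, CanonicalAxisIndex(-1) == 3 and CanonicalAxisIndex(1) == 1.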

  // Legacy accessors: sizes of the 4 canonical axes (num, channels, height, width)
  inline int num() const { return LegacyShape(0); }
  inline int channels() const { return LegacyShape(1); }
  inline int height() const { return LegacyShape(2); }
  inline int width() const { return LegacyShape(3); }
  inline int LegacyShape(int index) const {
    CHECK_LE(num_axes(), 4)
        << "Cannot use legacy accessors on Blobs with > 4 axes.";
    CHECK_LT(index, 4);
    CHECK_GE(index, -4);
    if (index >= num_axes() || index < -num_axes()) {
      // Axis is out of range, but still in [0, 3] (or [-4, -1] for reverse
      // indexing) -- this special case simulates the one-padding used to fill
      // extraneous axes of legacy blobs.
      return 1;
    }
    return shape(index);
  }
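  // Example: a 2-D Blob of shape (10, 20) reports num() == 10, channels() == 20,
  // height() == 1 and width() == 1 through these legacy accessors.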

  // Compute the linear offset, i.e. the position of the given point in the flattened (row-major) data array
  inline int offset(const int n, const int c = 0, const int h = 0,
      const int w = 0) const {
    CHECK_GE(n, 0);
    CHECK_LE(n, num());
    CHECK_GE(channels(), 0);
    CHECK_LE(c, channels());
    CHECK_GE(height(), 0);
    CHECK_LE(h, height());
    CHECK_GE(width(), 0);
    CHECK_LE(w, width());
    return ((n * channels() + c) * height() + h) * width() + w;
  }
  inline int offset(const vector<int>& indices) const {
    CHECK_LE(indices.size(), num_axes());
    int offset = 0;
    for (int i = 0; i < num_axes(); ++i) {
      offset *= shape(i);
      if (indices.size() > i) {
        CHECK_GE(indices[i], 0);
        CHECK_LT(indices[i], shape(i));
        offset += indices[i];
      }
    }
    return offset;
  }
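  // Example: for a Blob of shape (2, 3, 4, 5), offset(1, 2, 3, 4) and the vector
  // overload with indices {1, 2, 3, 4} both evaluate to
  // ((1*3 + 2)*4 + 3)*5 + 4 == 119, the row-major position in the flattened array.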
  
  // Copy the contents of another Blob into this Blob (by value)
  void CopyFrom(const Blob<Dtype>& source, bool copy_diff = false,
      bool reshape = false);

  // Accessors: given a coordinate, return the value stored at that position
  inline Dtype data_at(const int n, const int c, const int h,
      const int w) const {
    return cpu_data()[offset(n, c, h, w)];
  }
  inline Dtype diff_at(const int n, const int c, const int h,
      const int w) const {
    return cpu_diff()[offset(n, c, h, w)];
  }
  inline Dtype data_at(const vector<int>& index) const {
    return cpu_data()[offset(index)];
  }
  inline Dtype diff_at(const vector<int>& index) const {
    return cpu_diff()[offset(index)];
  }
  inline const shared_ptr<SyncedMemory>& data() const {
    CHECK(data_);
    return data_;
  }
  inline const shared_ptr<SyncedMemory>& diff() const {
    CHECK(diff_);
    return diff_;
  }

  // Access the underlying data/diff in different ways (const/mutable, CPU/GPU)
  const Dtype* cpu_data() const;
  void set_cpu_data(Dtype* data);
  const int* gpu_shape() const;
  const Dtype* gpu_data() const;
  const Dtype* cpu_diff() const;
  const Dtype* gpu_diff() const;
  Dtype* mutable_cpu_data();
  Dtype* mutable_gpu_data();
  Dtype* mutable_cpu_diff();
  Dtype* mutable_gpu_diff();
  // Update: computes data_ = data_ - diff_ (used when applying parameter updates)
  void Update();
  // Deserialization: restore a Blob object from a BlobProto message
  void FromProto(const BlobProto& proto, bool reshape = true);
  // Serialization: save this Blob object into a BlobProto message
  void ToProto(BlobProto* proto, bool write_diff = false) const;

  // Compute the L1 norm (sum of absolute values) of the data
  Dtype asum_data() const;
  // Compute the L1 norm (sum of absolute values) of the diff
  Dtype asum_diff() const;
  // Compute the sum of squares (squared L2 norm) of the data
  Dtype sumsq_data() const;
  // Compute the sum of squares (squared L2 norm) of the diff
  Dtype sumsq_diff() const;

  // Scale the data by a constant factor
  void scale_data(Dtype scale_factor);
  // Scale the diff by a constant factor
  void scale_diff(Dtype scale_factor);

  // Share the data of another Blob (the underlying SyncedMemory is shared, not copied)
  void ShareData(const Blob& other);
  // Share the diff of another Blob
  void ShareDiff(const Blob& other);

  bool ShapeEquals(const BlobProto& other);

 protected:
  // Member variable declarations
  shared_ptr<SyncedMemory> data_;
  shared_ptr<SyncedMemory> diff_;
  shared_ptr<SyncedMemory> shape_data_;
  vector<int> shape_;
  int count_;      // Number of valid elements currently stored
  int capacity_;   // Allocated capacity of the Blob, in elements

  DISABLE_COPY_AND_ASSIGN(Blob);
};  // class Blob

}  // namespace caffe

#endif  // CAFFE_BLOB_HPP_
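To make the interface above concrete, here is a minimal usage sketch (not part of the original header; it assumes a float build of Caffe is available to compile and link against) showing how a Blob is typically constructed, filled, reshaped and serialized:

#include <vector>
#include "caffe/blob.hpp"

int main() {
  // Construct a 4-D Blob with the legacy (num, channels, height, width) constructor.
  caffe::Blob<float> blob(1, 3, 4, 5);

  // Fill the data through the mutable CPU pointer.
  float* data = blob.mutable_cpu_data();
  for (int i = 0; i < blob.count(); ++i) {
    data[i] = static_cast<float>(i);
  }

  // Read one element back by its (n, c, h, w) coordinate: offset(0, 2, 3, 4) == 59.
  float last = blob.data_at(0, 2, 3, 4);   // == 59.0f

  // Reshape to 2-D; the memory is reused because the element count (60) is unchanged.
  std::vector<int> new_shape(2);
  new_shape[0] = 3;
  new_shape[1] = 20;
  blob.Reshape(new_shape);

  // Reductions over the data.
  float l1 = blob.asum_data();    // sum of absolute values
  float sq = blob.sumsq_data();   // sum of squares

  // Serialize to a BlobProto message (data only, no diff).
  caffe::BlobProto proto;
  blob.ToProto(&proto, false);

  return (last == 59.0f && l1 > 0 && sq > 0) ? 0 : 1;
}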





                