A Blob represents a four-dimensional array in memory. From the fastest-varying axis to the slowest, the dimensions are (width_, height_, channels_, num_): width_ and height_ are the width and height of the image, channels_ is the number of color channels, and num_ indexes the image within the batch.
Member variables:
shared_ptr<SyncedMemory> data_;        // pointer to the data
shared_ptr<SyncedMemory> diff_;        // pointer to the gradient (diff)
shared_ptr<SyncedMemory> shape_data_;  // shape held in SyncedMemory so it is also visible on the GPU (gpu_shape())
vector<int> shape_;                    // shape information
int count_;                            // number of valid elements
int capacity_;                         // allocated capacity of the Blob
1. Constructors
Blob() : data_(), diff_(), count_(0), capacity_(0) {}

template <typename Dtype>
Blob<Dtype>::Blob(const int num, const int channels, const int height,
    const int width)
    // capacity_ must be initialized before calling Reshape
    : capacity_(0) {
  Reshape(num, channels, height, width);
}

template <typename Dtype>
Blob<Dtype>::Blob(const vector<int>& shape)
    // capacity_ must be initialized before calling Reshape
    : capacity_(0) {
  Reshape(shape);
}
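A minimal usage sketch (the shape values are illustrative; only the Caffe headers are assumed):
#include "caffe/blob.hpp"
using caffe::Blob;

Blob<float> blob(2, 3, 4, 5);  // num=2, channels=3, height=4, width=5
// blob.count() == 2 * 3 * 4 * 5 == 120; the underlying SyncedMemory
// allocates lazily, so no CPU/GPU buffer exists until first access.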
2. The reshape functions
template <typename Dtype>
void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
    const int width) {
  vector<int> shape(4);
  shape[0] = num;
  shape[1] = channels;
  shape[2] = height;
  shape[3] = width;
  Reshape(shape);
}
template <typename Dtype>
void Blob<Dtype>::Reshape(const vector<int>& shape) {
  CHECK_LE(shape.size(), kMaxBlobAxes);  // at most 32 axes
  count_ = 1;  // total element count = num * channels * height * width
  shape_.resize(shape.size());
  if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) {
    shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int)));
  }
  int* shape_data = static_cast<int*>(shape_data_->mutable_cpu_data());
  for (int i = 0; i < shape.size(); ++i) {
    CHECK_GE(shape[i], 0);
    if (count_ != 0) {
      CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX";
    }
    count_ *= shape[i];  // accumulate the total element count
    shape_[i] = shape[i];
    shape_data[i] = shape[i];
  }
  if (count_ > capacity_) {  // reallocate only when the new count exceeds the capacity
    capacity_ = count_;
    data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
    diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
  }
}
Two related overloads:
void Reshape(const BlobShape& shape);
void ReshapeLike(const Blob& other);
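Because Reshape only reallocates when count_ grows past capacity_, shrinking a blob and growing it back within its old capacity is free. A sketch with illustrative shapes:
Blob<float> blob(2, 3, 4, 5);  // capacity_ = 120
blob.Reshape(1, 3, 4, 5);      // count_ = 60  <= capacity_: no reallocation
blob.Reshape(2, 3, 4, 5);      // count_ = 120 <= capacity_: still no reallocation
blob.Reshape(4, 3, 4, 5);      // count_ = 240 >  capacity_: data_/diff_ reallocated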
3. Computing offsets
inline int offset(const int n, const int c = 0, const int h = 0,
    const int w = 0) const {
  CHECK_GE(n, 0);
  CHECK_LE(n, num());
  CHECK_GE(channels(), 0);
  CHECK_LE(c, channels());
  CHECK_GE(height(), 0);
  CHECK_LE(h, height());
  CHECK_GE(width(), 0);
  CHECK_LE(w, width());
  return ((n * channels() + c) * height() + h) * width() + w;
}

inline int offset(const vector<int>& indices) const {
  CHECK_LE(indices.size(), num_axes());
  int offset = 0;
  for (int i = 0; i < num_axes(); ++i) {
    offset *= shape(i);
    if (indices.size() > i) {
      CHECK_GE(indices[i], 0);
      CHECK_LT(indices[i], shape(i));
      offset += indices[i];
    }
  }
  return offset;
}
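Both overloads implement the row-major layout offset(n, c, h, w) = ((n * C + c) * H + h) * W + w, with width varying fastest. A worked example for shape (2, 3, 4, 5):
// offset(1, 2, 3, 4) = ((1 * 3 + 2) * 4 + 3) * 5 + 4
//                    = (5 * 4 + 3) * 5 + 4
//                    = 23 * 5 + 4 = 119, the last of the 120 elements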
4. Element access
inline Dtype data_at(const int n, const int c, const int h,
    const int w) const {
  return cpu_data()[offset(n, c, h, w)];
}
inline Dtype diff_at(const int n, const int c, const int h,
    const int w) const {
  return cpu_diff()[offset(n, c, h, w)];
}
inline Dtype data_at(const vector<int>& index) const {
  return cpu_data()[offset(index)];
}
inline Dtype diff_at(const vector<int>& index) const {
  return cpu_diff()[offset(index)];
}
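These helpers read a single element through the CPU pointer, so each call may trigger a GPU-to-CPU copy; they are convenient for debugging but too slow for inner loops. A quick sketch:
Blob<float> blob(1, 1, 2, 3);
blob.mutable_cpu_data()[blob.offset(0, 0, 1, 2)] = 7.0f;
// blob.data_at(0, 0, 1, 2) == 7.0f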
5. Accessor functions
(1) const Dtype* cpu_data() const;  // get the read-only CPU data pointer
template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_data() const {
  CHECK(data_);
  return (const Dtype*)data_->cpu_data();
}
(2) void set_cpu_data(Dtype* data);  // set the CPU data pointer
template <typename Dtype>
void Blob<Dtype>::set_cpu_data(Dtype* data) {
  CHECK(data);
  data_->set_cpu_data(data);
}
The others are analogous:
const int* gpu_shape() const;
const Dtype* gpu_data() const;
const Dtype* cpu_diff() const;
const Dtype* gpu_diff() const;
(3) Dtype* mutable_cpu_data();  // read-write access to the CPU data pointer
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_data() {
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_cpu_data());
}
The others are analogous:
Dtype* mutable_gpu_data();
Dtype* mutable_cpu_diff();
Dtype* mutable_gpu_diff();
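The const accessors only synchronize (they never mark a copy as stale), while the mutable_* accessors mark the chosen side (CPU or GPU) as holding the newest data, so the next access from the other device triggers a copy. The typical write-then-read pattern:
Blob<float> blob(1, 1, 1, 4);
float* out = blob.mutable_cpu_data();  // head moves to HEAD_AT_CPU
for (int i = 0; i < blob.count(); ++i) { out[i] = static_cast<float>(i); }
const float* in = blob.cpu_data();     // read-only access; no copy needed here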
6. Updating a Blob
// Update() applies the accumulated diff to the data of a parameter Blob (not implemented for int and unsigned int)
template <> void Blob<unsigned int>::Update() { NOT_IMPLEMENTED; }
template <> void Blob<int>::Update() { NOT_IMPLEMENTED; }

template <typename Dtype>
void Blob<Dtype>::Update() {
  // We will perform the update based on where the data is located.
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    // perform computation on CPU: data_[i] -= diff_[i]
    caffe_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->cpu_data()),
        static_cast<Dtype*>(data_->mutable_cpu_data()));
    break;
  case SyncedMemory::HEAD_AT_GPU:  // data is on the GPU, or CPU/GPU are already in sync
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    // perform computation on GPU: data_[i] -= diff_[i]
    caffe_gpu_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->gpu_data()),
        static_cast<Dtype*>(data_->mutable_gpu_data()));
#else
    NO_GPU;  // built with CPU_ONLY: GPU support is disabled
#endif
    break;
  default:
    LOG(FATAL) << "Syncedmem not initialized.";
  }
}
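This is exactly the axpy form data = data + (-1) * diff, which is what a solver needs once it has scaled the gradient by the learning rate and stored it in diff. A minimal sketch with illustrative values:
Blob<float> weight(1, 1, 1, 3);
float* w = weight.mutable_cpu_data();
float* g = weight.mutable_cpu_diff();
for (int i = 0; i < weight.count(); ++i) { w[i] = 1.0f; g[i] = 0.1f; }
weight.Update();  // afterwards w[i] == 0.9f: data -= diff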
7. Norm computations over data
(1) L1 norm (sum of absolute values)
Not implemented for integer types:
template <> unsigned int Blob<unsigned int>::asum_data() const;
template <> int Blob<int>::asum_data() const;
template <typename Dtype>
Dtype Blob<Dtype>::asum_data() const {
  if (!data_) { return 0; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    return caffe_cpu_asum(count_, cpu_data());
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
  {
    Dtype asum;
    caffe_gpu_asum(count_, gpu_data(), &asum);
    return asum;
  }
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return 0;
}
Dtype asum_diff() const;
(2) Squared L2 norm (sum of squares)
Not implemented for integer types:
template <> unsigned int Blob<unsigned int>::sumsq_data() const;
template <> int Blob<int>::sumsq_data() const;
template <typename Dtype>
Dtype Blob<Dtype>::sumsq_data() const {
  Dtype sumsq;
  const Dtype* data;
  if (!data_) { return 0; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    data = cpu_data();
    sumsq = caffe_cpu_dot(count_, data, data);
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    data = gpu_data();
    caffe_gpu_dot(count_, data, data, &sumsq);
#else
    NO_GPU;
#endif
    break;
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return sumsq;
}
Dtype sumsq_diff() const;
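Concretely, asum_data() returns the sum of |data[i]| and sumsq_data() the sum of data[i] squared. A quick check with illustrative values:
Blob<float> b(1, 1, 1, 3);
float* d = b.mutable_cpu_data();
d[0] = -1.0f; d[1] = 2.0f; d[2] = -3.0f;
// b.asum_data()  == 6.0f   (1 + 2 + 3)
// b.sumsq_data() == 14.0f  (1 + 4 + 9)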
8. Other operations
(1) Scaling
Not implemented for integer types:
template <> void Blob<unsigned int>::scale_data(unsigned int scale_factor);
template <> void Blob<int>::scale_data(int scale_factor);
template <> void Blob<unsigned int>::scale_diff(unsigned int scale_factor);
template <> void Blob<int>::scale_diff(int scale_factor);
template <typename Dtype>
void Blob<Dtype>::scale_data(Dtype scale_factor) {
  Dtype* data;
  if (!data_) { return; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    data = mutable_cpu_data();
    caffe_scal(count_, scale_factor, data);  // data[i] *= scale_factor
    return;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    data = mutable_gpu_data();
    caffe_gpu_scal(count_, scale_factor, data);
    return;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
}
scale_diff is analogous, scaling diff_ instead of data_:
template <typename Dtype>
void Blob<Dtype>::scale_diff(Dtype scale_factor);
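Usage is a single call per tensor; for example, a solver could rescale gradients before calling Update():
blob.scale_diff(0.5f);  // diff[i] *= 0.5
blob.scale_data(2.0f);  // data[i] *= 2.0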
(2) Shape comparison
template <typename Dtype>
bool Blob<Dtype>::ShapeEquals(const BlobProto& other) {
  if (other.has_num() || other.has_channels() ||
      other.has_height() || other.has_width()) {
    // Using deprecated 4D Blob dimensions --
    // shape is (num, channels, height, width).
    // Note: we do not use the normal Blob::num(), Blob::channels(), etc.
    // methods as these index from the beginning of the blob shape, where legacy
    // parameter blobs were indexed from the end of the blob shape (e.g., bias
    // Blob shape (1 x 1 x 1 x N), IP layer weight Blob shape (1 x 1 x M x N)).
    return shape_.size() <= 4 &&
           LegacyShape(-4) == other.num() &&
           LegacyShape(-3) == other.channels() &&
           LegacyShape(-2) == other.height() &&
           LegacyShape(-1) == other.width();
  }
  // otherwise compare the shape vectors directly
  vector<int> other_shape(other.shape().dim_size());
  for (int i = 0; i < other.shape().dim_size(); ++i) {
    other_shape[i] = other.shape().dim(i);
  }
  return shape_ == other_shape;
}
(3) Copying a Blob
template <typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (source.count() != count_ || source.shape() != shape_) {
    if (reshape) {
      ReshapeLike(source);  // shapes differ and reshaping is allowed, so reshape
    } else {                // shapes differ but reshaping was not requested
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
  case Caffe::GPU:
    if (copy_diff) {  // copy the gradient instead of the data
      caffe_copy(count_, source.gpu_diff(),
          static_cast<Dtype*>(diff_->mutable_gpu_data()));
    } else {
      caffe_copy(count_, source.gpu_data(),
          static_cast<Dtype*>(data_->mutable_gpu_data()));
    }
    break;
  case Caffe::CPU:
    if (copy_diff) {
      caffe_copy(count_, source.cpu_diff(),
          static_cast<Dtype*>(diff_->mutable_cpu_data()));
    } else {
      caffe_copy(count_, source.cpu_data(),
          static_cast<Dtype*>(data_->mutable_cpu_data()));
    }
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}
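A usage sketch, letting the destination reshape itself to match the source:
Blob<float> src(2, 3, 4, 5);
Blob<float> dst;  // empty blob
dst.CopyFrom(src, /*copy_diff=*/false, /*reshape=*/true);
// dst now has shape (2, 3, 4, 5) and holds a copy of src's data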
(4) Loading a Blob from disk: FromProto
template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
  if (reshape) {  // read the required dimensions from the BlobProto
    vector<int> shape;
    if (proto.has_num() || proto.has_channels() ||
        proto.has_height() || proto.has_width()) {
      // Using deprecated 4D Blob dimensions --
      // shape is (num, channels, height, width).
      shape.resize(4);
      shape[0] = proto.num();
      shape[1] = proto.channels();
      shape[2] = proto.height();
      shape[3] = proto.width();
    } else {
      shape.resize(proto.shape().dim_size());
      for (int i = 0; i < proto.shape().dim_size(); ++i) {
        shape[i] = proto.shape().dim(i);
      }
    }
    Reshape(shape);  // reshape to match the proto
  } else {
    CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)";
  }
  // copy data
  Dtype* data_vec = mutable_cpu_data();
  if (proto.double_data_size() > 0) {  // the parameters were saved as double
    CHECK_EQ(count_, proto.double_data_size());
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.double_data(i);
    }
  } else {  // otherwise load the float data
    CHECK_EQ(count_, proto.data_size());
    for (int i = 0; i < count_; ++i) {
      data_vec[i] = proto.data(i);
    }
  }
  if (proto.double_diff_size() > 0) {
    CHECK_EQ(count_, proto.double_diff_size());
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.double_diff(i);
    }
  } else if (proto.diff_size() > 0) {
    CHECK_EQ(count_, proto.diff_size());
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }
  }
}
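A loading sketch using ReadProtoFromBinaryFile from caffe/util/io.hpp (the file name is hypothetical):
#include "caffe/util/io.hpp"

caffe::BlobProto proto;
caffe::ReadProtoFromBinaryFile("weights.binaryproto", &proto);  // hypothetical file
caffe::Blob<float> blob;
blob.FromProto(proto, /*reshape=*/true);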
(5) Saving a Blob to disk: ToProto
template <>
void Blob<double>::ToProto(BlobProto* proto, bool write_diff) const {
  proto->clear_shape();
  for (int i = 0; i < shape_.size(); ++i) {
    proto->mutable_shape()->add_dim(shape_[i]);
  }
  proto->clear_double_data();
  proto->clear_double_diff();
  const double* data_vec = cpu_data();
  for (int i = 0; i < count_; ++i) {
    proto->add_double_data(data_vec[i]);
  }
  if (write_diff) {
    const double* diff_vec = cpu_diff();
    for (int i = 0; i < count_; ++i) {
      proto->add_double_diff(diff_vec[i]);
    }
  }
}
template <>
void Blob<float>::ToProto(BlobProto* proto, bool write_diff) const {
  proto->clear_shape();  // reset the shape stored in the proto
  for (int i = 0; i < shape_.size(); ++i) {
    proto->mutable_shape()->add_dim(shape_[i]);
  }
  proto->clear_data();  // clear any old data
  proto->clear_diff();  // clear any old diff
  const float* data_vec = cpu_data();  // export the data into the proto
  for (int i = 0; i < count_; ++i) {
    proto->add_data(data_vec[i]);
  }
  if (write_diff) {
    const float* diff_vec = cpu_diff();
    for (int i = 0; i < count_; ++i) {
      proto->add_diff(diff_vec[i]);
    }
  }
}
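And the round trip back to disk with WriteProtoToBinaryFile, also from caffe/util/io.hpp (the file name is again hypothetical):
caffe::BlobProto proto;
blob.ToProto(&proto, /*write_diff=*/true);
caffe::WriteProtoToBinaryFile(proto, "weights.binaryproto");  // hypothetical file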