Caffe source code: the dropout layer


This post is reposted from: http://blog.csdn.net/lanxueCC/article/details/53319872?locationNum=2&fps=1

This post walks through the Caffe source files src/caffe/layers/dropout_layer.cpp and include/caffe/layers/dropout_layer.hpp, which implement the dropout layer used to reduce overfitting.

Overview
The dropout layer helps prevent overfitting during training. In conventional training, every unit in a layer participates in every iteration's forward and backward pass and in every parameter update. With dropout, each unit's output is kept only with a retaining probability (1 - threshold_ in the code below) and zeroed out otherwise, so each iteration effectively updates a randomly sampled sub-network. Because random units are disabled, no feature can rely on being co-adapted with a fixed combination of other units; the network is pushed to learn features that are useful on their own, which makes the trained model more robust.
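
To make the mechanism concrete, here is a minimal, self-contained C++ sketch of inverted dropout on a plain array. This is my own illustration, not Caffe code; the function name and fixed seed are arbitrary. It mirrors what Forward_cpu does in the training phase:

#include <cstdio>
#include <random>
#include <vector>

// Inverted dropout: keep each unit with probability keep_prob = 1 - p and
// rescale kept units by 1 / keep_prob, so that E[y[i]] == x[i].
void dropout_forward(const std::vector<float>& x, float p,
                     std::vector<unsigned int>& mask, std::vector<float>& y) {
  std::mt19937 rng(0);                        // fixed seed, for reproducibility only
  std::bernoulli_distribution keep(1.0 - p);  // P(mask[i] = 1) = 1 - p
  const float scale = 1.0f / (1.0f - p);
  for (std::size_t i = 0; i < x.size(); ++i) {
    mask[i] = keep(rng) ? 1u : 0u;
    y[i] = x[i] * mask[i] * scale;            // same formula as Forward_cpu below
  }
}

int main() {
  std::vector<float> x = {1.f, 2.f, 3.f, 4.f}, y(4);
  std::vector<unsigned int> mask(4);
  dropout_forward(x, 0.5f, mask, y);          // p = 0.5, so scale = 2
  for (float v : y) std::printf("%g ", v);    // prints something like: 2 0 6 8
  std::printf("\n");
  return 0;
}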

Below are the notes I made while reading the dropout layer; if you spot any mistakes, please point them out.

dropout_layer.hpp (include/caffe/layers/dropout_layer.hpp)

#ifndef CAFFE_DROPOUT_LAYER_HPP_
#define CAFFE_DROPOUT_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/neuron_layer.hpp"

namespace caffe {

/**
 * @brief During training only, sets a random portion of @f$x@f$ to 0, adjusting
 *        the rest of the vector magnitude accordingly.
 *
 * @param bottom input Blob vector (length 1)
 *   -# @f$ (N \times C \times H \times W) @f$
 *      the inputs @f$ x @f$
 * @param top output Blob vector (length 1)
 *   -# @f$ (N \times C \times H \times W) @f$
 *      the computed outputs @f$ y @f$ (see the Forward_cpu documentation
 *      below for the exact train/test behavior)
 */
 /* DropoutLayer inherits from NeuronLayer */
template <typename Dtype>
class DropoutLayer : public NeuronLayer<Dtype> {
 public:
  /**
   * @param param provides DropoutParameter dropout_param,
   *     with DropoutLayer options:
   *   - dropout_ratio (\b optional, default 0.5).
   *     Sets the probability @f$ p @f$ that any given unit is dropped.
   */
   /* constructor */
  explicit DropoutLayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}
  /* layer setup: reads the dropout parameters */
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /* reshapes top to match bottom and allocates the random-mask buffer */
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  /* returns the layer type string */
  virtual inline const char* type() const { return "Dropout"; }

 protected:
  /**
   * @param bottom input Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the inputs @f$ x @f$
   * @param top output Blob vector (length 1)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the computed outputs. At training time, we have @f$
   *      y_{\mbox{train}} = \left\{
   *         \begin{array}{ll}
   *            \frac{x}{1 - p} & \mbox{if } u > p \\
   *            0 & \mbox{otherwise}
   *         \end{array} \right.
   *      @f$, where @f$ u \sim U(0, 1)@f$ is generated independently for each
   *      input at each iteration. At test time, we simply have
   *      @f$ y_{\mbox{test}} = \mathbb{E}[y_{\mbox{train}}] = x @f$.
   */
   /* CPU forward pass */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /* GPU forward pass */
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /* CPU backward pass */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  /* GPU backward pass */
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  /// when divided by UINT_MAX, the randomly generated values @f$u\sim U(0,1)@f$
  /* blob holding the random Bernoulli mask, one entry per input element */
  Blob<unsigned int> rand_vec_;
  /// the probability @f$ p @f$ of dropping any input
  /* probability that a given unit is dropped (i.e., skipped for this iteration's update) */
  Dtype threshold_;
  /// the scale for undropped inputs at train time @f$ 1 / (1 - p) @f$
  /*scale_ == 1 / (1 - threshold_)*/
  Dtype scale_;
  /* not used on the CPU path; the GPU kernels in dropout_layer.cu compare raw unsigned random numbers against this threshold */
  unsigned int uint_thres_;
};

}  // namespace caffe

#endif  // CAFFE_DROPOUT_LAYER_HPP_

dropout_layer.cpp (src/caffe/layers/dropout_layer.cpp)

// TODO (sergeyk): effect should not be dependent on phase. wasted memcpy.

#include <vector>

#include "caffe/layers/dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

/* set up the dropout layer; first run the base NeuronLayer setup */
template <typename Dtype>
void DropoutLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  /* dropout_ratio passed in from the prototxt: each unit is dropped */
  /* independently with probability threshold_ in every training iteration */
  threshold_ = this->layer_param_.dropout_param().dropout_ratio();
  DCHECK(threshold_ > 0.);
  DCHECK(threshold_ < 1.);
  /* (1 - threshold_) is the probability that a unit is kept */
  scale_ = 1. / (1. - threshold_);
  uint_thres_ = static_cast<unsigned int>(UINT_MAX * threshold_);/* used by the GPU kernels in dropout_layer.cu */
}
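
For reference, the dropout_ratio read above comes from the layer's definition in the network prototxt. A typical definition looks like the following (the layer and blob names here are placeholders); note that dropout is usually applied in place, with bottom and top naming the same blob:

layer {
  name: "drop1"
  type: "Dropout"
  bottom: "fc1"
  top: "fc1"
  dropout_param {
    dropout_ratio: 0.5
  }
}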

/* reshape and memory allocation; as before, first call NeuronLayer's Reshape to shape top like bottom */
template <typename Dtype>
void DropoutLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::Reshape(bottom, top);
  // Set up the cache for random number generation
  // ReshapeLike does not work because rand_vec_ is of Dtype uint
  // allocate a dedicated buffer to hold the Bernoulli-distributed random mask
  rand_vec_.Reshape(bottom[0]->shape());
}
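
Because rand_vec_ is reshaped to exactly the shape of bottom[0], there is one unsigned int mask entry per input element, and the buffer filled in Forward_cpu is read back by Backward_cpu within the same iteration.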

/* forward pass of the dropout layer (CPU) */
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();/* input data from the previous layer */
  Dtype* top_data = top[0]->mutable_cpu_data();/* output data for the next layer */
  unsigned int* mask = rand_vec_.mutable_cpu_data();/* buffer for the Bernoulli random mask */
  const int count = bottom[0]->count();/* number of elements in the input blob */
  if (this->phase_ == TRAIN) {/* training phase */
    // Create random numbers
    caffe_rng_bernoulli(count, 1. - threshold_, mask); /* draw the mask: P(mask[i] = 1) = 1 - threshold_ */
    for (int i = 0; i < count; ++i) {
      top_data[i] = bottom_data[i] * mask[i] * scale_;  /* kept units are rescaled by 1/(1 - p); dropped units become 0 */
    }
  } else {
    caffe_copy(bottom[0]->count(), bottom_data, top_data); /* at test time every unit passes through unchanged */
  }
}
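
A quick worked example with threshold_ = 0.5, hence scale_ = 2: if bottom_data = [1, 2, 3, 4] and the sampled mask happens to be [1, 0, 1, 1], then top_data = [2, 0, 6, 8]. In expectation each output matches its input, since E[top_data[i]] = (1 - threshold_) * scale_ * bottom_data[i] = bottom_data[i]; this is exactly why the test-time branch can simply copy the data through.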

/* backward pass of the dropout layer (CPU) */
template <typename Dtype>
void DropoutLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,  /* flags whether gradients should propagate to each bottom blob */
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {/* only compute if this bottom needs gradients */
    const Dtype* top_diff = top[0]->cpu_diff();/* gradient flowing in from the layer above */
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();/* gradient to pass to the layer below */
    if (this->phase_ == TRAIN) {/* training phase */
      const unsigned int* mask = rand_vec_.cpu_data();/* the same mask drawn in the forward pass */
      const int count = bottom[0]->count();/* number of elements in the blob */
      for (int i = 0; i < count; ++i) {
        bottom_diff[i] = top_diff[i] * mask[i] * scale_;/* gradient flows only through kept units, with the same scaling */
      }
    } else {
      caffe_copy(top[0]->count(), top_diff, bottom_diff);/* outside training, just copy the gradient through */
    }
  }
}
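
The backward pass follows directly from the forward formula. Within one iteration the mask is a constant, and top_data[i] = bottom_data[i] * mask[i] * scale_, so the chain rule gives bottom_diff[i] = top_diff[i] * mask[i] * scale_: gradients flow only through the units that were kept in the forward pass, rescaled by the same 1/(1 - threshold_) factor.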


#ifdef CPU_ONLY
STUB_GPU(DropoutLayer);
#endif

INSTANTIATE_CLASS(DropoutLayer);
REGISTER_LAYER_CLASS(Dropout);
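
INSTANTIATE_CLASS explicitly instantiates DropoutLayer<float> and DropoutLayer<double>, and REGISTER_LAYER_CLASS(Dropout) registers the class with Caffe's layer factory, so that a prototxt layer with type: "Dropout" is resolved to this class when the network is constructed.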

}  // namespace caffe


