MSCNN代码解读之Detection_loss_layer

记录自己对MSCNN代码的一些理解。

首先是include/caffe/layers/detection_loss_layer.hpp文件,包含了DetectionLossLayer类(继承自LossLayer)的声明。

// ------------------------------------------------------------------
// MS-CNN
// Copyright (c) 2016 The Regents of the University of California
// see mscnn/LICENSE for details
// Written by Zhaowei Cai [zwcai-at-ucsd.edu]
// Please email me if you find bugs, or have suggestions or questions!
// ------------------------------------------------------------------

#ifndef CAFFE_DETECTION_LOSS_LAYERS_HPP_
#define CAFFE_DETECTION_LOSS_LAYERS_HPP_

#include <string>
#include <utility>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/loss_layer.hpp"

namespace caffe {

/// Loss layer of the MS-CNN detector. Judging from its members, it
/// computes a classification loss via an internal SoftmaxLayer
/// (softmax_layer_ / prob_) together with a bounding-box coordinate
/// regression term (coord_bottom_ / coord_diff_ / bbox_mean_ / bbox_std_).
/// This header only declares the interface; the implementations live in
/// the corresponding .cpp/.cu files (not visible here).
template <typename Dtype>
class DetectionLossLayer : public LossLayer<Dtype> { // public inheritance from LossLayer
 public:
  // explicit: a single-argument constructor would otherwise act as an
  // implicit conversion from LayerParameter; explicit restricts it to
  // direct (explicit) construction only.
  explicit DetectionLossLayer(const LayerParameter& param)
      : LossLayer<Dtype>(param) {} 
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  /// Layer type string reported to Caffe's registry/introspection.
  virtual inline const char* type() const { return "DetectionLoss"; }
  /// Exactly one top blob (the scalar loss).
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  // Virtual dispatch: when a LossLayer<Dtype>* points at a
  // DetectionLossLayer object, calling these invokes the overrides
  // declared here rather than the base-class versions.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
 
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);


  /// The internal SoftmaxLayer used to map predictions to a distribution.
  shared_ptr<Layer<Dtype> > softmax_layer_;
  /// prob stores the output probability predictions from the SoftmaxLayer.
  Blob<Dtype> prob_;
  /// bottom vector holder used in call to the underlying SoftmaxLayer::Forward
  vector<Blob<Dtype>*> softmax_bottom_vec_; // vector of (non-owning) blob pointers
  /// top vector holder used in call to the underlying SoftmaxLayer::Forward
  vector<Blob<Dtype>*> softmax_top_vec_;
  
  Blob<Dtype> cls_bottom_; // classification (category) input data
  Blob<Dtype> coord_bottom_; // bounding-box coordinate input data
  Blob<Dtype> coord_diff_; // NOTE(review): presumably the coord regression diff/gradient buffer — confirm in .cpp
  Blob<Dtype> bootstrap_map_;
  Blob<Dtype> weight_map_;
  /// Whether to ignore instances with a certain label.
  bool has_ignore_label_;
  /// The label indicating that an instance should be ignored.
  int ignore_label_;
  
  // Parameters below are read from the layer's proto configuration;
  // exact semantics are defined in the .cpp implementation.
  int cls_num_;              // number of classes
  int coord_num_;            // number of coordinate outputs
  float lambda_;             // presumably the weight balancing cls vs. coord loss — verify in .cpp
  int field_h_;              // NOTE(review): likely receptive-field height — confirm
  int field_w_;              // NOTE(review): likely receptive-field width — confirm
  float field_whr_;
  float field_xyr_;
  int downsample_rate_;
  bool bb_smooth_;           // presumably toggles smooth-L1-style bbox loss — verify in .cpp
  float bg_threshold_;
  int bg_multiple_;
  string sample_mode_;
  bool objectness_;
  bool iou_weighted_;
  bool pos_neg_weighted_;
  Blob<Dtype> bbox_mean_;    // bbox target normalization mean
  Blob<Dtype> bbox_std_;     // bbox target normalization std
};

}  // namespace caffe

#endif  // CAFFE_DETECTION_LOSS_LAYERS_HPP_
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值