Adding an MMDLoss Layer to Caffe (Writing a Custom Caffe Layer), Based on "Learning Transferable Features with Deep Adaptation Networks"

This article walks through how to add a custom network layer to Caffe, using the MMDLoss layer from the paper "Learning Transferable Features with Deep Adaptation Networks" (DAN) as the running example.
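For context, MMD (maximum mean discrepancy) measures the distance between the source distribution $p$ and the target distribution $q$ as the distance between their mean feature embeddings in a reproducing kernel Hilbert space $\mathcal{H}_k$. DAN uses the multi-kernel variant (MK-MMD):

$$ d_k^2(p, q) = \left\| \mathbf{E}_p[\phi(x^s)] - \mathbf{E}_q[\phi(x^t)] \right\|_{\mathcal{H}_k}^2, \qquad k = \sum_{u=1}^{m} \beta_u k_u, \quad \beta_u \ge 0, $$

where the kernel $k$ is a weighted combination of $m$ Gaussian kernels $k_u$. In the code below, $m$ is num_of_kernel_ and the weights $\beta_u$ live in beta_, initialized uniformly to $1/m$.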

1. Add the header declaration

   Add the declaration of MMDLossLayer to /caffe-master/include/caffe/layers/neuron_layer.hpp:

#ifndef CAFFE_NEURON_LAYER_HPP_
#define CAFFE_NEURON_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief An interface for layers that take one blob as input (@f$ x @f$)
 *        and produce one equally-sized blob as output (@f$ y @f$), where
 *        each element of the output depends only on the corresponding input
 *        element.
 */
template <typename Dtype>
class NeuronLayer : public Layer<Dtype> {
 public:
  explicit NeuronLayer(const LayerParameter& param)
     : Layer<Dtype>(param) {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }
};

template <typename Dtype>
class MMDLossLayer : public NeuronLayer<Dtype> {
 public:
  explicit MMDLossLayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "MMDLoss"; }
  // -1 lifts NeuronLayer's single-bottom restriction: any number is accepted.
  virtual inline int ExactNumBottomBlobs() const { return -1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  Dtype* beta_;             // per-kernel weights, initialized to 1/num_of_kernel_
  Blob<Dtype> mmd_data_;    // scratch buffer of size data_dim_
  Dtype mmd_lambda_;        // weight of the MMD term in the loss
  int input_num_;           // batch size
  int data_dim_;            // feature dimension per sample
  int size_of_source_;      // number of source-domain samples in the batch
  int size_of_target_;      // number of target-domain samples in the batch
  Dtype gamma_;             // base Gaussian kernel bandwidth
  int num_of_kernel_;       // number of Gaussian kernels
  int* source_index_;       // batch indices of source samples
  int* target_index_;       // batch indices of target samples
  int iter_of_epoch_;       // iterations per epoch
  int now_iter_;            // iteration counter within the current epoch
  bool fix_gamma_;          // keep the bandwidth fixed once estimated
  Dtype** Q_;               // num_of_kernel_ x num_of_kernel_ statistics for re-weighting kernels
  Dtype* sum_of_epoch_;     // per-kernel MMD accumulated over the epoch
  Dtype* variance_;         // per-kernel variance estimate
  Dtype I_lambda_;          // regularizer weight for the "L2" method
  int all_sample_num_;      // samples accumulated in the current epoch
  int top_k_;               // kernels kept by the "max"/"max_ratio"/"L2" methods
  Dtype* sum_of_pure_mmd_;  // per-kernel unweighted MMD accumulated over the epoch
  int method_number_;       // 0 = none, 1 = max, 3 = max_ratio, 4 = L2
  Dtype kernel_mul_;        // multiplicative gap between adjacent bandwidths
};

}  // namespace caffe

#endif  // CAFFE_NEURON_LAYER_HPP_

2. Implement MMDLoss

   Create a mmd_layer.cpp file under /caffe-master/src/caffe/layers:

#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/neuron_layer.hpp"

namespace caffe {

template <typename Dtype>
void MMDLossLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::LayerSetUp(bottom, top);
  input_num_ = bottom[0]->count(0, 1);  // batch size
  data_dim_ = bottom[0]->count(1);      // feature dimension per sample
  num_of_kernel_ = this->layer_param_.mmd_param().num_of_kernel();
  mmd_lambda_ = this->layer_param_.mmd_param().mmd_lambda();
  iter_of_epoch_ = this->layer_param_.mmd_param().iter_of_epoch();
  fix_gamma_ = this->layer_param_.mmd_param().fix_gamma();
  // Start from uniform kernel weights beta_u = 1/m.
  beta_ = new Dtype[num_of_kernel_];
  caffe_set(num_of_kernel_, Dtype(1.0) / num_of_kernel_, beta_);
  now_iter_ = 0;
  sum_of_epoch_ = new Dtype[num_of_kernel_];
  caffe_set(num_of_kernel_, Dtype(0), sum_of_epoch_);
  gamma_ = Dtype(-1);  // bandwidth not estimated yet
  Q_ = new Dtype* [num_of_kernel_];
  for(int i = 0; i < num_of_kernel_; i++){
      Q_[i] = new Dtype[num_of_kernel_];
      caffe_set(num_of_kernel_, Dtype(0), Q_[i]);
  }
  variance_ = new Dtype[num_of_kernel_];
  caffe_set(num_of_kernel_, Dtype(0), variance_);
  sum_of_pure_mmd_ = new Dtype[num_of_kernel_];
  caffe_set(num_of_kernel_, Dtype(0), sum_of_pure_mmd_);
  all_sample_num_ = 0;
  kernel_mul_ = this->layer_param_.mmd_param().kernel_mul();
  // Choose the kernel-selection strategy used when the kernel weights are updated.
  if(this->layer_param_.mmd_param().method() == "max"){
        method_number_ = 1;
        top_k_ = this->layer_param_.mmd_param().method_param().top_num();
  }
  else if(this->layer_param_.mmd_param().method() == "none"){
        method_number_ = 0;
  }
  else if(this->layer_param_.mmd_param().method() == "L2"){
        method_number_ = 4;
        top_k_ = this->layer_param_.mmd_param().method_param().top_num();
        I_lambda_ = this->layer_param_.mmd_param().method_param().i_lambda();
  }
  else if(this->layer_param_.mmd_param().method() == "max_ratio"){
        top_k_ = this->layer_param_.mmd_param().method_param().top_num();
        method_number_ = 3;
  }
  LOG(INFO) << this->layer_param_.mmd_param().method() << " num: " << method_number_;
  source_index_ = new int[input_num_];
  target_index_ = new int[input_num_];
  mmd_data_.Reshape(1, 1, 1, data_dim_);
}

template <typename Dtype>
void MMDLossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  NeuronLayer<Dtype>::Reshape(bottom, top);
}
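Reshape simply defers to NeuronLayer, so the remaining work is the loss computation itself. The full implementation also maintains the per-epoch statistics (Q_, variance_, sum_of_epoch_) used to re-estimate beta_; what follows is only a minimal sketch of the core forward pass, computing the biased multi-kernel MMD^2 estimate with Gaussian kernels at bandwidths gamma_ * kernel_mul_^u. The source-first/target-second batch layout and the mean-distance bandwidth heuristic are assumptions of this sketch, not something fixed by the code above.

template <typename Dtype>
void MMDLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const Dtype* data = bottom[0]->cpu_data();
  Dtype* diff = mmd_data_.mutable_cpu_data();  // scratch for x_i - x_j
  const int n = input_num_;

  // Assumption: the first half of the batch is source data, the rest target.
  size_of_source_ = n / 2;
  size_of_target_ = n - size_of_source_;

  // Bandwidth heuristic: mean squared pairwise distance, re-estimated
  // every pass unless fix_gamma_ is set.
  if (gamma_ < 0 || !fix_gamma_) {
    Dtype sum = 0;
    int count = 0;
    for (int i = 0; i < n; ++i) {
      for (int j = i + 1; j < n; ++j) {
        caffe_sub(data_dim_, data + i * data_dim_, data + j * data_dim_, diff);
        sum += caffe_cpu_dot(data_dim_, diff, diff);
        ++count;
      }
    }
    gamma_ = sum / std::max(count, 1);
  }

  // Biased MMD^2 estimate: mean within-source plus mean within-target
  // kernel values, minus twice the mean cross-domain kernel value.
  Dtype mmd = 0;
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) {
      caffe_sub(data_dim_, data + i * data_dim_, data + j * data_dim_, diff);
      const Dtype square_distance = caffe_cpu_dot(data_dim_, diff, diff);
      // Multi-kernel value at bandwidths gamma_, gamma_*kernel_mul_, ...
      Dtype kernel_value = 0;
      Dtype bandwidth = gamma_;
      for (int u = 0; u < num_of_kernel_; ++u) {
        kernel_value += beta_[u] * exp(-square_distance / bandwidth);
        bandwidth *= kernel_mul_;
      }
      const bool i_src = i < size_of_source_;
      const bool j_src = j < size_of_source_;
      if (i_src && j_src) {
        mmd += kernel_value / (size_of_source_ * size_of_source_);
      } else if (!i_src && !j_src) {
        mmd += kernel_value / (size_of_target_ * size_of_target_);
      } else {
        mmd -= 2 * kernel_value / (size_of_source_ * size_of_target_);
      }
    }
  }
  caffe_set(top[0]->count(), Dtype(0), top[0]->mutable_cpu_data());
  top[0]->mutable_cpu_data()[0] = mmd_lambda_ * mmd;
}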
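The backward pass differentiates each Gaussian term: for a bandwidth b, d/dx_i exp(-||x_i - x_j||^2 / b) = exp(-||x_i - x_j||^2 / b) * (-2/b) * (x_i - x_j). Again a sketch under the same batch-layout assumption; the leading factor of 2 accounts for x_i appearing as both the first and the second argument across the double sum.

template <typename Dtype>
void MMDLossLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  const Dtype* data = bottom[0]->cpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  Dtype* diff = mmd_data_.mutable_cpu_data();
  const int n = input_num_;
  caffe_set(bottom[0]->count(), Dtype(0), bottom_diff);
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) {
      if (i == j) { continue; }  // k(x, x) is constant: no gradient
      caffe_sub(data_dim_, data + i * data_dim_, data + j * data_dim_, diff);
      const Dtype square_distance = caffe_cpu_dot(data_dim_, diff, diff);
      // Scalar derivative factor of the multi-kernel value w.r.t. x_i.
      Dtype factor = 0;
      Dtype bandwidth = gamma_;
      for (int u = 0; u < num_of_kernel_; ++u) {
        factor += beta_[u] * exp(-square_distance / bandwidth)
            * (Dtype(-2) / bandwidth);
        bandwidth *= kernel_mul_;
      }
      const bool i_src = i < size_of_source_;
      const bool j_src = j < size_of_source_;
      Dtype coeff;
      if (i_src && j_src) {
        coeff = Dtype(1) / (size_of_source_ * size_of_source_);
      } else if (!i_src && !j_src) {
        coeff = Dtype(1) / (size_of_target_ * size_of_target_);
      } else {
        coeff = Dtype(-2) / (size_of_source_ * size_of_target_);
      }
      // bottom_diff[i] += 2 * lambda * coeff * factor * (x_i - x_j)
      caffe_cpu_axpby(data_dim_,
          Dtype(2) * mmd_lambda_ * coeff * factor, diff,
          Dtype(1), bottom_diff + i * data_dim_);
    }
  }
}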

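Finally, still inside namespace caffe, instantiate the template and register the layer so the factory can create it from a prototxt by type: "MMDLoss". In a CPU-only build the GPU entry points can be stubbed out as below; a GPU build would instead need an mmd_layer.cu defining Forward_gpu and Backward_gpu (which may simply mirror the CPU versions).

#ifdef CPU_ONLY
STUB_GPU(MMDLossLayer);
#endif

INSTANTIATE_CLASS(MMDLossLayer);
REGISTER_LAYER_CLASS(MMDLoss);

}  // namespace caffe

Note also that LayerSetUp reads its settings through this->layer_param_.mmd_param(), so a matching MMDParameter message must be added to src/caffe/proto/caffe.proto, with fields mirroring the accessors used above (num_of_kernel, mmd_lambda, iter_of_epoch, fix_gamma, kernel_mul, method, and a nested method parameter carrying top_num and i_lambda), plus an optional mmd_param field with an unused field ID in LayerParameter. Rebuild afterwards so protoc regenerates caffe.pb.h.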