Caffe C++ WeightedSoftmaxWithLoss Implementation

This post records the process of implementing WeightedSoftmaxWithLoss in Caffe: modifying the caffe.proto file, adding weighted_softmax_loss_layer.cpp and .cu, creating the corresponding header file, and configuring the weights in the .prototxt file.

Reference: liaomingg's GitHub

Caffe does not ship a weighted softmax-with-loss layer, and one was hard to find when I needed it, so I am recording the implementation here.
1. First, modify caffe_3d/src/caffe/proto/caffe.proto by adding the following two lines to SoftmaxParameter:

message SoftmaxParameter {
  .....
  // Per-class weights: entry i of pos_cid receives the weight in entry i of
  // pos_mult. Pick field numbers not already used by SoftmaxParameter
  // (in stock Caffe, 1 and 2 are taken, so 3 and 4 are free).
  repeated float pos_mult = 3;  // class weight
  repeated int32 pos_cid = 4;   // class id
}
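
With these fields in place, the per-class weights can be set from the network .prototxt. A minimal sketch of a loss layer using them, assuming the layer is registered under the type name WeightedSoftmaxWithLoss (the layer and blob names here are placeholders); this example doubles the weight of class 1:

layer {
  name: "loss"
  type: "WeightedSoftmaxWithLoss"
  bottom: "score"
  bottom: "label"
  top: "loss"
  softmax_param {
    pos_mult: 2.0
    pos_cid: 1
  }
}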

2. Then add two files under caffe_3d/src/caffe/layers/: weighted_softmax_loss_layer.cpp and weighted_softmax_loss_layer.cu.

The content of weighted_softmax_loss_layer.cpp is as follows:

#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/weighted_softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
WeightedSoftmaxWithLossLayer<Dtype>::~WeightedSoftmaxWithLossLayer() {
  if (weights_) {
    delete[] weights_;
  }
#ifndef CPU_ONLY
  if (Caffe::mode() == Caffe::GPU && gpu_weights_) {
    cudaFree(gpu_weights_);
  }
#endif
}

template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::LayerSetUp(bottom, top);
  // Set up an internal Softmax layer that fills prob_ with class probabilities.
  LayerParameter softmax_param(this->layer_param_);
  softmax_param.set_type("Softmax");
  softmax_layer_ = LayerRegistry<Dtype>::CreateLayer(softmax_param);
  softmax_bottom_vec_.clear();
  softmax_bottom_vec_.push_back(bottom[0]);
  softmax_top_vec_.clear();
  softmax_top_vec_.push_back(&prob_);
  softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_);

  int pos_mult_size = this->layer_param_.softmax_param().pos_mult_size();
  int pos_cid_size = this->layer_param_.softmax_param().pos_cid_size();
  CHECK_EQ(pos_mult_size, pos_cid_size)
    << "the size of pos_mult must be compatible with the size of pos_cid.";
  weights_ = new Dtype[bottom[0]->channels()];
  // Initialize all class weights to 1.0, i.e. unweighted by default.
  for (int i = 0; i < bottom[0]->channels(); ++i) {
    weights_[i] = 1.0;
  }
  LOG(INFO) << "Now load class weight from prototxt.";
  for (int i = 0; i < pos_mult_size; ++i) {
    float pos_mult = this->layer_param_.softmax_param().pos_mult(i);
    int pos_cid = this->layer_param_.softmax_param().pos_cid(i);
    // Override the weight for the specified class id.
    weights_[pos_cid] = pos_mult;
  }
#ifndef CPU_ONLY
  if (Caffe::mode() == Caffe::GPU) {
    // Keep a device copy of the weights for the .cu kernels.
    cudaMalloc((void**)&gpu_weights_, bottom[0]->channels() * sizeof(Dtype));
    cudaMemcpy(gpu_weights_, weights_, bottom[0]->channels() * sizeof(Dtype),
               cudaMemcpyHostToDevice);
  }
#endif
  LOG(INFO) << "Loaded weights, all weights: ";
  for (int i = 0; i < bottom[0]->channels(); ++i) {
    LOG(INFO) << "cid: " << i << ", mult: " << weights_[i];
  }
  has_ignore_label_ =
    this->layer_param_.loss_param().has_ignore_label();
  if (has_ignore_label_) {
    ignore_label_ = this->layer_param_.loss_param().ignore_label();
  }
  if (!this->layer_param_.loss_param().has_normalization() &&
      this->layer_param_.loss_param().has_normalize()) {
    normalization_ = this->layer_param_.loss_param().normalize() ?
                     LossParameter_NormalizationMode_VALID :
                     LossParameter_NormalizationMode_BATCH_SIZE;
  } else {
    normalization_ = this->layer_param_.loss_param().normalization();
  }
}
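
The listing above covers the destructor and LayerSetUp; the remaining definitions (Reshape, the forward and backward passes, and the INSTANTIATE_CLASS / REGISTER_LAYER_CLASS macros, plus the .cu kernels) can be found in liaomingg's repository. The core idea of the forward pass is that each sample's negative log-likelihood is scaled by the weight of its ground-truth class. A sketch of Forward_cpu under the assumption that it mirrors the stock SoftmaxWithLoss layer (outer_num_, inner_num_ and get_normalizer as defined there, set up in Reshape); this is not the verbatim file:

template <typename Dtype>
void WeightedSoftmaxWithLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Run the internal softmax to fill prob_ with class probabilities.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.cpu_data();
  const Dtype* label = bottom[1]->cpu_data();
  int dim = prob_.count() / outer_num_;
  int count = 0;
  Dtype loss = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; ++j) {
      const int label_value = static_cast<int>(label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      // Scale this sample's negative log-likelihood by its class weight,
      // loaded into weights_ in LayerSetUp above.
      loss -= weights_[label_value] *
              log(std::max(prob_data[i * dim + label_value * inner_num_ + j],
                           Dtype(FLT_MIN)));
      ++count;
    }
  }
  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, count);
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}

The backward pass scales the usual softmax gradient (prob minus 1 at the target class) by the same weights_[label_value] factor, so the gradient stays consistent with the weighted loss.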
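
Finally, the .cpp above includes caffe/layers/weighted_softmax_loss_layer.hpp, so a corresponding header has to be created under caffe_3d/include/caffe/layers/. A minimal sketch, assuming it mirrors the stock SoftmaxWithLoss header plus the two weight members used above (the file in liaomingg's repository may differ in detail):

#ifndef CAFFE_WEIGHTED_SOFTMAX_LOSS_LAYER_HPP_
#define CAFFE_WEIGHTED_SOFTMAX_LOSS_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/layers/loss_layer.hpp"

namespace caffe {

template <typename Dtype>
class WeightedSoftmaxWithLossLayer : public LossLayer<Dtype> {
 public:
  explicit WeightedSoftmaxWithLossLayer(const LayerParameter& param)
      : LossLayer<Dtype>(param), weights_(NULL), gpu_weights_(NULL) {}
  virtual ~WeightedSoftmaxWithLossLayer();
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "WeightedSoftmaxWithLoss"; }
  virtual inline int ExactNumTopBlobs() const { return -1; }
  virtual inline int MinTopBlobs() const { return 1; }
  virtual inline int MaxTopBlobs() const { return 2; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual Dtype get_normalizer(
      LossParameter_NormalizationMode normalization_mode, int valid_count);

  // Internal Softmax layer that computes the class probabilities.
  shared_ptr<Layer<Dtype> > softmax_layer_;
  Blob<Dtype> prob_;
  vector<Blob<Dtype>*> softmax_bottom_vec_;
  vector<Blob<Dtype>*> softmax_top_vec_;
  bool has_ignore_label_;
  int ignore_label_;
  LossParameter_NormalizationMode normalization_;
  int softmax_axis_, outer_num_, inner_num_;
  // Per-class weights read from SoftmaxParameter (host and device copies).
  Dtype* weights_;
  Dtype* gpu_weights_;
};

}  // namespace caffe

#endif  // CAFFE_WEIGHTED_SOFTMAX_LOSS_LAYER_HPP_

Note that weights_ and gpu_weights_ are initialized to NULL in the constructor, so the checks in the destructor are safe even if LayerSetUp never ran.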