R-CNN Study (5): Understanding SmoothL1LossLayer by Reading the Paper Alongside the Code
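This post reads the GPU implementation of SmoothL1LossLayer (smooth_l1_loss_layer.cu in the Fast/Faster R-CNN fork of Caffe) against the equations in the Faster R-CNN paper. For reference, the function the kernels implement is the sigma-generalized smooth L1 (the paper's Eq. (3) is the special case $\sigma = 1$):

$$
\mathrm{smooth}_{L_1}(x) =
\begin{cases}
0.5\,(\sigma x)^2, & |x| < 1/\sigma^2 \\
|x| - 0.5/\sigma^2, & \text{otherwise,}
\end{cases}
$$

and it enters the paper's multi-task loss, Eq. (1), as the regression term:

$$
L(\{p_i\},\{t_i\}) = \frac{1}{N_{cls}}\sum_i L_{cls}(p_i, p_i^*)
 + \lambda\,\frac{1}{N_{reg}}\sum_i p_i^*\, L_{reg}(t_i, t_i^*).
$$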

#include "caffe/fast_rcnn_layers.hpp"

namespace caffe {

// Forward kernel: computes smooth L1, Eq. (3) in the paper
template <typename Dtype>
__global__ void SmoothL1Forward(const int n, const Dtype* in, Dtype* out,
    Dtype sigma2) {
  // f(x) = 0.5 * (sigma * x)^2         if |x| < 1 / sigma / sigma
  //        |x| - 0.5 / sigma / sigma   otherwise
  CUDA_KERNEL_LOOP(index, n) {
    Dtype val = in[index];
    Dtype abs_val = abs(val);
    if (abs_val < 1.0 / sigma2) {
      out[index] = 0.5 * val * val * sigma2;
    } else {
      out[index] = abs_val - 0.5 / sigma2;
    }
  }
}
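A note on the two Caffe helpers used when launching this kernel: CAFFE_GET_BLOCKS(count) computes ceil(count / CAFFE_CUDA_NUM_THREADS), and CUDA_KERNEL_LOOP is Caffe's grid-stride loop macro (defined in caffe/util/device_alternate.hpp), roughly:

#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)

Each thread strides over the flattened array in steps of blockDim.x * gridDim.x, so the kernel stays correct regardless of grid size.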

template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();
  caffe_gpu_sub(
      count,
      bottom[0]->gpu_data(),          // t_i  (predictions)
      bottom[1]->gpu_data(),          // t_i* (regression targets)
      diff_.mutable_gpu_data());      // d := t_i - t_i*  (b0 - b1)
  if (has_weights_) {
    // apply "inside" weights: p_i* in Eq. (1), 1 where there is a target, 0 otherwise
    caffe_gpu_mul(
        count,
        bottom[2]->gpu_data(),        // p_i*
        diff_.gpu_data(),
        diff_.mutable_gpu_data());    // d := w_in * (b0 - b1)
  }
  // evaluate smooth L1 elementwise, Eq. (3)
  SmoothL1Forward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, diff_.gpu_data(), errors_.mutable_gpu_data(), sigma2_);
  CUDA_POST_KERNEL_CHECK;
  if (has_weights_) {
    // apply "outside" weights, e.g. the 1/N_reg normalization in Eq. (1)
    caffe_gpu_mul(
        count,
        bottom[3]->gpu_data(),        // 1 / N_reg
        errors_.gpu_data(),
        errors_.mutable_gpu_data());  // d := w_out * SmoothL1(w_in * (b0 - b1))
  }
  // sum all per-element errors via a dot product with a vector of ones
  Dtype loss;
  caffe_gpu_dot(count, ones_.gpu_data(), errors_.gpu_data(), &loss);
  top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num();
}
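For orientation, here is how the four bottom blobs are typically wired up. The sketch follows the loss_bbox layer in py-faster-rcnn's train.prototxt; the blob names are that repo's conventions, not something the layer itself requires:

layer {
  name: "loss_bbox"
  type: "SmoothL1Loss"
  bottom: "bbox_pred"             # t_i:  predicted box deltas
  bottom: "bbox_targets"          # t_i*: regression targets
  bottom: "bbox_inside_weights"   # w_in (p_i*): masks out negative examples
  bottom: "bbox_outside_weights"  # w_out: normalization, e.g. 1/N_reg
  top: "loss_bbox"
  loss_weight: 1
}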

// Backward kernel: derivative of smooth L1
template <typename Dtype>
__global__ void SmoothL1Backward(const int n, const Dtype* in, Dtype* out,
    Dtype sigma2) {
  // f'(x) = sigma * sigma * x   if |x| < 1 / sigma / sigma
  //       = sign(x)             otherwise
  CUDA_KERNEL_LOOP(index, n) {
    Dtype val = in[index];
    Dtype abs_val = abs(val);
    if (abs_val < 1.0 / sigma2) {
      out[index] = sigma2 * val;
    } else {
      // branchless sign(x): (0 < x) - (x < 0) yields +1, 0, or -1
      out[index] = (Dtype(0) < val) - (val < Dtype(0));
    }
  }
}
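Before Backward_gpu, note that diff_ still holds $d = w_{in}(b_0 - b_1)$ from the forward pass, and the kernel above overwrites it in place with $f'(d)$. The loop below then distributes $f'(d)$ to the two bottoms with opposite signs, since

$$
\frac{\partial d}{\partial b_0} = +w_{in}, \qquad
\frac{\partial d}{\partial b_1} = -w_{in},
$$

so bottom[i] receives $\mathrm{sign}_i \cdot (\partial L / \partial \mathrm{top}) / N \cdot f'(d)$, with the remaining chain-rule factors $w_{in}$ and $w_{out}$ multiplied in afterwards when has_weights_ is set.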

template <typename Dtype>
void SmoothL1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // after forward, diff_ holds w_in * (b0 - b1)
  int count = diff_.count();
  // run the backward kernel in place: diff_.gpu_data() is x,
  // diff_.mutable_gpu_data() receives the derivative f'(x)
  SmoothL1Backward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, diff_.gpu_data(), diff_.mutable_gpu_data(), sigma2_);
  CUDA_POST_KERNEL_CHECK;
  // mirror of the forward pass: propagate to both bottoms, with opposite signs
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const Dtype sign = (i == 0) ? 1 : -1;
      const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
      caffe_gpu_axpby(
          count,                           // count
          alpha,                           // alpha
          diff_.gpu_data(),                // x
          Dtype(0),                        // beta
          bottom[i]->mutable_gpu_diff());  // y := alpha * x
      if (has_weights_) {
        // scale by "inside" weight (chain-rule factor from d = w_in * (b0 - b1))
        caffe_gpu_mul(
            count,
            bottom[2]->gpu_data(),
            bottom[i]->gpu_diff(),
            bottom[i]->mutable_gpu_diff());
        // scale by "outside" weight
        caffe_gpu_mul(
            count,
            bottom[3]->gpu_data(),
            bottom[i]->gpu_diff(),
            bottom[i]->mutable_gpu_diff());
      }
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer);

}  // namespace caffe
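As a quick sanity check that the forward and backward formulas agree, here is a minimal host-side sketch (plain C++, no Caffe dependency; smooth_l1 and smooth_l1_grad are restated from the kernels above) comparing the analytic derivative against a central finite difference:

#include <cmath>
#include <cstdio>

// restated from SmoothL1Forward; sigma2 = sigma * sigma
double smooth_l1(double x, double sigma2) {
  double ax = std::fabs(x);
  return ax < 1.0 / sigma2 ? 0.5 * x * x * sigma2 : ax - 0.5 / sigma2;
}

// restated from SmoothL1Backward
double smooth_l1_grad(double x, double sigma2) {
  if (std::fabs(x) < 1.0 / sigma2) return sigma2 * x;
  return (x > 0) - (x < 0);  // sign(x)
}

int main() {
  const double sigma2 = 9.0;  // sigma = 3, as py-faster-rcnn uses for the RPN box loss
  const double h = 1e-6;
  for (double x : {-2.0, -0.5, -0.05, 0.05, 0.5, 2.0}) {
    double fd = (smooth_l1(x + h, sigma2) - smooth_l1(x - h, sigma2)) / (2.0 * h);
    std::printf("x=%+.2f  analytic=%+.6f  numeric=%+.6f\n",
                x, smooth_l1_grad(x, sigma2), fd);
  }
  return 0;
}

The two columns should match to several decimal places everywhere except exactly at the kink |x| = 1/sigma^2, where the derivative changes branch.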
