Adding new layers to Caffe (absolute-value loss, SmoothL1 loss)

1. RoiPoolingLayer

2. SmoothL1LossLayer

The Caffe-based Fast R-CNN implementation, which contains both RoiPoolingLayer and SmoothL1LossLayer, is here:

https://github.com/rbgirshick/caffe-fast-rcnn/

Note: change the CUDA architecture in the build configuration to 61 (compute capability 6.1), otherwise the .cu files will fail to compile.
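With the Makefile build this amounts to editing the CUDA_ARCH line in Makefile.config; a sketch for a compute-capability 6.1 (Pascal) card, to be adapted to your own GPU:

CUDA_ARCH := -gencode arch=compute_61,code=sm_61 \
             -gencode arch=compute_61,code=compute_61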

 

To add ROIPoolingLayer and SmoothL1LossLayer to an existing Caffe build, their definitions can be found at https://github.com/rbgirshick/caffe-fast-rcnn. The general procedure for adding a layer to Caffe is documented at https://github.com/BVLC/caffe/wiki/. With that, let's get started!

1. Declaration and implementation of ROIPoolingLayer and SmoothL1LossLayer:

Declaration: caffe_root/include/caffe/fast_rcnn_layers.hpp

Implementation: caffe_root/src/caffe/layers/roi_pooling_layer.cpp

    caffe_root/src/caffe/layers/roi_pooling_layer.cu

    caffe_root/src/caffe/layers/smooth_L1_loss_layer.cpp

    caffe_root/src/caffe/layers/smooth_L1_loss_layer.cu

2. Tests for ROIPoolingLayer and SmoothL1LossLayer:

caffe_root/src/caffe/test/test_roi_pooling_layer.cpp

caffe_root/src/caffe/test/test_smooth_L1_loss_layer.cpp

 

The overall procedure above is fine, but the original SmoothL1LossLayer implementation has a problem: its LayerSetUp method does not call the parent class's LayerSetUp, so during backpropagation the weights stay at zero and are never updated. The code below fixes this problem.
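Concretely, the fix is to call the parent implementation as the first statement of LayerSetUp, exactly as in the full listing given further below:

template <typename Dtype>
void SmoothL1LossLayer<Dtype>::LayerSetUp(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Without this call the base LossLayer never registers the default
  // loss_weight of 1, so top[0]'s diff stays 0 and every gradient vanishes.
  LossLayer<Dtype>::LayerSetUp(bottom, top);
  // ... layer-specific setup (sigma, weight bottoms) follows as in the full listing.
}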

 

3. Registration

Add the following to caffe_root/src/caffe/proto/caffe.proto (the optional ... field declarations go inside message LayerParameter; the message definitions themselves go at the top level of the file):

optional ROIPoolingParameter roi_pooling_param = 8266711;



// Message that stores parameters used by ROIPoolingLayer
message ROIPoolingParameter {
// Pad, kernel size, and stride are all given as a single value for equal
// dimensions in height and width or as Y, X pairs.
  optional uint32 pooled_h = 1 [default = 0]; // The pooled output height
  optional uint32 pooled_w = 2 [default = 0]; // The pooled output width
  // Multiplicative spatial scale factor to translate ROI coords from their
  // input scale to the scale used when pooling
  optional float spatial_scale = 3 [default = 1];
}

optional SmoothL1LossParameter smooth_l1_loss_param = 8266712;

message SmoothL1LossParameter {
  // SmoothL1Loss(x) =
  //   0.5 * (sigma * x) ** 2    -- if |x| < 1.0 / sigma / sigma
  //   |x| - 0.5 / sigma / sigma -- otherwise
  optional float sigma = 1 [default = 1];
}
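Once these parameters are registered and Caffe is rebuilt, the layers can be referenced from a network prototxt. A minimal sketch for ROIPooling follows; the layer and blob names are only illustrative (modeled on the Fast R-CNN VGG16 prototxts), not something the registration above creates for you:

layer {
  name: "roi_pool5"
  type: "ROIPooling"
  bottom: "conv5_3"   # convolutional feature map
  bottom: "rois"      # one ROI per row: (batch_index, x1, y1, x2, y2)
  top: "pool5"
  roi_pooling_param {
    pooled_w: 7
    pooled_h: 7
    spatial_scale: 0.0625   # 1/16, the cumulative stride of conv5_3
  }
}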

Done.







Adding SmoothL1Loss to Caffe

smooth_L1_loss_layer.hpp

#ifndef CAFFE_SMOOTH_L1_LOSS_LAYER_HPP_
#define CAFFE_SMOOTH_L1_LOSS_LAYER_HPP_


#include <vector>


#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"


#include "caffe/layers/loss_layer.hpp"


namespace caffe {




	template <typename Dtype>
	class SmoothL1LossLayer : public LossLayer<Dtype> {
	public:
		explicit SmoothL1LossLayer(const LayerParameter& param)
			: LossLayer<Dtype>(param), diff_() {}
		virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
			const vector<Blob<Dtype>*>& top);
		virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
			const vector<Blob<Dtype>*>& top);


		virtual inline const char* type() const { return "SmoothL1Loss"; }


		//virtual inline int ExactNumBottomBlobs() const { return -1; }
		//virtual inline int MinBottomBlobs() const { return 2; }
		//virtual inline int MaxBottomBlobs() const { return 4; }


		/**
		 * Unlike most loss layers, in the SmoothL1LossLayer we can backpropagate
		 * to both inputs -- override to return true and always allow force_backward.
		 */
		virtual inline bool AllowForceBackward(const int bottom_index) const {
			return true;
		}


	protected:
		virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
			const vector<Blob<Dtype>*>& top);
		//virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
		//	const vector<Blob<Dtype>*>& top);


		virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
			const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
		//virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
		//	const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);


		Blob<Dtype> diff_;
		Blob<Dtype> errors_;
		Blob<Dtype> ones_;
		bool has_weights_;
		Dtype sigma2_;
	};


}  // namespace caffe


#endif  // CAFFE_SMOOTH_L1_LOSS_LAYER_HPP_



smooth_L1_loss_layer.cpp



#include <cmath>
#include <vector>


#include "caffe/layers/smooth_L1_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"


namespace caffe {


	template <typename Dtype>
	void SmoothL1LossLayer<Dtype>::LayerSetUp(
		const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
		LossLayer<Dtype>::LayerSetUp(bottom, top);
		SmoothL1LossParameter loss_param = this->layer_param_.smooth_l1_loss_param();
		sigma2_ = loss_param.sigma() * loss_param.sigma();
		has_weights_ = (bottom.size() >= 3);
		if (has_weights_) {
			CHECK_EQ(bottom.size(), 4) << "If weights are used, must specify both "
				"inside and outside weights";
		}
	}


	template <typename Dtype>
	void SmoothL1LossLayer<Dtype>::Reshape(
		const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
		LossLayer<Dtype>::Reshape(bottom, top);
		CHECK_EQ(bottom[0]->channels(), bottom[1]->channels());
		CHECK_EQ(bottom[0]->height(), bottom[1]->height());
		CHECK_EQ(bottom[0]->width(), bottom[1]->width());
		if (has_weights_) {
			CHECK_EQ(bottom[0]->channels(), bottom[2]->channels());
			CHECK_EQ(bottom[0]->height(), bottom[2]->height());
			CHECK_EQ(bottom[0]->width(), bottom[2]->width());
			CHECK_EQ(bottom[0]->channels(), bottom[3]->channels());
			CHECK_EQ(bottom[0]->height(), bottom[3]->height());
			CHECK_EQ(bottom[0]->width(), bottom[3]->width());
		}
		diff_.Reshape(bottom[0]->num(), bottom[0]->channels(),
			bottom[0]->height(), bottom[0]->width());
		errors_.Reshape(bottom[0]->num(), bottom[0]->channels(),
			bottom[0]->height(), bottom[0]->width());
		// vector of ones used to sum
		ones_.Reshape(bottom[0]->num(), bottom[0]->channels(),
			bottom[0]->height(), bottom[0]->width());
		for (int i = 0; i < bottom[0]->count(); ++i) {
			ones_.mutable_cpu_data()[i] = Dtype(1);
		}
	}


	template <typename Dtype>
	void SmoothL1LossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
		const vector<Blob<Dtype>*>& top) {
		// cpu implementation
		CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1))
			<< "Inputs must have the same dimension.";
		int count = bottom[0]->count();
		caffe_sub(count,
			bottom[0]->cpu_data(),
			bottom[1]->cpu_data(),
			diff_.mutable_cpu_data());


		if (has_weights_) {
			caffe_mul(count,
				bottom[2]->cpu_data(),
				diff_.cpu_data(),
				diff_.mutable_cpu_data());
		}
		// f(x) = 0.5 * (sigma * x)^2          if |x| < 1 / sigma / sigma
		//        |x| - 0.5 / sigma / sigma    otherwise
		const Dtype* in = diff_.cpu_data();
		Dtype* out = errors_.mutable_cpu_data();
		for (int index = 0; index < count; ++index) {
			Dtype val = in[index];
			Dtype abs_val = std::fabs(val);
			if (abs_val < 1.0 / sigma2_) {
				out[index] = 0.5 * val * val * sigma2_;
			}
			else {
				out[index] = abs_val - 0.5 / sigma2_;
			}
		}


		if (has_weights_) {
			caffe_mul(count, bottom[3]->cpu_data(), out, errors_.mutable_cpu_data());
		}


		// compute loss
		Dtype loss = caffe_cpu_dot(count, ones_.cpu_data(), errors_.cpu_data());
		top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num();
		// end cpu implementation
	}


	template <typename Dtype>
	void SmoothL1LossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
		const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {


		// cpu implementation
		int count = diff_.count();
		const Dtype* in = diff_.cpu_data();
		Dtype* out = diff_.mutable_cpu_data();
		for (int index = 0; index < count; index++) {
			Dtype val = in[index];
			Dtype abs_val = std::fabs(val);
			if (abs_val < 1.0 / sigma2_) {
				out[index] = sigma2_ * val;
			}
			else {
				// gradient of |x|: 1 if val > 0, -1 if val < 0, 0 if val == 0
				out[index] = (Dtype(0) < val) - (val < Dtype(0));
			}
		}


		for (int i = 0; i<2; ++i) {
			if (propagate_down[i]) {
				const Dtype sign = (i == 0) ? 1 : -1;
				const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
				caffe_cpu_axpby(
					count,
					alpha,
					out,//diff_.cpu_data(), 
					Dtype(0),
					bottom[i]->mutable_cpu_diff());


				if (has_weights_) {
					// Scale by the "inside" weights
					caffe_mul(
						count,
						bottom[2]->cpu_data(),
						bottom[i]->cpu_diff(),
						bottom[i]->mutable_cpu_diff());
					// Scale by the "outside" weights
					caffe_mul(
						count,
						bottom[3]->cpu_data(),
						bottom[i]->cpu_diff(),
						bottom[i]->mutable_cpu_diff());
				}
			}
		}
		// end cpu implementation
	}


	// Note: no GPU implementation is declared in the header, so the usual
	// CPU_ONLY STUB_GPU macro is omitted; the base Layer class falls back
	// to the CPU code for Forward_gpu / Backward_gpu.


	INSTANTIATE_CLASS(SmoothL1LossLayer);
	REGISTER_LAYER_CLASS(SmoothL1Loss);


}  // namespace caffe
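Before moving on to registration, the backward pass can be verified with a gradient-check test under caffe_root/src/caffe/test/. The sketch below is a minimal version assuming the two-bottom form of the layer above (no inside/outside weights); the fixture name and blob shapes are illustrative:

// test_smooth_L1_loss_layer.cpp (sketch)
#include <vector>

#include "gtest/gtest.h"

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layers/smooth_L1_loss_layer.hpp"

#include "caffe/test/test_caffe_main.hpp"
#include "caffe/test/test_gradient_check_util.hpp"

namespace caffe {

template <typename TypeParam>
class SmoothL1LossLayerTest : public MultiDeviceTest<TypeParam> {
  typedef typename TypeParam::Dtype Dtype;

 protected:
  SmoothL1LossLayerTest()
      : blob_bottom_data_(new Blob<Dtype>(10, 5, 1, 1)),
        blob_bottom_label_(new Blob<Dtype>(10, 5, 1, 1)),
        blob_top_loss_(new Blob<Dtype>()) {
    // Fill both bottoms with Gaussian noise.
    FillerParameter filler_param;
    GaussianFiller<Dtype> filler(filler_param);
    filler.Fill(blob_bottom_data_);
    filler.Fill(blob_bottom_label_);
    blob_bottom_vec_.push_back(blob_bottom_data_);
    blob_bottom_vec_.push_back(blob_bottom_label_);
    blob_top_vec_.push_back(blob_top_loss_);
  }
  virtual ~SmoothL1LossLayerTest() {
    delete blob_bottom_data_;
    delete blob_bottom_label_;
    delete blob_top_loss_;
  }
  Blob<Dtype>* const blob_bottom_data_;
  Blob<Dtype>* const blob_bottom_label_;
  Blob<Dtype>* const blob_top_loss_;
  vector<Blob<Dtype>*> blob_bottom_vec_;
  vector<Blob<Dtype>*> blob_top_vec_;
};

TYPED_TEST_CASE(SmoothL1LossLayerTest, TestDtypesAndDevices);

TYPED_TEST(SmoothL1LossLayerTest, TestGradient) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  SmoothL1LossLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  // Numerically verify the analytic gradient w.r.t. both bottoms.
  GradientChecker<Dtype> checker(1e-2, 1e-2, 1701);
  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
      this->blob_top_vec_);
}

}  // namespace caffe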

 

 

 

Registration:

Note: the name used here, SmoothL1Loss, should match the string returned by virtual inline const char* type() const { return "SmoothL1Loss"; } (and the name passed to REGISTER_LAYER_CLASS).

optional SmoothL1LossParameter smooth_l1_loss_param = 152;

message SmoothL1LossParameter {
  optional float sigma = 1 [default = 1];
}
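For reference, a minimal prototxt usage of this simplified two-bottom version might look as follows (layer and blob names are illustrative; the type: string must be exactly SmoothL1Loss):

layer {
  name: "loss_bbox"
  type: "SmoothL1Loss"
  bottom: "bbox_pred"      # predictions
  bottom: "bbox_targets"   # regression targets, same shape as the predictions
  top: "loss_bbox"
  loss_weight: 1
  smooth_l1_loss_param { sigma: 1 }
}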

 

 

 

Adding absolute_loss_layer

1. Registration: add the following to caffe_root/src/caffe/proto/caffe.proto (as before, the message at the top level and the optional field inside message LayerParameter):

message AbsoluteLossParameter {
  optional float dis = 1 [default = 1];
}
optional AbsoluteLossParameter absolute_loss_param = 151;
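Once the header and implementation below are added and Caffe is rebuilt, the layer can be used from a prototxt along these lines (names are illustrative; dis simply scales the loss):

layer {
  name: "abs_loss"
  type: "AbsoluteLoss"
  bottom: "pred"
  bottom: "label"
  top: "abs_loss"
  absolute_loss_param { dis: 1 }
}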
absolute_loss_layer.hpp

#ifndef CAFFE_ABSOLUTE_LOSS_LAYER_HPP_
#define CAFFE_ABSOLUTE_LOSS_LAYER_HPP_

#include <vector>  


#include "caffe/blob.hpp"  
#include "caffe/layer.hpp"  
#include "caffe/proto/caffe.pb.h"  


#include "caffe/layers/loss_layer.hpp"  


namespace caffe {


	template <typename Dtype>
	class AbsoluteLossLayer : public LossLayer<Dtype> {
	public:
		explicit AbsoluteLossLayer(const LayerParameter& param)
			: LossLayer<Dtype>(param), dis_() {}
		virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
			const vector<Blob<Dtype>*>& top);


		virtual inline const char* type() const { return "AbsoluteLoss"; }


		virtual inline bool AllowForceBackward(const int bottom_index) const {
			return true;
		}


	protected:
		/// @copydoc AbsoluteLossLayer  
		virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
			const vector<Blob<Dtype>*>& top);
		//virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,  
		//    const vector<Blob<Dtype>*>& top);  


		virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
			const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
		//virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,  
		//    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);  

		Blob<Dtype> dis_;
	};

}  // namespace caffe  

#endif  // CAFFE_ABSOLUTE_LOSS_LAYER_HPP_


absolute_loss_layer.cpp

#include <vector>
  
#include "caffe/layers/absolute_loss_layer.hpp"  
#include "caffe/util/math_functions.hpp"  
  
namespace caffe {  
  
template <typename Dtype>  
void AbsoluteLossLayer<Dtype>::Reshape(  
  const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {  
  LossLayer<Dtype>::Reshape(bottom, top);   // defined in LossLayer
  CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1))  // the two inputs must have the same dimensions
      << "Inputs must have the same dimension.";
  dis_.ReshapeLike(*bottom[0]);           // dis_ holds the element-wise difference of the two bottoms, same shape as bottom[0]
}  
  
template <typename Dtype>  
void AbsoluteLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,  
    const vector<Blob<Dtype>*>& top) {  
  int count = bottom[0]->count();   // total number of elements in each bottom
  caffe_sub(
      count,
      bottom[0]->cpu_data(),
      bottom[1]->cpu_data(),
      dis_.mutable_cpu_data());    // dis_ = bottom[0] - bottom[1]
  // dis acts as a constant scale factor on the loss
  Dtype loss_param = this->layer_param_.absolute_loss_param().dis();
  // caffe_cpu_asum returns the sum of absolute values
  Dtype abs_sum = caffe_cpu_asum(count, dis_.cpu_data());
  Dtype loss = loss_param * abs_sum / bottom[0]->num();
  top[0]->mutable_cpu_data()[0] = loss;  
}  
  
template <typename Dtype>  
void AbsoluteLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,  
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { 
	int count = bottom[0]->count();
	Dtype * dis_value = dis_.mutable_cpu_data();
	for (int i = 0; i < count; i++)
	{
		dis_value[i] = (Dtype(0) < dis_value[i]) - (dis_value[i] < Dtype(0));
		// equivalent, more explicit formulation:
		//if (dis_value[i] > 0)
		//{
		//	dis_value[i] = 1;
		//}
		//else if(dis_value[i] < 0)
		//{
		//	dis_value[i] = -1;
		//}
		//else
		//{
		//	dis_value[i] = 0;
		//}
	}
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      // propagate_down is 0 for the label bottom, so nothing is written there
      //LOG_IF(INFO, Caffe::root_solver()) << "use cpu code ................ i is:" << i;
      const Dtype sign = (i == 0) ? 1 : -1;
      // include the dis scale factor so the gradient matches the forward loss
      const Dtype alpha = sign * this->layer_param_.absolute_loss_param().dis()
          * top[0]->cpu_diff()[0] / bottom[i]->num();
      caffe_cpu_axpby(
          bottom[i]->count(),                  // count
          alpha,                               // alpha
          dis_.cpu_data(),                     // x, holds sign(bottom[0] - bottom[1])
          Dtype(0),                            // beta
          bottom[i]->mutable_cpu_diff());      // y = alpha * x + beta * y
    }     // i.e. bottom[i]->cpu_diff = alpha * sign(dis_)
  }  
}  
  
// Note: as with SmoothL1LossLayer, no GPU methods are declared in the header,
// so the CPU_ONLY STUB_GPU macro is omitted and the base Layer class falls
// back to the CPU implementation.
  
INSTANTIATE_CLASS(AbsoluteLossLayer);  
REGISTER_LAYER_CLASS(AbsoluteLoss);  
  
}  // namespace caffe  

 

 

 
