Caffe's built-in Euclidean loss layer has no option to ignore a particular label value. If you need that behavior, you have to modify the existing Euclidean loss layer yourself.
1. Understand how the Euclidean loss is computed
http://blog.csdn.net/seashell_9/article/details/68064294
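For reference, the loss the layer computes over a batch of N samples (this matches the doxygen comment in the header below) is

E = \frac{1}{2N} \sum_{n=1}^{N} \left\| \hat{y}_n - y_n \right\|_2^2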
2. In this version of the Euclidean loss, the N in the denominator is the total number of labels (bottom[0]->num(), the batch size), not the number of labels left after removing the ignored ones; the small example below makes this concrete.
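Numbers chosen purely for illustration: with a batch of N = 4 scalar targets where one label equals the ignore value, that entry's difference is zeroed, so only three squared differences enter the sum, yet the result is still divided by 2N = 8 rather than by 2 · 3 = 6. The commented-out counts code in the files below hints at normalizing by the valid count, but it is not active.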
3. Code
https://github.com/BVLC/caffe/pull/5250/commits/2ba99ef81f87355930891b1b3f14f183a3055806
4. Main code changes
euclidean_loss_layer.hpp
#ifndef CAFFE_EUCLIDEAN_LOSS_LAYER_HPP_
#define CAFFE_EUCLIDEAN_LOSS_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

#include "caffe/layers/loss_layer.hpp"

namespace caffe {
/**
 * @brief Computes the Euclidean (L2) loss @f$
 *          E = \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n
 *        \right| \right|_2^2 @f$ for real-valued regression tasks.
 *
 * @param bottom input Blob vector (length 2)
 *   -# @f$ (N \times C \times H \times W) @f$
 *      the predictions @f$ \hat{y} \in [-\infty, +\infty]@f$
 *   -# @f$ (N \times C \times H \times W) @f$
 *      the targets @f$ y \in [-\infty, +\infty]@f$
 * @param top output Blob vector (length 1)
 *   -# @f$ (1 \times 1 \times 1 \times 1) @f$
 *      the computed Euclidean loss: @f$ E =
 *          \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n
 *          \right| \right|_2^2 @f$
 *
 * This can be used for least-squares regression tasks. An InnerProductLayer
 * input to a EuclideanLossLayer exactly formulates a linear least squares
 * regression problem. With non-zero weight decay the problem becomes one of
 * ridge regression -- see src/caffe/test/test_sgd_solver.cpp for a concrete
 * example wherein we check that the gradients computed for a Net with exactly
 * this structure match hand-computed gradient formulas for ridge regression.
 *
 * (Note: Caffe, and SGD in general, is certainly \b not the best way to solve
 * linear least squares problems! We use it only as an instructive example.)
 */
template <typename Dtype>
class EuclideanLossLayer : public LossLayer<Dtype> {
 public:
  explicit EuclideanLossLayer(const LayerParameter& param)
      : LossLayer<Dtype>(param), diff_() {}
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "EuclideanLoss"; }

  /**
   * Unlike most loss layers, in the EuclideanLossLayer we can backpropagate
   * to both inputs -- override to return true and always allow force_backward.
   */
  virtual inline bool AllowForceBackward(const int bottom_index) const {
    return true;
  }
 protected:
  /// @copydoc EuclideanLossLayer
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  /**
   * @brief Computes the Euclidean error gradient w.r.t. the inputs.
   *
   * Unlike other children of LossLayer, EuclideanLossLayer \b can compute
   * gradients with respect to the label inputs bottom[1] (but still only will
   * if propagate_down[1] is set, due to being produced by learnable parameters
   * or if force_backward is set). In fact, this layer is "commutative" -- the
   * result is the same regardless of the order of the two bottoms.
   *
   * @param top output Blob vector (length 1), providing the error gradient
   *      with respect to the outputs
   *   -# @f$ (1 \times 1 \times 1 \times 1) @f$
   *      This Blob's diff will simply contain the loss_weight, @f$ \lambda @f$,
   *      as @f$ \lambda @f$ is the coefficient of this layer's output
   *      @f$\ell_i@f$ in the overall Net loss
   *      @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence
   *      @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$.
   *      (*Assuming that this top Blob is not used as a bottom (input) by any
   *      other layer of the Net.)
   * @param propagate_down see Layer::Backward.
   * @param bottom input Blob vector (length 2)
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the predictions @f$\hat{y}@f$; Backward fills their diff with
   *      gradients @f$
   *        \frac{\partial E}{\partial \hat{y}_n} =
   *            \frac{1}{N} (\hat{y}_n - y_n)
   *      @f$ if propagate_down[0]
   *   -# @f$ (N \times C \times H \times W) @f$
   *      the targets @f$y@f$; Backward fills their diff with gradients
   *      @f$ \frac{\partial E}{\partial y_n} =
   *          \frac{1}{N} (y_n - \hat{y}_n)
   *      @f$ if propagate_down[1]
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  Blob<Dtype> diff_;
  // Added for the ignore-label feature; filled in from LossParameter in
  // Reshape.
  bool has_ignore_label_;
  int ignore_label_;
  //int* counts;
};
} // namespace caffe
#endif // CAFFE_EUCLIDEAN_LOSS_LAYER_HPP_
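Once the modified layer is built, the ignore label is turned on through the existing LossParameter in the net prototxt, since Reshape reads loss_param().ignore_label(). A minimal sketch; the blob names pred and label are placeholders for your own net:

layer {
  name: "loss"
  type: "EuclideanLoss"
  bottom: "pred"
  bottom: "label"
  top: "loss"
  loss_param {
    ignore_label: -1
  }
}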
euclidean_loss_layer.cpp
#include <vector>

#include "caffe/layers/euclidean_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Reshape(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::Reshape(bottom, top);
  CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1))
      << "Inputs must have the same dimension.";
  diff_.ReshapeLike(*bottom[0]);
  // Read the ignore label (if any) from the layer's LossParameter.
  has_ignore_label_ =
      this->layer_param_.loss_param().has_ignore_label();
  if (has_ignore_label_) {
    ignore_label_ = this->layer_param_.loss_param().ignore_label();
  }
  //int temp_count = bottom[0]->count();
  //counts = &temp_count;
}
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();
  caffe_sub(
      count,
      bottom[0]->cpu_data(),
      bottom[1]->cpu_data(),
      diff_.mutable_cpu_data());
  if (has_ignore_label_) {
    // Zero the difference wherever the label matches the ignore label, so
    // those elements contribute neither to the loss nor to the gradient
    // (Backward reuses diff_).
    const Dtype* label_data = bottom[1]->cpu_data();
    Dtype* diff_data = diff_.mutable_cpu_data();
    for (int i = 0; i < count; ++i) {
      const int label_value = static_cast<int>(label_data[i]);
      if (label_value == ignore_label_) {
        diff_data[i] = 0;
        //*counts = *counts-1;
      }
    }
  }
  Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data());
  Dtype loss = dot / bottom[0]->num() / Dtype(2);
  top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const Dtype sign = (i == 0) ? 1 : -1;
      const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[0]->num();
      caffe_cpu_axpby(
          bottom[i]->count(),              // count
          alpha,                           // alpha
          diff_.cpu_data(),                // a
          Dtype(0),                        // beta
          bottom[i]->mutable_cpu_diff());  // b
    }
  }
}
#ifdef CPU_ONLY
STUB_GPU(EuclideanLossLayer);
#endif
INSTANTIATE_CLASS(EuclideanLossLayer);
REGISTER_LAYER_CLASS(EuclideanLoss);
} // namespace caffe
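If you do want the loss averaged over only the non-ignored entries (the idea behind the commented-out counts code), one way is to count the valid elements during the zeroing loop and divide by that count instead of bottom[0]->num(). A minimal sketch of an alternative Forward_cpu, not part of the PR above:

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();
  caffe_sub(count, bottom[0]->cpu_data(), bottom[1]->cpu_data(),
      diff_.mutable_cpu_data());
  // Track how many elements actually enter the loss.
  int valid_count = count;
  if (has_ignore_label_) {
    const Dtype* label_data = bottom[1]->cpu_data();
    Dtype* diff_data = diff_.mutable_cpu_data();
    for (int i = 0; i < count; ++i) {
      if (static_cast<int>(label_data[i]) == ignore_label_) {
        diff_data[i] = 0;
        --valid_count;
      }
    }
  }
  Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data());
  // Divide by the valid element count (guarding against an all-ignored batch)
  // instead of the batch size bottom[0]->num().
  top[0]->mutable_cpu_data()[0] =
      valid_count > 0 ? dot / Dtype(valid_count) / Dtype(2) : Dtype(0);
}

Note that Backward_cpu (and Backward_gpu) would then also have to divide alpha by the same valid count, cached in a member variable, or the gradient scale would no longer match the loss.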
euclidean_loss_layer.cu
#include <vector>

#include "caffe/layers/euclidean_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {
template <typename Dtype>
__global__ void EuclideanLossForwardGPU(const int n,
    const Dtype* label_data, Dtype* diff_data,
    const int ignore_label) {
  CUDA_KERNEL_LOOP(index, n) {
    // Zero the difference at ignored positions, mirroring the CPU path.
    const int label_value = static_cast<int>(label_data[index]);
    if (label_value == ignore_label) {
      diff_data[index] = 0;
      //*counts = *counts - 1;
    }
  }
}
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();
  caffe_gpu_sub(
      count,
      bottom[0]->gpu_data(),
      bottom[1]->gpu_data(),
      diff_.mutable_gpu_data());
  if (has_ignore_label_) {
    EuclideanLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count),
        CAFFE_CUDA_NUM_THREADS>>>(count, bottom[1]->gpu_data(),
        diff_.mutable_gpu_data(), ignore_label_);
  }
  Dtype dot;
  caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
  Dtype loss = dot / bottom[0]->num() / Dtype(2);
  top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void EuclideanLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const Dtype sign = (i == 0) ? 1 : -1;
      const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[0]->num();
      caffe_gpu_axpby(
          bottom[i]->count(),              // count
          alpha,                           // alpha
          diff_.gpu_data(),                // a
          Dtype(0),                        // beta
          bottom[i]->mutable_gpu_diff());  // b
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(EuclideanLossLayer);
} // namespace caffe
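The same per-valid-element normalization can be done on the GPU without copying the labels back to the host: write a 0/1 validity mask in a kernel and sum it with caffe_gpu_asum. A hedged sketch, where mask_ is a hypothetical scratch Blob reshaped like diff_ in Reshape:

template <typename Dtype>
__global__ void ValidMaskGPU(const int n, const Dtype* label_data,
    const int ignore_label, Dtype* mask) {
  CUDA_KERNEL_LOOP(index, n) {
    // 1 for entries that enter the loss, 0 for ignored ones.
    mask[index] =
        (static_cast<int>(label_data[index]) == ignore_label) ? 0 : 1;
  }
}

// Inside Forward_gpu, after the diff has been zeroed at ignored positions:
// ValidMaskGPU<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
//     count, bottom[1]->gpu_data(), ignore_label_, mask_.mutable_gpu_data());
// Dtype valid;
// caffe_gpu_asum(count, mask_.gpu_data(), &valid);
// Dtype loss = valid > 0 ? dot / valid / Dtype(2) : Dtype(0);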