caffe-yolo training

[After finishing this update I will not post anything more about YOLO, and will probably not use the YOLO framework again.]
1. Adding the LeakyLayer
leaky_layer.hpp

template <typename Dtype>
class LeakyLayer : public NeuronLayer<Dtype>{
public:
    explicit LeakyLayer(const LayerParameter& param)
       : NeuronLayer<Dtype>(param) {}
    virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
          const vector<Blob<Dtype>*>& top);
    virtual inline const char* type() const { return "Leaky"; }
    virtual inline int ExactNumBottomBlobs() const { return 1; }
    virtual inline int ExactNumTopBlobs() const { return 1; }
protected:
      virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
          const vector<Blob<Dtype>*>& top);
      virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
          const vector<Blob<Dtype>*>& top);
      virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
          const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}  // not implemented yet; a CPU sketch follows the .cpp listing below
      virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
          const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
};
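
Since type() returns "Leaky" and the .cpp below registers the class with REGISTER_LAYER_CLASS(Leaky), the layer can be referenced from a network prototxt by that type string. A minimal sketch (layer and blob names are placeholders); note that LayerSetUp rejects in-place use, so top must differ from bottom:

layer {
  name: "conv1/leaky"
  type: "Leaky"
  bottom: "conv1"
  top: "conv1/leaky"
}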

leaky_layer.cpp

template <typename Dtype>
void LeakyLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
          const vector<Blob<Dtype>*>& top) {
    NeuronLayer<Dtype>::LayerSetUp(bottom, top);
      CHECK_NE(top[0], bottom[0]) << this->type() << " Layer does not "
        "allow in-place computation.";
}

template <typename Dtype>
void LeakyLayer<Dtype>::Forward_cpu(
        const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
      const int count = top[0]->count();
      Dtype* top_data = top[0]->mutable_cpu_data();
      const Dtype* bottom_data = bottom[0]->cpu_data();
      for(int i = 0; i < count; ++i){
          if(bottom_data[i] > 0)
              top_data[i] = bottom_data[i];
          else
              top_data[i] = Dtype(0.1) * bottom_data[i];
          // Equivalently: top_data[i] = bottom_data[i] > 0 ? bottom_data[i] : Dtype(0.1) * bottom_data[i];
      }
}
#ifdef CPU_ONLY
STUB_GPU(LeakyLayer);
#endif
INSTANTIATE_CLASS(LeakyLayer);
REGISTER_LAYER_CLASS(Leaky);
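
The header above leaves Backward_cpu with an empty body. The forward pass computes f(x) = x for x > 0 and 0.1·x otherwise, so the gradient is 1 or 0.1; a CPU backward mirroring the CUDA kernel in the next listing could look like the sketch below (not from the original post; the empty inline body in the header would then become a plain declaration):

template <typename Dtype>
void LeakyLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  const int count = bottom[0]->count();
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* top_diff = top[0]->cpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  for (int i = 0; i < count; ++i) {
    // df/dx is 1 for positive inputs and 0.1 otherwise.
    bottom_diff[i] = bottom_data[i] > 0 ? top_diff[i] : Dtype(0.1) * top_diff[i];
  }
}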

leaky_layer.cu

template <typename Dtype>
__global__ void LeakyForward(const int n, const Dtype* in, Dtype* out){
    CUDA_KERNEL_LOOP(index, n){
        out[index] = in[index] > 0 ? in[index] : in[index]*0.1;
    }
}

template <typename Dtype>
void LeakyLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int count = bottom[0]->count();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  LeakyForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
          count, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}

template<typename Dtype>
__global__ void LeakyBackward(const int n, const Dtype* bottom_data, Dtype* bottom_diff, const Dtype* top_diff){
    CUDA_KERNEL_LOOP(index, n){
        bottom_diff[index] = bottom_data[index] > 0 ? top_diff[index] : top_diff[index]*0.1;
    }
}

template<typename Dtype>
void LeakyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
          const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){
    const int count = bottom[0]->count();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* bottom_data = bottom[0]->gpu_data();
    LeakyBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
          count, bottom_data, bottom_diff, top_diff);
    CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(LeakyLayer);
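
With all three files compiled into Caffe, the layer is reachable through the layer registry just as net.cpp would create it. A small stand-alone forward check (a sketch; assumes the sources above are built into libcaffe and linked against):

#include <cstdio>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/proto/caffe.pb.h"

using namespace caffe;

int main() {
  // One negative and one positive input value.
  Blob<float> bottom_blob(1, 1, 1, 2);
  bottom_blob.mutable_cpu_data()[0] = -2.0f;
  bottom_blob.mutable_cpu_data()[1] = 0.5f;
  Blob<float> top_blob;
  std::vector<Blob<float>*> bottom(1, &bottom_blob);
  std::vector<Blob<float>*> top(1, &top_blob);

  // Instantiate by type string, exactly as a prototxt layer would be.
  LayerParameter param;
  param.set_type("Leaky");
  boost::shared_ptr<Layer<float> > layer =
      LayerRegistry<float>::CreateLayer(param);
  layer->SetUp(bottom, top);
  layer->Forward(bottom, top);

  // Prints "-0.2 0.5": the negative input is scaled by 0.1,
  // the positive one passes through unchanged.
  printf("%g %g\n", top_blob.cpu_data()[0], top_blob.cpu_data()[1]);
  return 0;
}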

2. Adding the Detect layer (loss)
detect_layer.hpp

template<typename Dtype>
class DetectLayer : public Layer<Dtype>{
public:
    explicit DetectLayer(const LayerParameter& param);
    virtual ~DetectLayer(){}
    virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
            const vector<Blob<Dtype>*>& top);
    virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
            const vector<Blob<Dtype>*>& top);
    virtual inline const char* type() const { return "Detect";}
    virtual inline int ExactNumBottomBlobs() const { return 2; }
    virtual inline int ExactNumTopBlobs() const { return 1; }
protected:
    virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
            const vector<Blob<Dtype>*>& top);
    virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
            const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
    int classes;         // number of object classes (20 for VOC)
    int coords;          // coordinates per box (4: x, y, w, h)
    int rescore;         // if set, the confidence target is the predicted IoU
    int side;            // the image is divided into a side x side grid
    int num;             // boxes predicted per grid cell
    bool softmax;        // apply softmax to the class scores
    bool sqrt;           // predict sqrt(w), sqrt(h) instead of w, h
    float jitter;
    float object_scale;  // weights of the individual loss terms
    float noobject_scale;
    float class_scale;
    float coord_scale;

};
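
These members mirror the YOLO v1 detection head: the input image is divided into a side × side grid, each cell predicting num boxes (coords coordinates plus one confidence each) together with classes class scores, and the *_scale members weight the individual loss terms. A quick sanity check of the expected size of bottom[0] per image (a sketch; the actual DetectParameter parsing is not shown in this post):

// Outputs per image for a YOLO v1 style head. With the paper's defaults
// (side = 7, num = 2, classes = 20, coords = 4) this gives
// 7 * 7 * (20 + 2 * 5) = 1470.
inline int outputs_per_image(int side, int num, int classes, int coords) {
  return side * side * (classes + num * (coords + 1));
}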

detect_layer.cpp

template<typename Dtype>
Dtype lap(Dtype x1_min, Dtype x1_max, Dtype x2_min, Dtype x2_max){
    // Overlap length of the 1-D intervals [x1_min, x1_max] and
    // [x2_min, x2_max]; zero when they do not intersect.
    Dtype left = x1_min > x2_min ? x1_min : x2_min;
    Dtype right = x1_max < x2_max ? x1_max : x2_max;
    return right > left ? right - left : Dtype(0);
}
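
lap returns the overlap length of two 1-D intervals, so applying it once per axis yields the intersection area of two boxes, from which the IoU used to match predicted and ground-truth boxes follows. A sketch of that composition (the helper name box_iou is mine, not from the original file; boxes are given as center plus size, as in YOLO):

template <typename Dtype>
Dtype box_iou(Dtype x1, Dtype y1, Dtype w1, Dtype h1,
              Dtype x2, Dtype y2, Dtype w2, Dtype h2) {
  // Per-axis overlaps of the two boxes.
  Dtype iw = lap(x1 - w1 / 2, x1 + w1 / 2, x2 - w2 / 2, x2 + w2 / 2);
  Dtype ih = lap(y1 - h1 / 2, y1 + h1 / 2, y2 - h2 / 2, y2 + h2 / 2);
  Dtype inter = iw * ih;
  Dtype uni = w1 * h1 + w2 * h2 - inter;  // union of the two areas
  return inter / uni;
}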

template<typen