Adding a Custom Layer to Caffe: Reproducing "Feature Learning based Deep Supervised Hashing with Pairwise Labels"

A while ago I was working on image hashing. This paper from Nanjing University is quite good, but the original source code is written in MatConvNet, which is impractical to deploy.
So I reproduced it in Caffe, so the model can be driven directly through the C++ API. The mAP measured in my tests is 0.7459, slightly better than the result reported in the paper (0.713).
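
For reference, the loss implemented in Forward_cpu below is the paper's pairwise likelihood loss plus a quantization regularizer (η is the reg_factor layer parameter; the code additionally normalizes each term over the mini-batch):

$$\theta_{ij} = \tfrac{1}{2}\,u_i^{\top}u_j, \qquad b_i = \operatorname{sgn}(u_i)$$

$$\mathcal{L} = -\sum_{i,j}\left(s_{ij}\,\theta_{ij} - \log\!\left(1 + e^{\theta_{ij}}\right)\right) + \eta\sum_i \lVert b_i - u_i\rVert_2^2$$

$$\frac{\partial\mathcal{L}}{\partial u_i} = \sum_j\left(\sigma(\theta_{ij}) - s_{ij}\right)u_j + 2\eta\,(u_i - b_i)$$

Here u_i is the real-valued network output for image i, s_ij ∈ {0,1} indicates whether images i and j share a label, and σ is the sigmoid. Note that the gradient is accumulated directly into bottom[0]'s diff inside Forward_cpu, which is why Backward_cpu is essentially empty.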

/*************************************************************************
     File Name: deep_feature_hash_layer.cpp
     Author: bin.wang
     Mail:   sa615168@mail.ustc.edu.cn
     Created Time: Fri 03 Mar 2017 11:23:12 AM CST
 ************************************************************************/
#include <algorithm>
#include <vector>

#include "caffe/layers/deep_feature_hash_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {
    template <typename Dtype>
    void DeepFeatureHashLayer<Dtype>::LayerSetUp(
        const vector<Blob<Dtype>*>& bottom,const vector<Blob<Dtype>*>& top){
            LossLayer<Dtype>::LayerSetUp(bottom,top);
            CHECK_EQ(bottom[0]->height(),1);
            CHECK_EQ(bottom[0]->width(),1);
            CHECK_EQ(bottom[1]->height(),1);
            CHECK_EQ(bottom[1]->width(),1);

            const int batchSize = bottom[0]->num();
            const int channels = bottom[0]->channels();
            forward_data_.Reshape(1,channels,1,1);      // b_i for one sample, shape: [1, code_length, 1, 1]
            diff_.Reshape(1,channels,1,1);              // b_i - u_i for one sample
            ALPHA_.Reshape(batchSize,batchSize,1,1);    // sigmoid(theta_ij) - s_ij, batchSize x batchSize
            U_.Reshape(channels,batchSize,1,1);         // transposed features, code_length x batchSize
            reg_Termfactor = this->layer_param().deep_hash_param().reg_factor();   // eta
        }
    template <typename Dtype>
    void DeepFeatureHashLayer<Dtype>::Forward_cpu(
        const vector<Blob<Dtype>*>& bottom,
        const vector<Blob<Dtype>*>& top){
            const int batchSize = bottom[0]->num();
            const int channels = bottom[0]->channels();
            Dtype* bout = bottom[0]->mutable_cpu_diff();    // gradient w.r.t. u_i, accumulated during the forward pass
            Dtype hash_loss(0.0);
            Dtype theta_ij(0.0);
            Dtype sim_term(0.0);
            Dtype regular_term(0.0);
            bool similarity_ij = false;                     // s_ij: true if samples i and j share a label
            Dtype* alpha = ALPHA_.mutable_cpu_data();
            caffe_set(channels*batchSize, Dtype(0), bout);  // clear the gradient buffer

            Dtype* pu = U_.mutable_cpu_data();
            for(int i = 0; i<  channels; ++i){          //transpose bottom data[features] ==> U_ 
                for(int j = 0; j< batchSize; ++j){
                    *(pu+ i*batchSize+j) = *(bottom[0]->cpu_data()+j*channels+i);
                    //std::cout << *(pu+ i*batchSize+j)<< " ";
                }
            //std::cout <<std::endl;
            }

            for(int i  = 0;i < batchSize; i++){
                for(int j= 0;j<batchSize;j++){
                    //1-compute theta_ij

                    theta_ij = caffe_cpu_dot(channels,
                                             bottom[0]->cpu_data()+(i*channels),
                                             bottom[0]->cpu_data()+(j*channels));
                   // std::cout << "theta_ij "<< theta_ij <<std::endl;
                    theta_ij *= 0.5;   // theta_ij = 0.5 * u_i^T * u_j

                    similarity_ij = (static_cast<int>(bottom[1]->cpu_data()[i]) ==
                                     static_cast<int>(bottom[1]->cpu_data()[j]));

                    sim_term += similarity_ij * theta_ij - log(1+exp(theta_ij)); // similarity (likelihood) term of the loss

                    // store sigmoid(theta_ij) - s_ij, used below for the gradient
                    *((alpha + i*batchSize)+j) = (1/(1+exp(-theta_ij)) - similarity_ij);   // ALPHA_[i][j]
                }   

                // compute b_i = sign(u_i), element-wise over the code length
                Dtype* tmp_data = forward_data_.mutable_cpu_data();
                for(int k = 0; k < channels; ++k){   // channels == code length
                    *tmp_data = elemswise_sign(*(bottom[0]->cpu_data()+(i*channels)+k));
                    tmp_data++;
                    //std::cout << *(bottom[0]->cpu_data()+(i*channels)+k)<< ":";
                    //std::cout << forward_data_.cpu_data()[k] << "  ";
                }   // b_i has been computed
                //std::cout << std::endl;
                    // compute bi - ui
                caffe_sub(channels,
                         forward_data_.cpu_data(),                //bi 
                         bottom[0]->cpu_data()+(i*channels),       //net output binary code(ui) 
                         diff_.mutable_cpu_data());                //bi - ui 
                //for(int k = 0;k<12; ++k){
                //    std::cout <<*(diff_.cpu_data()+k)<< " ";
                //}
                //std::cout << std::endl;
                //std::cout << caffe_cpu_dot(channels,diff_.cpu_data(),diff_.cpu_data()) << std::endl;
                regular_term += caffe_cpu_dot(channels,diff_.cpu_data(),diff_.cpu_data());

                caffe_cpu_axpby(
                    channels,
                    (-2 * reg_Termfactor) / batchSize,  // -2*eta / N
                    diff_.cpu_data(),                   // b_i - u_i
                    Dtype(1.0),
                    bout + (i*channels)                 // accumulate into dL/du_i
                    );

            /*        for(int ki = 0; ki<12;ki++){
                        std::cout << *(bout + (i*channels)+ki) << " ";
                    }
                    std::cout << std::endl;*/
                //if(i%40 ==0){std::cout << sim_term <<std::endl;}
            }

        /*for(int nx=0; nx<batchSize; ++nx){
            std::cout << *(ALPHA_.cpu_data()+nx) << std::endl;
        }*/

            // add the similarity-term gradient: sum_j (sigmoid(theta_ij) - s_ij) * u_j, normalized by N*(N-1)
            for(int chi = 0; chi < batchSize; ++chi){
                for(int chk = 0; chk < channels; ++chk){
                    *(bout+chi*channels+chk) += caffe_cpu_dot(batchSize,
                                                              ALPHA_.cpu_data()+(chi*batchSize),
                                                              U_.cpu_data()+(chk*batchSize))/(batchSize*(batchSize-1));
                    //std::cout << (*(bout+chi*channels+chk))<<std::endl;
                }
            }

            sim_term /= batchSize * batchSize;
            regular_term /= batchSize;
            //std::cout << "sim_term "     << sim_term << std::endl;
            //std::cout << "regular_term " << regular_term << std::endl << std::endl;
            hash_loss = reg_Termfactor * regular_term - sim_term;  // total loss: eta * regularization term - similarity term
            top[0]->mutable_cpu_data()[0] = hash_loss;
        }



template <typename Dtype>
void DeepFeatureHashLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,const vector<Blob<Dtype>*>& bottom){
        if(propagate_down[1]){
            LOG(FATAL) << this->type()
            << " Layer cannot backpropagate to label inputs.";
        }
        // Nothing else to do here: the gradient w.r.t. bottom[0] (the hash
        // features) was already accumulated into bottom[0]->mutable_cpu_diff()
        // during Forward_cpu.
        /*
        Dtype* bout = bottom[0]->mutable_cpu_diff();
        const int batchSize = bottom[0]->num();
        const int channels = bottom[0]->channels();*/
    }



#ifdef CPU_ONLY
STUB_GPU(DeepFeatureHashLayer);
#endif

INSTANTIATE_CLASS(DeepFeatureHashLayer);
REGISTER_LAYER_CLASS(DeepFeatureHash);

}// namespace caffe 
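
The matching header is not included in the post above; the following is a minimal sketch of what deep_feature_hash_layer.hpp could look like, reconstructed from the members and calls used in the .cpp (the exact method signatures and the elemswise_sign helper are my assumptions):

/*************************************************************************
     File Name: deep_feature_hash_layer.hpp  (sketch, not the original)
 ************************************************************************/
#ifndef CAFFE_DEEP_FEATURE_HASH_LAYER_HPP_
#define CAFFE_DEEP_FEATURE_HASH_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/layers/loss_layer.hpp"

namespace caffe {

// Pairwise-label hashing loss.
// bottom[0]: real-valued hash features u_i, shape [N, code_length, 1, 1]
// bottom[1]: class labels, used to derive the pairwise similarity s_ij
template <typename Dtype>
class DeepFeatureHashLayer : public LossLayer<Dtype> {
 public:
  explicit DeepFeatureHashLayer(const LayerParameter& param)
      : LossLayer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
                          const vector<Blob<Dtype>*>& top);
  virtual inline const char* type() const { return "DeepFeatureHash"; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
                           const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                           const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
                            const vector<bool>& propagate_down,
                            const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
                            const vector<bool>& propagate_down,
                            const vector<Blob<Dtype>*>& bottom);

  // element-wise sign used to binarize u_i into b_i (assumed helper)
  Dtype elemswise_sign(Dtype x) { return x >= 0 ? Dtype(1) : Dtype(-1); }

  Blob<Dtype> forward_data_;  // b_i for the current sample
  Blob<Dtype> diff_;          // b_i - u_i
  Blob<Dtype> ALPHA_;         // sigmoid(theta_ij) - s_ij, batchSize x batchSize
  Blob<Dtype> U_;             // transposed features, code_length x batchSize
  Dtype reg_Termfactor;       // eta (reg_factor in the prototxt)
};

}  // namespace caffe

#endif  // CAFFE_DEEP_FEATURE_HASH_LAYER_HPP_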

A few commented-out debug print statements are left in the .cpp above.
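
As a usage sketch (the blob names here are hypothetical), the layer can be plugged into a training prototxt roughly like this. It assumes a DeepHashParameter message with a reg_factor field has been added to caffe.proto and exposed as deep_hash_param in LayerParameter, since the .cpp reads this->layer_param().deep_hash_param().reg_factor():

layer {
  name: "hash_loss"
  type: "DeepFeatureHash"              # name registered by REGISTER_LAYER_CLASS above
  bottom: "fc_hash"                    # code_length-dimensional feature blob (hypothetical name)
  bottom: "label"
  top: "hash_loss"
  deep_hash_param { reg_factor: 10 }   # eta; 10 is only an illustrative value
}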
