看Caffe/wiki/Development,同时对照ArgMaxLayer来看!
layer的声明要写到:
- common_layers.hpp
- data_layers.hpp
- loss_layers.hpp
- neuron_layers.hpp
- vision_layers.hpp
要实现以下函数:
- LayerSetUp
- Reshape
- Forward_cpu
- Backward_cpu
- *Blobs (optional):
  - ExactNumBottomBlobs
  - MinBottomBlobs
  - MaxBottomBlobs
  - ExactNumTopBlobs
  - MinTopBlobs
  - MaxTopBlobs
  - EqualNumBottomTopBlobs
例如,ArgMaxLayer在common_layers.hpp中有
// ArgMaxLayer: outputs, for each batch item, the indices (and optionally the
// values) of the top_k largest entries of the bottom blob.
template <typename Dtype>
class ArgMaxLayer : public Layer<Dtype> {
 public:
  explicit ArgMaxLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  // Called once; reads layer parameters (out_max_val, top_k) etc.
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Computes the top blobs' sizes; called at SetUp and before every Forward.
  // NOTE: must be spelled "Reshape" (not "ReShape") or it will not override
  // the base-class virtual and the implementation below will never be called.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  virtual inline const char* type() const { return "ArgMax"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // ArgMax is not differentiable, so no backward pass is implemented.
  // Caffe's Backward_cpu signature takes (top, propagate_down, bottom),
  // in that order.
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    NOT_IMPLEMENTED;
  }

  bool out_max_val_;  // if true, also output the max values themselves
  size_t top_k_;      // number of top entries to report
};
具体实现:
// One-time setup: cache the ArgMax parameters from the layer's prototxt.
template <typename Dtype>
void ArgMaxLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const auto& argmax_param = this->layer_param_.argmax_param();
  out_max_val_ = argmax_param.out_max_val();
  top_k_ = argmax_param.top_k();
}
// Shapes the top blob: one channel holds the top_k indices; when
// out_max_val_ is set, a second channel holds the corresponding values.
template <typename Dtype>
void ArgMaxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const int num_channels = out_max_val_ ? 2 : 1;
  top[0]->Reshape(bottom[0]->num(), num_channels, top_k_, 1);
}
// Forward pass: for each of the num batch items, pair every entry with its
// flat index, partially sort by value (descending), and write out the top_k
// indices (and, if requested, the top_k values).
template <typename Dtype>
void ArgMaxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  int num = bottom[0]->num();
  // Entries per batch item (channels * height * width).
  int dim = bottom[0]->count() / bottom[0]->num();
  for (int i = 0; i < num; ++i) {
    // (value, index) pairs so the sort remembers where each value came from.
    vector<pair<Dtype, int> > bottom_data_vector;
    bottom_data_vector.reserve(dim);  // one allocation instead of log(dim)
    for (int j = 0; j < dim; ++j) {
      bottom_data_vector.push_back(make_pair(bottom_data[i * dim + j], j));
    }
    // Only the first top_k_ entries need to be in order.
    partial_sort(
        bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
        bottom_data_vector.end(), greater<pair<Dtype, int> >());
    // size_t loop index: top_k_ is size_t, avoids signed/unsigned comparison.
    for (size_t j = 0; j < top_k_; ++j) {
      top_data[top[0]->offset(i, 0, j)] = bottom_data_vector[j].second;
    }
    if (out_max_val_) {
      for (size_t j = 0; j < top_k_; ++j) {
        top_data[top[0]->offset(i, 1, j)] = bottom_data_vector[j].first;
      }
    }
  }
}
Layer参数在proto/caffe.proto中定义
message LayerParameter {
  // ... (other fields omitted) ...
  optional ArgMaxParameter argmax_param = 103;
  // ... (other fields omitted) ...
}

// Parameters for the ArgMax layer.
message ArgMaxParameter {
  // If true, produce pairs (argmax, maxval) instead of indices only.
  optional bool out_max_val = 1 [default = false];
  // Number of top-scoring entries to output per batch item.
  optional uint32 top_k = 2 [default = 1];
}
在layer的cpp文件中实例化并注册自定义的layer,使用INSTANTIATE_CLASS和REGISTER_LAYER_CLASS宏:
// Instantiate the template for float and double, and register the layer
// with the factory so a prototxt "type: ArgMax" can create it.
// Both macros are statement-like and require a trailing semicolon.
INSTANTIATE_CLASS(ArgMaxLayer);
REGISTER_LAYER_CLASS(ArgMax);
测试在test/test_argmax_layer.cpp中。
参考:
What is the reason for having separate `LayerSetUp` and `Reshape`? #1385