1. SoftmaxParameter
- engine
  - CAFFE = 1
  - CUDNN = 2
- axis
  The axis along which to apply softmax; it may be positive (counting from the first axis) or negative (counting back from the last axis).
// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer
message SoftmaxParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];

  // The axis along which to perform the softmax -- may be negative to index
  // from the end (e.g., -1 for the last axis).
  // Any other axes will be evaluated as independent softmaxes.
  optional int32 axis = 2 [default = 1];
}
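A negative axis value is resolved against the number of blob axes (Caffe's Blob::CanonicalAxisIndex does this internally). A minimal sketch of that mapping, using an illustrative helper name canonical_axis rather than the layer's actual member:

#include <cassert>

// Map a possibly negative axis to a canonical index in [0, num_axes).
// For a 4-axis [N C H W] blob, axis = 1 and axis = -3 both select the C axis.
int canonical_axis(int axis, int num_axes) {
  assert(axis >= -num_axes && axis < num_axes);
  return axis < 0 ? axis + num_axes : axis;
}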
2. Structure
The softmax layer in Caffe is only concerned with the softmax function itself (the loss is left to SoftmaxWithLossLayer), so it is a simple two-part graph: one bottom blob mapped to one top blob. For an input $z_i \ (i = 1, \cdots, k)$, the corresponding output is
$$a_i = \frac{e^{z_i}}{\sum_{j=1}^{k} e^{z_j}}.$$
- bottom
An [N C H W] blob, where N is the batch size, C is the number of classes, and H×W is called the spatial dimension; here the $z_i$ are the elements along the second axis (the channel axis).
- top
An [N C H W] blob holding the corresponding outputs (see the sketch after this list).
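For the [N C H W] case with softmax along axis 1, the layer effectively splits the blob into outer_num_ = N slices, channels = C classes, and inner_num_ = H×W spatial positions; these are the variables used in the code below. A minimal standalone sketch of how they relate, assuming the Reshape-style products over the blob shape (the variable names only mirror the layer's members):

#include <cstdio>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  // An [N C H W] bottom blob with softmax along axis 1 (the channel axis).
  std::vector<int> shape = {32, 10, 8, 8};  // example sizes, chosen arbitrarily
  int softmax_axis = 1;

  int channels = shape[softmax_axis];                              // 10 classes
  int outer_num = std::accumulate(shape.begin(), shape.begin() + softmax_axis,
                                  1, std::multiplies<int>());      // 32
  int inner_num = std::accumulate(shape.begin() + softmax_axis + 1, shape.end(),
                                  1, std::multiplies<int>());      // 8 * 8 = 64
  int dim = channels * inner_num;  // elements per outer slice

  // One independent softmax is taken over the `channels` values at each of the
  // outer_num * inner_num positions.
  std::printf("outer_num=%d channels=%d inner_num=%d dim=%d\n",
              outer_num, channels, inner_num, dim);
  return 0;
}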
3. Forward propagation
Computation steps
Step1: find the maximum of the input
$$z = \max_i \{z_i\}$$
Step2: subtract the maximum
$$z_i = z_i - z$$
Step3: exponentiate
$$z_i = e^{z_i}$$
Step4: sum
$$z_{sum} = \sum_{i=1}^{k} e^{z_i}$$
Step5: softmax
$$z_i = \frac{z_i}{z_{sum}}$$
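A minimal single-vector sketch of Steps 1–5 in plain C++, independent of Caffe's blobs and BLAS wrappers (the function name softmax is illustrative):

#include <algorithm>
#include <cmath>
#include <vector>

// Numerically stable softmax of one (non-empty) vector z, following Steps 1-5.
std::vector<float> softmax(const std::vector<float>& z) {
  // Step1: find the maximum
  float z_max = *std::max_element(z.begin(), z.end());
  std::vector<float> a(z.size());
  float z_sum = 0.f;
  for (size_t i = 0; i < z.size(); ++i) {
    // Step2 + Step3: subtract the maximum and exponentiate
    a[i] = std::exp(z[i] - z_max);
    // Step4: accumulate the sum
    z_sum += a[i];
  }
  // Step5: normalize
  for (size_t i = 0; i < a.size(); ++i) {
    a[i] /= z_sum;
  }
  return a;
}

Subtracting the maximum does not change the result, since the common factor $e^{-z}$ cancels between numerator and denominator; it only keeps the exponentials from overflowing.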
Code
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  Dtype* scale_data = scale_.mutable_cpu_data();
  // channels is the number of classes, i.e. the size of the softmax axis
  int channels = bottom[0]->shape(softmax_axis_);
  int dim = bottom[0]->count() / outer_num_;
  // initialize top with the values of bottom
  caffe_copy(bottom[0]->count(), bottom_data, top_data);
  // We need to subtract the max to avoid numerical issues, compute the exp,
  // and then normalize.
  // process the outer_num_ slices one at a time
  for (int i = 0; i < outer_num_; ++i) {
    // initialize scale_data with the first channel of the i-th slice, then
    // take the per-position maximum over the softmax axis
    // Step1:
    // initialize scale_data to the first plane
    caffe_copy(inner_num_, bottom_data + i * dim, scale_data);
    for (int j = 0; j < channels; j++) {
      for (int k = 0; k < inner_num_; k++) {
        scale_data[k] = std::max(scale_data[k],
            bottom_data[i * dim + j * inner_num_ + k]);
      }
    }
    // subtraction, Step2:
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels, inner_num_,
        1, -1., sum_multiplier_.cpu_data(), scale_data, 1., top_data);
    // exponentiation, Step3:
    caffe_exp<Dtype>(dim, top_data, top_data);
    // sum after exp, Step4:
    caffe_cpu_gemv<Dtype>(CblasTrans, channels, inner_num_, 1.,
        top_data, sum_multiplier_.cpu_data(), 0., scale_data);
    // division, Step5:
    for (int j = 0; j < channels; j++) {
      caffe_div(inner_num_, top_data, scale_data, top_data);
      top_data += inner_num_;
    }
  }
}
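The caffe_cpu_gemm and caffe_cpu_gemv calls rely on sum_multiplier_, a vector of `channels` ones, to broadcast and reduce along the channel axis without explicit loops: the Step2 gemm subtracts scale_data[k] (the per-position maximum) from every channel, and the Step4 gemv sums the exponentials over channels back into scale_data. A plain-loop sketch of the Step2 subtraction over one outer slice (not the actual implementation):

// Plain-loop equivalent of the Step2 gemm: subtract the per-position maximum
// (scale_data[k]) from every channel j at every spatial position k.
// `top` points at one outer slice of top_data (channels * inner_num values).
void subtract_max(float* top, const float* scale_data,
                  int channels, int inner_num) {
  for (int j = 0; j < channels; ++j) {
    for (int k = 0; k < inner_num; ++k) {
      top[j * inner_num + k] -= scale_data[k];
    }
  }
}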
4. Backward propagation
Computation steps
Step1: compute the dot product of top_data and top_diff
Step2: subtract it, which corresponds to the factor from the derivation below
$$\left(\frac{\partial l}{\partial a_i} - \frac{\partial l}{\partial a} \cdot a\right)$$
Step3: multiply elementwise to obtain the partial derivative
Derivation
$$\frac{\partial l}{\partial z} = \frac{\partial l}{\partial a} \frac{\partial a}{\partial z}$$
where $\frac{\partial l}{\partial a}$ = top_diff and $a$ = top_data.
Moreover:
$$\frac{\partial a_i}{\partial z_j} = \frac{\partial}{\partial z_j} \left( \frac{e^{z_i}}{\sum_k e^{z_k}} \right)$$
When $i \neq j$:
$$\frac{\partial a_i}{\partial z_j} = -\frac{e^{z_i} e^{z_j}}{\left(\sum_k e^{z_k}\right)^2} = -a_i a_j$$
When $i = j$:
$$\frac{\partial a_i}{\partial z_j} = \frac{e^{z_i} \sum_k e^{z_k} - e^{z_i} e^{z_j}}{\left(\sum_k e^{z_k}\right)^2} = a_j - a_j a_j$$
Putting these together:
$$\frac{\partial l}{\partial z_j} = \frac{\partial l}{\partial a} \frac{\partial a}{\partial z_j} = \sum_i \frac{\partial l}{\partial a_i} \frac{\partial a_i}{\partial z_j} = -\left(\frac{\partial l}{\partial a} \cdot a\right) a_j + \frac{\partial l}{\partial a_j} a_j = \left(\frac{\partial l}{\partial a_j} - \frac{\partial l}{\partial a} \cdot a\right) a_j$$
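A single-vector sketch of the final formula, mirroring backward Steps 1–3 in plain C++ (with top_data = $a$ and top_diff = $\frac{\partial l}{\partial a}$; the function name softmax_backward is illustrative):

#include <vector>

// bottom_diff[j] = (top_diff[j] - dot(top_diff, top_data)) * top_data[j]
std::vector<float> softmax_backward(const std::vector<float>& top_data,
                                    const std::vector<float>& top_diff) {
  // Step1: dot(top_diff, top_data)
  float dot = 0.f;
  for (size_t i = 0; i < top_data.size(); ++i) {
    dot += top_diff[i] * top_data[i];
  }
  std::vector<float> bottom_diff(top_data.size());
  for (size_t j = 0; j < top_data.size(); ++j) {
    // Step2: subtract the dot product; Step3: multiply by a_j
    bottom_diff[j] = (top_diff[j] - dot) * top_data[j];
  }
  return bottom_diff;
}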
Code
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->cpu_diff();
  const Dtype* top_data = top[0]->cpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  Dtype* scale_data = scale_.mutable_cpu_data();
  int channels = top[0]->shape(softmax_axis_);
  int dim = top[0]->count() / outer_num_;
  caffe_copy(top[0]->count(), top_diff, bottom_diff);
  for (int i = 0; i < outer_num_; ++i) {
    // compute dot(top_diff, top_data) and subtract them from the bottom diff, Step1:
    for (int k = 0; k < inner_num_; ++k) {
      scale_data[k] = caffe_cpu_strided_dot<Dtype>(channels,
          bottom_diff + i * dim + k, inner_num_,
          top_data + i * dim + k, inner_num_);
    }
    // subtraction, Step2:
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels, inner_num_, 1,
        -1., sum_multiplier_.cpu_data(), scale_data, 1., bottom_diff + i * dim);
  }
  // elementwise multiplication, Step3:
  caffe_mul(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
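In Step1, caffe_cpu_strided_dot walks the channel axis with stride inner_num_, so for each spatial position k it reduces the channels values to a single dot product; because bottom_diff was just filled with a copy of top_diff by caffe_copy, this is exactly dot(top_diff, top_data) at that position. A plain-loop sketch over one outer slice (not the actual implementation):

// Plain-loop equivalent of the Step1 strided dots: for each spatial position k,
// accumulate the dot product of top_diff and top_data over the channel axis.
// `diff` and `data` point at one outer slice (channels * inner_num values).
void channel_dot(const float* diff, const float* data, float* scale_data,
                 int channels, int inner_num) {
  for (int k = 0; k < inner_num; ++k) {
    float dot = 0.f;
    for (int j = 0; j < channels; ++j) {
      dot += diff[j * inner_num + k] * data[j * inner_num + k];
    }
    scale_data[k] = dot;
  }
}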