Code walkthrough of a MATLAB implementation of LeNet5 (3)

function [cnet,je] = calcje(cnet,e)
%Compute the first-order error here and propagate it backwards
%CALCJE Calculation of gradient via backpropagation
%The author's original description:
% Syntax
%
% [cnet,je] = calcje(cnet,e)
%
% Description
% Input:
% cnet - Convolutional neural network class object
% e - error
% Output:
% cnet - convolutional neural network class object with computed
% gradients
% je - gradient (or Jacobian times error)
%
%© Sirotenko Mikhail, 2009

%Last layer
k = cnet.numLayers;
%X is after Sigmoid, Y is before Sigmoid
%Calculate the performance function derivative
cnet.FLayer{k}.dEdX{1} = feval(cnet.Perf, 'dy', e, cnet.FLayer{k}.Y, cnet.FLayer{k}.X, cnet.Perf);%cnet.Perf = 'mse';
%Calculating the transfer function derivative
cnet.FLayer{k}.dXdY{1} = feval(cnet.FLayer{k}.TransfFunc,'dn',cnet.FLayer{k}.Y,cnet.FLayer{k}.X);
%Calculating dE/dY, stored as a column
cnet.FLayer{k}.dEdY{1} = cnet.FLayer{k}.dXdY{1}.*cnet.FLayer{cnet.numLayers}.dEdX{1};
%Check if the previous layer is convolutional or fully-connected
if(cnet.numFLayers~=1)
outp = cnet.FLayer{cnet.numLayers-1}.X;
else
outp = cnet.CLayer{cnet.numLayers-1}.XC;
end
%Calculate gradients for weights and biases
cnet.FLayer{k}.dEdW{1} = kron(cnet.FLayer{k}.dEdY{1},outp)';%dEdY times every input x that took part in the computation gives a matrix the same size as w
cnet.FLayer{k}.dEdB{1} = cnet.FLayer{k}.dEdY{1}';

%Reshape data into single-column vector
je=cnet.FLayer{k}.dEdW{1};
je=[je;cnet.FLayer{k}.dEdB{1}];
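%Note on the kron trick above: for two row vectors kron(d,x) lists every
%product d(i)*x(j) with the input index j varying fastest, e.g.
%   kron([d1 d2],[x1 x2 x3]) = [d1*x1 d1*x2 d1*x3  d2*x1 d2*x2 d2*x3]
%so the transposed result is the flattened outer product of dE/dY with the
%layer input, i.e. all per-weight gradients dE/dW(i,j) = dE/dy_i * x_j
%stacked into one column (presumably in the same order the weights are
%stored in cnet.FLayer{k}.W).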

if (cnet.numFLayers>1) %If there is more than one fully-connected layer
for k=cnet.numLayers-1:-1:cnet.numLayers-cnet.numFLayers+1
%Backpropagate error to outputs of this layer
cnet.FLayer{k}.dEdX{1} = cnet.FLayer{k+1}.W*cnet.FLayer{k+1}.dEdY{1}';
%Calculating the transfer function derivative
cnet.FLayer{k}.dXdY{1} = feval(cnet.FLayer{k}.TransfFunc,'dn',cnet.FLayer{k}.Y,cnet.FLayer{k}.X)';
%Backpropagate error to transfer function inputs
cnet.FLayer{k}.dEdY{1} = cnet.FLayer{k}.dXdY{1}.*cnet.FLayer{k}.dEdX{1};
%Check if the previous layer is convolutional or fully-connected
if(cnet.numLayers-cnet.numFLayers+1==k)
outp = cell2mat(cnet.CLayer{k-1}.XC);
else
outp = cnet.FLayer{k-1}.X;%fully-connected
end
%Calculate gradients for weights and biases
cnet.FLayer{k}.dEdW{1} = kron(cnet.FLayer{k}.dEdY{1},outp);
cnet.FLayer{k}.dEdB{1} = cnet.FLayer{k}.dEdY{1};

   %Reshape data into single-column vector
     je=[je;cnet.FLayer{k}.dEdW{1}];
     je=[je;cnet.FLayer{k}.dEdB{1}];    
end

end
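%Summary of the loop above, written as the chain rule for one fully-connected
%layer k (x denotes the previous layer's output):
%   dE/dX_k = W_{k+1} * dE/dY_{k+1}     (error at this layer's outputs)
%   dE/dY_k = f'(Y_k) .* dE/dX_k        (error before the transfer function)
%   dE/dW_k = outer product of dE/dY_k and x,   dE/dB_k = dE/dY_k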

k = cnet.numLayers-cnet.numFLayers;
%Backpropagating the error
cnet.CLayer{k}.dEdX = num2cell(cnet.FLayer{k+1}.W*cnet.FLayer{k+1}.dEdY{1});%split the backpropagated error vector into one cell per feature map
%dE/dY = dE/dX because of linear transfer function for C-layer
cnet.CLayer{k}.dEdY = cnet.CLayer{k}.dEdX;

for k=(cnet.numLayers-cnet.numFLayers):-1:2 %Exclude first layer from loop (it's a dummy)
if(rem(k,2)) %Parity check
%Subsampling (pooling) layer
%Initialize dE/dX for accumulating the error
dEdX = num2cell(zeros(cnet.SLayer{k}.numFMaps,1));
for l=1:cnet.SLayer{k}.numFMaps %For all feature maps
%calculating the transfer function derivative
cnet.SLayer{k}.dXdY{l} = feval(cnet.SLayer{k}.TransfFunc,'dn',cnet.SLayer{k}.YS{l},cnet.SLayer{k}.XS{l});
%Backpropagate error to transfer function inputs
cnet.SLayer{k}.dEdY{l} = cnet.SLayer{k}.dXdY{l}.*cnet.SLayer{k}.dEdX{l};%dEdX was already computed while processing the layer above
%Calculate the gradient for weights and biases
cnet.SLayer{k}.dEdW{l} = sum(sum(cnet.SLayer{k}.dEdY{l}.*cnet.SLayer{k}.SS{l}));
cnet.SLayer{k}.dEdB{l}=sum(sum(cnet.SLayer{k}.dEdY{l}));
if(k>1) %Backpropagate the error if this is not the first layer
dEdX{l} = back_subsample(cnet.SLayer{k}.dEdY{l}.*cnet.SLayer{k}.WS{l},cnet.SLayer{k}.SRate);%upsample the error back to the previous layer's resolution
end
end

if(k>1) %Store the accumulated backpropagated error
    cnet.CLayer{k-1}.dEdX = reshape(dEdX,size(cnet.CLayer{k-1}.XC,1),size(cnet.CLayer{k-1}.XC,2),1);
end
%Reshape data into single-column vector
 je=[je;cell2mat(cnet.SLayer{k}.dEdW')];
 je=[je;cell2mat(cnet.SLayer{k}.dEdB')];
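%Why these gradients: in this implementation the forward pass of an S-layer
%presumably computes YS{l} = WS{l}*SS{l} + BS{l}, where SS{l} is the block
%sum of the previous C-layer's map over SRate x SRate windows, and
%XS{l} = f(YS{l}). Then dE/dWS{l} = sum over all pixels of dE/dY .* SS and
%dE/dBS{l} = sum over all pixels of dE/dY, which is exactly what the two
%sum(sum(...)) lines compute. back_subsample presumably spreads each element
%of dE/dY{l}.*WS{l} back over its SRate x SRate pooling window (the
%upsampling noted above), giving dE/dX for the C-layer below.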

%---------------------------------------------------------------------------------------------
else
%Convolutional layer
%dE/dY = dE/dX because of linear transfer function for C-layer
cnet.CLayer{k}.dEdY = cnet.CLayer{k}.dEdX;%dEdX was already computed while processing the layer above
%Initialize temporary variables for accumulating the errors
dEdX = num2cell(zeros(1,cnet.SLayer{k-1}.numFMaps)); %accumulated for layer k-1
dEdW = num2cell(zeros(1,cnet.CLayer{k}.numKernels)); %accumulated for layer k
dEdB = num2cell(zeros(1,cnet.CLayer{k}.numKernels)); %accumulated for layer k
for l=1:cnet.CLayer{k}.numKernels %for all kernels of this C-layer
cnet.CLayer{k}.dXdY{l} = ones(size(cnet.CLayer{k}.XC{1})); %Transfer function is linear
%For all feature maps of the next layer which have connections to
%this layer
for m=find(cnet.CLayer{k}.ConMap(l,:)) %According to ConMap
%Backpropagate and accumulate the error
dEdX{m} = dEdX{m}+...
back_conv2(cnet.SLayer{k-1}.XS{m}, cnet.CLayer{k}.dEdY{l},cnet.CLayer{k}.WC{l},'err');%convolve with the kernel rotated by 180 degrees
%Calculate and accumulate the shared weights gradient
dEdW{l} = dEdW{l}+...
back_conv2(cnet.SLayer{k-1}.XS{m}, cnet.CLayer{k}.dEdY{l},cnet.CLayer{k}.WC{l},'gx'); %Y = XS * W

            %Calculating the shared biases gradient
            dEdB{l}=dEdB{l} + sum(sum(cnet.CLayer{k}.dEdY{l})); 

        end
 end
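 %About the two back_conv2 calls above: judging by the arguments and the
 %comments, the 'err' mode presumably returns the error with respect to the
 %S-layer input XS{m} (a convolution of dE/dY with the kernel rotated by 180
 %degrees, padded back to the input size), while the 'gx' mode presumably
 %returns the gradient of the shared kernel weights (a valid-size correlation
 %of XS{m} with dE/dY). Both are accumulated over every connected input map m
 %because the kernel WC{l} and its bias are shared across all of them.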
 %Storing everything
 cnet.SLayer{k-1}.dEdX = dEdX;
 cnet.CLayer{k}.dEdW = dEdW;
 cnet.CLayer{k}.dEdB = dEdB;
 %Reshape data into single-column vector
 je=[je;reshape(cell2mat(cnet.CLayer{k}.dEdW),[],1)];
 je=[je;cell2mat(cnet.CLayer{k}.dEdB)'];
    
end

end

end
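
To see that the kron-based layout really produces the per-weight gradients, the gradient of the final fully-connected layer can be checked in isolation. The sketch below is independent of the cnet class and uses only base MATLAB; the sigmoid transfer function, the 0.5*sum(e.^2) loss and the sizes nIn/nOut are illustrative assumptions, not taken from the toolbox. It rebuilds dE/dW the same way calcje does for the last layer and compares one entry against a finite-difference estimate.

%Minimal, self-contained gradient check (illustrative; not part of the toolbox)
nIn  = 4;  nOut = 3;
x = rand(1,nIn);                  %layer input, a row vector like outp above
W = rand(nOut,nIn);  b = rand(nOut,1);
t = rand(nOut,1);                 %target output

y = W*x' + b;                     %pre-activation (the layer's Y)
o = 1./(1+exp(-y));               %sigmoid output (the layer's X)
e = o - t;                        %error, as passed into calcje

dEdX = e;                         %derivative of 0.5*sum(e.^2) w.r.t. the output
dXdY = o.*(1-o);                  %sigmoid derivative
dEdY = dXdY.*dEdX;
dEdW = kron(dEdY',x)';            %flattened outer product, mirroring calcje
dEdB = dEdY;

%Finite-difference check of a single weight, W(2,3)
h  = 1e-6;
Wp = W;  Wp(2,3) = Wp(2,3) + h;
Ep = 0.5*sum((1./(1+exp(-(Wp*x'+b))) - t).^2);
E0 = 0.5*sum((1./(1+exp(-(W *x'+b))) - t).^2);
numGrad  = (Ep - E0)/h;
analGrad = dEdW((2-1)*nIn + 3);   %input index varies fastest in the kron layout
fprintf('numeric %.6g   analytic %.6g\n', numGrad, analGrad);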
