% BP neural network with momentum, trained to fit sin(x) on [0, 2*pi].

%% construct a network.
net.nIn     = 1;                 % one neuron in the input layer
net.nHidden = 10;                % ten neurons in the hidden layer
net.nOut    = 1;                 % one neuron in the output layer

% Hidden layer: weights and biases drawn uniformly from [-1, 1].
% The bias column is appended to the weight matrix so that it acts as a
% weight on a constant-1 input row.
hidW   = 2*rand(net.nHidden, net.nIn) - 1;
hidB   = 2*rand(net.nHidden, 1) - 1;
net.w1 = [hidW, hidB];

% Output layer: same uniform [-1, 1] initialisation, bias appended.
outW   = 2*rand(net.nOut, net.nHidden) - 1;
outB   = 2*rand(net.nOut, 1) - 1;
net.w2 = [outW, outB];

%% set the parameters
mc=0.01;                                %momentum coefficient for the weight updates
eta=0.001;                              %learning rate
maxiter=50000;                          %number of training iterations

%% set the training samples.
trainIn=0:pi/4:2*pi;                    %training inputs: samples over one period of sin
trainOut=sin(trainIn);                  %training targets
trainnum=numel(trainIn);                %sample count derived from the data (was hard-coded 9)
SampIn=[trainIn;ones(1,trainnum)];      %network input with a constant-1 row appended as the bias input
expectedOut=trainOut;                   %expected network output
errRec=zeros(1,maxiter);                %preallocated per-iteration training error record

%% set the testing samples
testIn=0:pi/180:2*pi;                   %testing inputs: one-degree steps over one period
testOut=sin(testIn);                    %testing targets
testnum=numel(testIn);                  %sample count derived from the data (was hard-coded 361)

%% the training procedure
for i = 1:maxiter
    % ---- forward pass ----
    hid_input = net.w1*SampIn;               %hidden-layer weighted sums (bias enters via the constant-1 row)
    hid_out   = tansig(hid_input);           %hidden-layer activations
    ou_input1 = [hid_out;ones(1,trainnum)];  %output-layer input with a constant-1 row for the bias
    ou_input2 = net.w2*ou_input1;            %output-layer weighted sums
    out_out   = 2*tansig(ou_input2);         %network output, scaled into (-2, 2)
    err       = expectedOut-out_out;         %per-sample error
    sse       = sumsqr(err);                 %sum of squared errors
    errRec(i) = sse;                         %record the training error

    % ---- back-propagation of error ----
    % Derivative fix: d/dn [2*tansig(n)] = 2*(1 - tansig(n)^2).  The
    % original omitted the factor 2, silently rescaling the gradient.
    DELTA = err.*(2*dtansig(ou_input2,out_out/2));              %output-layer local gradient
    delta = (net.w2(:,1:end-1)'*DELTA).*dtansig(hid_input,hid_out); %hidden-layer local gradient
    dWEX  = DELTA*ou_input1';                                   %gradient w.r.t. output-layer weights
    dwex  = delta*SampIn';                                      %gradient w.r.t. hidden-layer weights

    % ---- weight update with momentum ----
    % Fix: the momentum term must reuse the previous *applied step*
    % (eta-scaled).  The original stored the raw gradient delta, so with
    % mc=0.01 and eta=0.001 the "momentum" term mc*dWEXOld was roughly
    % ten times larger than the gradient term itself.
    if i == 1                                %first iteration: no previous step to reuse
        stepW2 = eta*dWEX;
        stepW1 = eta*dwex;
    else
        stepW2 = (1-mc)*eta*dWEX + mc*stepW2;
        stepW1 = (1-mc)*eta*dwex + mc*stepW1;
    end
    net.w2 = net.w2 + stepW2;
    net.w1 = net.w1 + stepW1;
end
%% the display of the results
subplot(1,2,1);
plot(errRec);                               %training-error history over the iterations
title('error curve');
xlabel('iteration times');
ylabel('error');

% Forward pass over the test set using the trained weights.
realIn=[testIn;ones(1,testnum)];            %test inputs with a constant-1 bias row appended
realhid_input=net.w1*realIn;                %hidden-layer weighted sums
realhid_out=tansig(realhid_input);          %hidden-layer activations
realou_input1=[realhid_out;ones(1,testnum)];%output-layer input with a constant-1 bias row
realou_input2=net.w2*realou_input1;         %output-layer weighted sums
realout_out=2*tansig(realou_input2);        %network output on the test set
realerr=testOut-realout_out;                %per-sample test error
realsse=sumsqr(realerr);                    %total squared test error

subplot(1,2,2);
plot(testIn,realout_out,testIn,testOut);    %fitted curve against the true sin curve
axis([0 2*pi -1.1 1.1]);                    %fix the axis limits to one period
set(gca,'XTick',pi/4:pi/4:2*pi);            %tick marks every pi/4
grid on;
title('the testing output and the standard output');

