% BP neural network approximating z = sin(x)/x * sin(y)/y

%% construct a network
net.nIn=2;                              %the input layer has 2 inputs (x and y)
net.nHidden=20;                         %the hidden layer has 20 neurons
net.nOut=1;                             %the output layer has 1 neuron
w=2*(rand(net.nHidden,net.nIn)-1/2);    %hidden-layer weights, uniform in (-1,1)
b=2*(rand(net.nHidden,1)-1/2);          %hidden-layer thresholds (biases), uniform in (-1,1)
net.w1=[w,b];                           %weights and thresholds concatenated into one matrix
W=2*(rand(net.nOut,net.nHidden)-1/2);   %output-layer weights, uniform in (-1,1)
B=2*(rand(net.nOut,1)-1/2);             %output-layer threshold (bias)
net.w2=[W,B];                           %weights and threshold concatenated into one matrix

%% set the parameters
mc=0.00005;                             %momentum coefficient used in the weight updates
eta=0.0001;                             %learning rate
maxiter=200000;                         %maximum number of training iterations
trainnum=121;                           %number of training samples (11x11 grid, step 2)
testnum=441;                            %number of testing samples (21x21 grid, step 1)
errRec=zeros(1,maxiter);                %per-iteration sum-of-squared-error record

%% set the training samples and the testing samples
% Training inputs: an 11x11 grid over [-10,10]^2 with step 2, stored one
% sample per column (the first coordinate varies slowest).
k=1:trainnum;
trainin=[2*ceil(k/11)-12; 2*rem(k-1,11)-10];
sampin=[trainin;ones(1,trainnum)];      %append a constant-1 row feeding the thresholds
% Testing inputs: a 21x21 grid over [-10,10]^2 with step 1, same layout.
k=1:testnum;
testin=[ceil(k/21)-11; rem(k-1,21)-10];
% Training targets: z = sin(x)/x * sin(y)/y, taking the limit value where
% x==0, y==0 or both (sinc -> 1), so no 0/0 NaN is ever produced.
% (Replaces the fragile fractional index arithmetic 61+5.5*i+0.5*j and the
% NaN-then-patch scheme of the original; final values are identical.)
trainout=ones(1,trainnum);
for k=1:trainnum
    x=trainin(1,k);
    y=trainin(2,k);
    if x==0 && y==0
        trainout(1,k)=1;                %limit at the origin
    elseif x==0
        trainout(1,k)=sin(y)/y;         %lim sin(x)/x = 1 as x->0
    elseif y==0
        trainout(1,k)=sin(x)/x;         %lim sin(y)/y = 1 as y->0
    else
        trainout(1,k)=sin(x)/x*sin(y)/y;
    end
end
expectout=trainout;                     %desired output used by the training loop
% Testing targets: same function on the 21x21 grid, with explicit limit
% handling at x==0, y==0 and the origin instead of the original's
% NaN-then-patch loops; final values are identical.
testout=ones(1,testnum);
for k=1:testnum
    x=testin(1,k);
    y=testin(2,k);
    if x==0 && y==0
        testout(1,k)=1;                 %limit at the origin
    elseif x==0
        testout(1,k)=sin(y)/y;          %lim sin(x)/x = 1 as x->0
    elseif y==0
        testout(1,k)=sin(x)/x;          %lim sin(y)/y = 1 as y->0
    else
        testout(1,k)=sin(x)/x*sin(y)/y;
    end
end
%% the training procedure
% Batch back-propagation with a momentum term.  tansig and dtansig are
% Neural Network Toolbox functions (tanh and its derivative); sumsqr is
% also from that toolbox.  (Removed the stray semicolon on the for line
% and normalized indentation; the computation is unchanged.)
for i=1:maxiter
    hid_input=net.w1*sampin;                    %weighted sum of the hidden layer
    hid_out=tansig(hid_input);                  %hidden-layer output
    ou_input1=[hid_out;ones(1,trainnum)];       %output-layer input; the constant-1 row feeds the threshold
    ou_input2=net.w2*ou_input1;                 %weighted sum of the output layer
    out_out=2*tansig(ou_input2);                %network output, scaled to (-2,2)
    err=expectout-out_out;                      %error vector
    sse=sumsqr(err);                            %sum of squared errors
    errRec(i)=sse;                              %record the error for plotting

    %% the back-propagation of error
    DELTA=err.*dtansig(ou_input2,out_out/2);                   %output-layer local gradient
    delta=net.w2(:,1:end-1)'*DELTA.*dtansig(hid_input,hid_out);%hidden-layer local gradient (threshold column excluded)
    dWEX=DELTA*ou_input1';                                     %gradient w.r.t. output-layer weights
    dwex=delta*sampin';                                        %gradient w.r.t. hidden-layer weights
    if i==1                                                    %first step: no previous update, skip the momentum term
        net.w2=net.w2+eta*dWEX;
        net.w1=net.w1+eta*dwex;
    else                                                       %blend the new gradient with the previous update
        net.w2=net.w2+(1-mc)*eta*dWEX+mc*dWEXOld;
        net.w1=net.w1+(1-mc)*eta*dwex+mc*dwexOld;
    end
    dWEXOld=dWEX;                                              %remember this update for the momentum term
    dwexOld=dwex;
end
%% the testing procedure
realin=[testin;ones(1,testnum)];                            %testing inputs with a constant-1 row for the thresholds
realhid_input=net.w1*realin;                                %weighted sum of the hidden layer
realhid_out=tansig(realhid_input);                          %hidden-layer output
realou_input1=[realhid_out;ones(1,testnum)];                %output-layer input; the threshold input is a constant 1
realou_input2=net.w2*realou_input1;                         %weighted sum of the output layer
realout_out=2*tansig(realou_input2);                        %network output on the test grid
realerr=testout-realout_out;                                %test error vector
realsse=sumsqr(realerr);                                    %sum of squared test errors
%% the display of the results
subplot(1,3,1);
plot(errRec);                                               %training error per iteration
title('error curve');
xlabel('iteration times');
ylabel('training error');

% Reference surface z = sin(x)/x*sin(y)/y on the 21x21 integer grid.
x1=-10:10;
x2=-10:10;
[X1,X2]=meshgrid(x1,x2);
yd=sin(X1)./X1.*sin(X2)./X2;                                %NaN where X1 or X2 is 0; patched below
for r=1:21
    for c=1:21
        if c==11                                            %X1==0: limit is sin(y)/y
            yd(r,c)=sin(r-11)/(r-11);
        end
        if r==11                                            %X2==0: limit is sin(x)/x
            yd(r,c)=sin(c-11)/(c-11);
        end
    end
end
yd(11,11)=1;                                                %limit at the origin
subplot(1,3,2)
mesh(X1,X2,yd);                                             %plot the reference surface
title('sinx/x*siny/y curve');
xlabel('x');
ylabel('y');
zlabel('z');
hold on;

% Network output reshaped back onto the same grid.
y1=-10:10;
y2=-10:10;
[Y1,Y2]=meshgrid(y1,y2);
yf=zeros(21,21);
for r=1:21
    for c=1:21
        yf(r,c)=realout_out(1,21*(r-1)+c);                  %test sample k = 21*(r-1)+c
    end
end
subplot(1,3,3);
mesh(Y1,Y2,yf);                                             %plot the network's test output
xlabel('x');
ylabel('y');
zlabel('testing output')
title('testing curve');








            


% (removed: non-code web-page residue appended by the page extraction)