Identifying model parameters with a social learning particle swarm optimizer (SL-PSO). Reference paper: A social learning particle swarm optimization algorithm for scalable optimization
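The script fits the four parameters p(1) to p(4) of a heat-transfer correlation to 16 measured operating points, first with SL-PSO and then with lsqnonlin (Levenberg-Marquardt) for comparison, and plots the per-point relative errors of both fits at the end. Reading the model off the code (see the Qcal expression near the end), it has the form

Q(p) = ( mr*(Hg - p1*Tri) + p2*mr^p4*(Tai - Tsat) ) / ( 1 + p3*(mr/ma)^p4 )

where Qcat = Cp*ma*(Tai - Tao) is the reference heat load computed from the air-side temperature change. In the SL-PSO loop, each particle's velocity is built from its previous velocity, an imitation term toward a better-ranked demonstrator, and a social term toward the swarm mean scaled by the influence factor e, applied with a rank-dependent learning probability pl.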

%%
clc;
clear;
ma = [0.2 0.2 0.2 0.2 0.4 0.4 0.4 0.4 0.2 0.2 0.2 0.2 0.4 0.4 0.4 0.4];
Tai = [23 23 23 23 23 23 23 23 30 30 30 30 30 30 30 30];
Tao = [18.285 18.07 17.731 17.512 20.598 20.492 20.303 20.204 25.032 24.829 24.418 24.254 27.471 27.369 27.148 27.074];
mr = [0.0067687 0.0068901 0.0079628 0.0079636 0.0067752 0.0068934 0.0079722 0.0079591 0.0067847 0.0068982 0.0079827 0.0079519 0.0067894 0.0069008 0.0079875 0.0079469];
Tsat= [5.4627 -1.4785 8.7135 1.374 5.8217 -1.1601 9.2078 1.7237 6.5043 -0.62698 9.9056 2.2528 6.8474 -0.31984 10.37 2.5914];
Pe = [355.57 277.64 397.35 307.88 360.01 280.9 404.03 311.76 368.57 286.42 413.59 317.69 372.93 289.63 420.05 321.53];
Hg = [253.6 249.59 255.44 251.25 253.81 249.77 255.72 251.45 254.2 250.08 256.11 251.76 254.39 250.26 256.37 251.96];
Tri= [33.901 33.583 36.045 35.144 33.781 33.506 35.77 35.047 33.599 33.395 35.473 34.933 33.499 33.33 35.334 34.861];
Cp=1.1626075418552;
for j=1:16
    Qcat(j) = Cp*ma(j)*(Tai(j)-Tao(j));   % reference heat load from the air-side temperature change
    %gama(j) = Q(j);
end
y1 = @(p) abs((mr.*(Hg-p(1).*Tri)+p(2).*mr.^p(4).*(Tai-Tsat))./(1+p(3).*(mr./ma).^p(4))-Qcat);   % per-point absolute model error
%%
n=5000;                      % number of SL-PSO iterations
d=4;                         % problem dimension (number of parameters per particle)
t=0;a=0.5;b=0.01;M=100;
m = M+floor(n./10);          % swarm size
e = b.*(n./M);               % social influence factor
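% Note: in the reference SL-PSO paper, the swarm size m, the influence factor
% epsilon and the learning probabilities are defined from the problem
% dimensionality rather than the iteration budget; this script plugs in
% n = 5000 for both roles, which gives m = 600 and e = 0.5 here.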
%%
pmax = [500 500 500 1];
pmin = [0 0 0 0];
p0 = pmin + (pmax - pmin).*rand(m, d);   % random initial swarm within [pmin, pmax]
% ppp = [2 2 3 1];
% p0=[p00 ;ppp];
for i = 1:m
    pl(i,:) = (1-(i-1)./m).^(a.*log(ceil(n./m)));   % learning probability of particle i
    fitness1(i,:) = y1(p0(i,:));                    % 1x16 vector of per-point errors
end
fitness{1}=sum(fitness1,2);
[sortf,idx] = sortrows(fitness{1},-1);
gbest = min(sortf);
p = p0(idx,:);
v1 = zeros(m,d);
%%
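% Main SL-PSO loop. Each iteration:
%   1) re-rank the swarm by fitness (worst first, best last);
%   2) build an imitation term I = pk - p toward a demonstrator pk drawn from
%      the better half of the swarm, and a social term c = pj - p toward the
%      swarm mean pj;
%   3) update each particle's velocity, using the deterministic rule
%      v = v + I + e*c with probability pl(i) and the randomized rule
%      v = r1.*v + r2.*I + r3.*e.*c otherwise, then clamp positions to
%      [pmin, pmax];
%   4) keep a new position only if it does not worsen the fitness.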
% while(min(fitness{1}>0.000002))
for i = 1:5000
[sortf1,idx1] = sortrows(fitness{1},-1);
gbest1 = min(sortf1);
p = p(idx1,:);
v = v1(idx1,:);
pkm = p(ceil(m./2):m,:);     % better half of the descending-sorted swarm
pi = p(1:floor(m./2),:);     % worse half (not used further)
[mm,~] = size(pkm);
mmk = randperm(mm,1);        % pick one demonstrator index at random
pk = pkm(mmk,:);             % demonstrator particle shared by the whole swarm this iteration
pj = mean(p,1);              % swarm mean position
I = pk-p;                    % imitation component
c = pj-p;                    % social (mean) component
r1 = rand(m,d);r2 = rand(m,d);r3 = rand(m,d);
for i =1:m
v2(i,:) = r1(i,:).*v1(i,:) + r2(i,:).*I(i,:) + r3(i,:).*e.*c(i,:);
% p1(i,:) = p(i,:)+v2(i,:);
end
pii = rand(m,1);
for i=1:m
    if pii(i)<=pl(i)
        % v2(i,j1)=20.*v1(i,j1) + I(i,j1) + e.*c(i,j1);
        v2(i,:) = v1(i,:) + I(i,:) + e.*c(i,:);                              % deterministic social learning update
    else
        v2(i,:) = r1(i,:).*v1(i,:) + r2(i,:).*I(i,:) + r3(i,:).*e.*c(i,:);   % randomized update
    end
    p1(i,:) = p(i,:)+v2(i,:);
end
for i =1:m
    for j2 = 1:d
        if p1(i,j2)>pmax(j2)
            p1(i,j2) = pmax(j2);
        elseif p1(i,j2)<pmin(j2)
            p1(i,j2)=pmin(j2);
        end
    end
end

for i=1:m
    fitness2(i,:)=y1(p1(i,:));
end
fitness{2}=sum(fitness2,2);
for i =1:m
    if fitness{2}(i)>fitness{1}(i)       % greedy selection: revert particles that got worse
        p1(i,:) = p(i,:);
        fitness{2}(i) = fitness{1}(i);   % keep the fitness consistent with the reverted position
    end
end
fitness{1}=fitness{2};
v1=v2;
p = p1;

end
%%
[sortf1,idx1] = sortrows(fitness{1},-1);
gbest1 = min(sortf1);
p = p(idx1,:);
pbest = p(m,:);
Q2 = (mr.*(Hg-pbest(1).*Tri)+pbest(2).*mr.^pbest(4).*(Tai-Tsat))./(1+pbest(3).*(mr./ma).^pbest(4));   % SL-PSO fit
r3 = 100.*(abs(Q2-Qcat))./Qcat;   % per-point relative error of the SL-PSO fit [%]
%%
LB=[0 0 0 0];
UB=[500 500 500 1];
x0 = [1, 0.3991, 0.5711, 0];
fun = @(x)abs((mr.*(Hg-x(1).*Tri)+x(2).*mr.^x(4).*(Tai-Tsat))./(1+x(3).*(mr./ma).^x(4))-Qcat);
%x0 = [ 0.5345 1.3163 0.9321 0.8];
%x0 = [ 1.1848 70 0.2544 0.5384 0.8]; % Starting guess
%[x,resnorm] = lsqcurvefit(@myfun,x0,ma,Tao,mr,Tri,Tsat,Qcat,LB,UB) % Invoke optimizer
options = optimoptions(@lsqnonlin,'Algorithm','levenberg-marquardt');
% Note: depending on the MATLAB release, lsqnonlin may warn or refuse when bounds
% are supplied together with the Levenberg-Marquardt algorithm.
% [x,resnorm] = lsqnonlin(@myfun,x0,LB,UB,options,ma,Tai,mr,Tsat,Qcat,Hg,Tri); % Invoke optimizer
[xlm,resnorm] = lsqnonlin(fun,x0,LB,UB,options);
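% A minimal sketch (not in the original script) to print the two parameter
% sets side by side; pbest is the SL-PSO result from the previous section.
fprintf('SL-PSO: p = [%g %g %g %g]\n', pbest);
fprintf('LM    : p = [%g %g %g %g], resnorm = %g\n', xlm, resnorm);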
%%
for i=1:16

Qcal(i) =  (mr(i)*(Hg(i)-xlm(1)*Tri(i))+xlm(2)*mr(i)^xlm(4)*(Tai(i)-Tsat(i)))/(1+xlm(3)*(mr(i)/ma(i))^xlm(4));

end
%%
r4 = 100.*(abs(Qcal-Qcat))./Qcat;   % per-point relative error of the LM fit [%]
figure(2),plot(r3,'k-o');
hold on
plot(r4,'k-*'),ylabel('Relative error [%]'), xlabel('Data point');
legend("sl-pso","lm")
%%
r5 = sum(r3)./16;               % mean relative error of the SL-PSO fit [%]
r6 = sum(r4)./16;               % mean relative error of the LM fit [%]
r7 = sum(abs(Q2-Qcat))./16;     % mean absolute error of the SL-PSO fit
r8 = sum(abs(Qcal-Qcat))./16;   % mean absolute error of the LM fit
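%% Prediction with the fitted correlation
% A minimal sketch (not part of the original script): predictQ is a hypothetical
% helper that re-evaluates the fitted correlation for any operating point, so
% either parameter set (pbest or xlm) can be reused on new data.
predictQ = @(pfit,mr_,ma_,Hg_,Tri_,Tai_,Tsat_) ...
    (mr_.*(Hg_-pfit(1).*Tri_)+pfit(2).*mr_.^pfit(4).*(Tai_-Tsat_)) ./ ...
    (1+pfit(3).*(mr_./ma_).^pfit(4));
Qnew = predictQ(xlm, mr(1), ma(1), Hg(1), Tri(1), Tai(1), Tsat(1));   % example: first data point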
% %%
% [sortf1,idx1] = sortrows(fitness{1},-1);
% gbest1 = min(sortf1);
% p = p(idx1,:);
% pkm = p(ceil(m./2):m,:);
% pi = p(1:floor(m./2),:);
% [mm,~] = size(pkm);
% mmk = randperm(mm,1);
% pk = pkm(mmk,:);
% pj = mean(p,1);
% I = pk-p;
% c = pj-p;
% r1 = rand(m,d);r2 = rand(m,d);r3 = rand(m,d);
% winidxmask = repmat([1:m]', [1 d]);
% winidx = winidxmask + ceil(rand(m, d).*(m - winidxmask));
% for j =1:d
% pwin(:,j) = p(winidx(:,j),j);
% end
% lpmask = repmat(rand(m,1) < pl, [1 d]);
% lpmask(m,:) = 0;
% v2 = 1*(r1.*v + r2.*(pwin - p) + c3*r3.*(center - p));
% p1 = p + v2;
% v = lpmask.*v2 + (~lpmask).*v;
% p = lpmask.*p1 + (~lpmask).*p;
% for i = 1:m
% p(i,:) = max(p(i,:), pmax(1,:));
% p(i,:) = min(p(i,:), pmin(2,:));
% end
% %%
% for i =1:m
% v2(i,:) = r1(i,:).*v1(i,:) + r2(i,:).*I(i,:) + r3(i,:).*e.*c(i,:);
% % p1(i,:) = p(i,:)+v2(i,:);
% end
% pii = rand(m,1);
% for i=1:m
% if pii(i)<=pl(i)
%
% % v2(i,j1)=20.*v1(i,j1) + I(i,j1) + e.*c(i,j1);
% v2(i,:)=v1(i,:) + I(i,:) + e.*c(i,:);
% else
% v2(i,:) = r1(i,:).*v1(i,:) + r2(i,:).*I(i,:) + r3(i,:).*e.*c(i,:);
% end
% p1(i,:) = p(i,:)+v2(i,:);
%
% end
% %%
% for i=1:m
% fitness2(i,:)=y1(p1(i,:));
% end
% fitness{2}=sum(fitness2,2);
% for i =1:m
% if fitness{2}(i)>fitness{1}(i)
% p1(i,:) = p(i,:);
% end
% end
% % fitness{1}=fitness{2};
% % v1=v2;
% % p = p1;
