1 Introduction
To address the problem of how to select the key parameters in least squares support vector machine (LSSVM) modeling, a chaos optimization algorithm, which is random, ergodic, and regular, is applied to search for the optimal LSSVM parameters. To accelerate the search over the large parameter space, a genetic algorithm (GA) is introduced as well. The combined algorithm overcomes the premature convergence, trapping in local optima, and slow search speed of either algorithm alone: the chaotic variable population is mapped into the value range of the LSSVM parameters and evolved according to the GA, while the LSSVM is trained on the training set, finally yielding the optimized parameter values. The method was applied to modeling for tobacco leaf recognition and achieved high modeling accuracy.
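As a concrete illustration, the following is a minimal sketch (assuming LS-SVMlab is on the path; this is not the paper's exact fitness function, and the name lssvm_cv_cost is hypothetical) of the cross-validation cost that such a chaos/GA search would minimize over candidate (gam, sig2) pairs:
function cost = lssvm_cv_cost(params, X, Y)
% Fitness of one candidate: 10-fold cross-validated misclassification
% rate of an LSSVM classifier using the candidate (gam, sig2) pair.
gam  = params(1);  % candidate regularization parameter
sig2 = params(2);  % candidate RBF kernel bandwidth
model = initlssvm(X, Y, 'c', gam, sig2, 'RBF_kernel');
cost  = crossvalidate(model, 10, 'misclass');
A chaos or GA optimizer then simply keeps the candidate pair with the lowest cost.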
2 Partial Code
function [features,eigvec,eigvals] = AFEm(Xs,kernel, kernel_pars,X,type,nb,eigvec,eigvals)
% Automatic Feature Extraction by the Nyström method
%
%
% >> features = AFEm(X, kernel, sig2, Xt)
%
% Description
% Using the Nyström approximation method, the mapping of data to
% the feature space can be evaluated explicitly. This gives the
% features that one can use for a linear regression or
% classification. The decomposition of the mapping to the feature
% space relies on the eigenvalue decomposition of the kernel
% matrix. The Matlab ('eigs') or Nyström's ('eign') approximation
% using the nb most important eigenvectors/eigenvalues can be
% used. The eigenvalue decomposition is not re-calculated if it is
% passed as an extra argument. This routine internally calls a cmex file.
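%
% Roughly, in formulas: with Omega the Nc x Nc kernel matrix on the
% subsample and Omega = U*diag(lam)*U' its eigendecomposition, the
% feature vector of a point x is [K(x,xs_1),...,K(x,xs_Nc)]*U, rescaled
% componentwise by 1./sqrt(lam) (cf. the last two lines of this routine,
% where the eigenvalues are additionally normalized by Nc).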
%
% Full syntax
%
% >> [features, U, lam] = AFEm(X, kernel, sig2, Xt)
% >> [features, U, lam] = AFEm(X, kernel, sig2, Xt, type)
% >> [features, U, lam] = AFEm(X, kernel, sig2, Xt, type, nb)
% >> features = AFEm(X, kernel, sig2, Xt, [],[], U, lam)
%
% Outputs
% features : Nt x nb matrix with extracted features
% U(*) : N x nb matrix with eigenvectors
% lam(*) : nb x 1 vector with eigenvalues
% Inputs
% X : N x d matrix with input data
% kernel : Name of the used kernel (e.g. 'RBF_kernel')
% sig2 : parameter of the used kernel
% Xt : Data from which the features are extracted
% type(*): 'eig'(*), 'eigs' or 'eign'
% nb(*) : Number of eigenvalues/eigenvectors used in the eigenvalue decomposition approximation
% U(*) : N x nb matrix with eigenvectors
% lam(*) : nb x 1 vector with eigenvalues
%
% See also:
% kernel_matrix, RBF_kernel, demo_fixedsize
%FS-LSSVMLab
% Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab
N = size(X,1);
Nc = size(Xs,1);
eval('type;','type=''eig'';');
if ~(strcmp(type,'eig') || strcmp(type,'eigs') || strcmp(type,'eign') )
error('Type needs to be ''eig'', ''eigs'' or ''eign''...');
end
function [model,A,B,C,D] = bay_optimize(model,level, type, nb, bay)
% Optimize the posterior probabilities of model (hyper-) parameters with respect to the different levels in Bayesian inference
%
% One can optimize on the three different inference levels:
%
% - First level: In the first level one optimizes the support values alpha 's and the bias b.
% - Second level: In the second level one optimizes the regularization parameter gam.
% - Third level: In the third level one optimizes the kernel
% parameter. In the case of the common 'RBF_kernel' the kernel
% parameter is the bandwidth sig2.
% This routine is only tested with Matlab version 6 using the corresponding optimization toolbox.
%
% Full syntax
%
% 1. Outputs on the first level:
%
% >> [model, alpha, b] = bay_optimize({X,Y,type,gam,sig2,kernel,preprocess}, 1)
% >> [model, alpha, b] = bay_optimize(model, 1)
%
% model : Object oriented representation of the LS-SVM model optimized on the first level
% alpha(*) : Support values optimized on the first level of inference
% b(*) : Bias term optimized on the first level of inference
%
%
% 2. Outputs on the second level:
%
% >> [model,gam] = bay_optimize({X,Y,type,gam,sig2,kernel,preprocess}, 2)
% >> [model,gam] = bay_optimize(model, 2)
%
% model : Object oriented representation of the LS-SVM model optimized on the second level of inference
% gam(*) : Regularization parameter optimized on the second level of inference
%
%
% 3. Outputs on the third level:
%
% >> [model, sig2] = bay_optimize({X,Y,type,gam,sig2,kernel,preprocess}, 3)
%
% model : Object oriented representation of the LS-SVM model optimized on the third level of inference
% sig2(*) : Kernel parameter optimized on the third level of inference
%
%
% 4. Inputs using the functional interface
%
% >> model = bay_optimize({X,Y,type,gam,sig2,kernel,preprocess}, level)
% >> model = bay_optimize({X,Y,type,gam,sig2,kernel,preprocess}, level, type)
% >> model = bay_optimize({X,Y,type,gam,sig2,kernel,preprocess}, level, type, nb)
%
% X : N x d matrix with the inputs of the training data
% Y : N x 1 vector with the outputs of the training data
% type : 'function estimation' ('f') or 'classifier' ('c')
% gam : Regularization parameter
% sig2 : Kernel parameter (bandwidth in the case of the 'RBF_kernel')
% kernel(*) : Kernel type (by default 'RBF_kernel')
% preprocess(*) : 'preprocess'(*) or 'original'
% level : 1, 2, 3
% type(*) : 'eig', 'svd'(*), 'eigs', 'eign'
% nb(*) : Number of eigenvalues/eigenvectors used in the eigenvalue decomposition approximation
%
%
% 5. Inputs using the object oriented interface
%
% >> model = bay_optimize(model, level)
% >> model = bay_optimize(model, level, type)
% >> model = bay_optimize(model, level, type, nb)
%
% model : Object oriented representation of the LS-SVM model
% level : 1, 2, 3
% type(*) : 'eig', 'svd'(*), 'eigs', 'eign'
% nb(*) : Number of eigenvalues/eigenvectors used in the eigenvalue decomposition approximation
%
% See also:
% bay_lssvm, bay_lssvmARD, bay_modoutClass, bay_errorbar
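%
% A minimal usage sketch following the syntax above (X, Y, gam, sig2
% assumed to be defined already):
%
% >> model = bay_optimize({X,Y,'c',gam,sig2,'RBF_kernel'}, 2); % tune gam
% >> model = bay_optimize(model, 3);                           % then tune sig2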
% Copyright (c) 2002, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.ac.be/sista/lssvmlab
vers = version;
if vers(1)<'6',
error(['This routine is only supported currently under MATLAB 6' ...
' and its corresponding optimization toolbox.']);
end
if iscell(model), model = initlssvm(model{:}); end
if ~(level==1 | level==2 | level==3),
error('level must be 1, 2 or 3.');
end
eval('nb;','nb=model.nb_data;');
if level==1,
eval('type;','type=''train'';');
eval('[F1, F2, F3, C , model] = bay_lssvm(model,1,type,nb,bay);',...
'[F1, F2, F3, C , model] = bay_lssvm(model,1,type,nb);');
A = model.alpha;
B = model.b;
elseif level==2,
eval('type;','type=''svd'';');
eval('[model, A,B] = bay_optimize2(model,type,nb,bay); ',...
'[model, A,B] = bay_optimize2(model,type,nb);')
elseif level==3,
% check fminunc
resp = which('fminunc');
disp(' ');
if isempty(resp),
error(' ''fminunc'' not available');
end
eval('type;','type=''svd'';');
%startvalues
model = bay_optimize2(model,type,nb);
% start value given in model: not fixed 'cause updating
% of optimal parameters needs to be possible
start_param = model.kernel_pars;
opties=optimset('MaxFunEvals', 250, 'TolFun', .001, 'TolX', .001 );
eval('A = fminunc(@costL3, start_param, opties, model, type, nb);');
model = changelssvm(model,'kernel_pars',abs(A));
[c3,B,model] = bay_lssvm(model,3, type, nb);
end
function [model, A,B] = bay_optimize2(model,type,nb,bay)
% check fminunc
resp = which('fminunc');
disp(' ');
if isempty(resp),
error(' ''fminunc'' not available');
end
opties=optimset('TypicalX',model.kernel_pars,'MaxFunEvals', 2000,'GradObj','on','DerivativeCheck', 'off', 'TolFun', .0001, 'TolX', .0001 );
if nargin<4,
[c,dc,o, bay] = bay_lssvm(model,2,type,nb);
end
eval('gam_opt = fminunc(@costL2, abs(model.gam), opties, model, type, nb,bay);');
model = changelssvm(model,'gam',abs(gam_opt));
[D1, D2, D3,B,model] = bay_lssvm(model,2,type, nb, bay);
A = model.gam;
function [cost,Dcost] = costL2(lgam, model, type, nb, bay)
%
model = changelssvm(model,'gam',abs(lgam+1000*eps));
[cost, Dcost] = bay_lssvm(model,2,type, nb, bay);
function cost = costL3(sig2, model, type, nb)
%
model = changelssvm(model,'kernel_pars',abs(sig2));
cost = bay_lssvm(model,3, type, nb);
disp(['sig2 = ' num2str(model.kernel_pars) ' costL3 = ' num2str(cost) ';'])
% --- continuation of AFEm (see above) ---
% Eigenvalue decomposition of the kernel matrix on the subsample Xs.
% A jitter term 2*eye(.) keeps the matrix numerically positive definite;
% the offset 2 is subtracted from the eigenvalues again below.
if nargin<=7,
omega = kernel_matrix(Xs, kernel, kernel_pars);
if strcmp(type,'eig'),
[eigvec,eigvals] = eig(omega+2*eye(size(omega,1))); % + jitter factor
eigvals = diag(eigvals);
clear omega
elseif strcmp(type,'eigs'),
eval('nb;','nb=min(size(omega,1),10);'); options.disp = 0;
[eigvec,eigvals] = eigs(omega+2*eye(size(omega,1)),nb,'lm',options); clear omega % + jitter factor
elseif strcmp(type,'eign'),
eval('nb;','nb=min(size(omega,1),10);');
[eigvec,eigvals] = eign(omega+2*eye(size(omega,1)),nb); clear omega % + jitter factor
end
eigvals = (eigvals-2)/Nc;
peff = eigvals>eps;
eigvals = eigvals(peff);
eigvec = eigvec(:,peff); clear peff
end
if strcmp(kernel,'RBF_kernel')
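% The next three lines build the pairwise squared distances
% ||x_i - xs_j||^2 via the expansion ||x||^2 + ||xs||^2 - 2*x*xs',
% after which exp(-d2/sig2) gives the RBF kernel evaluations.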
omegaN = sum(X.^2,2)*ones(1,Nc);
omegaN = omegaN + ones(N,1)*sum(Xs.^2,2)';
omegaN = omegaN -2*X*Xs'; clear X Xs
omegaN = exp(-omegaN./kernel_pars);
elseif strcmp(kernel,'lin_kernel')
omegaN = X*Xs'; clear X Xs
elseif strcmp(kernel,'poly_kernel')
omegaN = X*Xs'; clear X Xs
omegaN = (omegaN + kernel_pars(1)).^kernel_pars(2);
else
disp('kernel_type unknown')
return;
end
%A=sqrt(Nc) ./ sqrt(eigvals); clear eigvals
features = omegaN*eigvec; clear omegaN
features = repmat((1 ./ sqrt(eigvals))',N,1).*features;
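% Example usage of AFEm (a sketch on synthetic data; the subsample size
% 50 and sig2 = 0.5 are illustrative choices, not defaults):
%
%   X  = randn(500,4);                    % full data set
%   Xs = X(randperm(500,50),:);           % subsample spanning the Nystrom basis
%   feats = AFEm(Xs,'RBF_kernel',0.5,X);  % explicit approximate feature map
%   % a linear classifier trained on feats approximates the kernel LSSVM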
%% Tobacco leaf recognition via GA and LSSVM-based feature selection
clc
clear all
close all
% Add the LSSVM toolbox to the search path
addpath('LSSVM工具箱\')
%% Load data
data=csvread('最小二乘支持向量机.csv',1,0);
tr_len=36; % number of training samples
[W,D]=size(data); % number of rows and columns
% Training data
X = data(1:tr_len,1:D-1); % input attribute features
Y = data(1:tr_len,D); % labels, i.e., the classes
% Test data
Xt = data(tr_len+1:end,1:D-1); % input attribute features
Yt = data(tr_len+1:end,D); % labels, i.e., the classes
%% Normalization
[Xn,inputps] = mapminmax(X');
Xtn= mapminmax('apply',Xt',inputps);
% Transpose back to match the LSSVM data format (samples in rows)
X=Xn';
Xt=Xtn';
%% Initial LSSVM parameter settings
gam=5;   % regularization parameter
sig2=50; % RBF kernel bandwidth
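% NOTE: the GA feature-selection loop is omitted from this excerpt. It
% evolves binary chromosomes (one bit per input feature), scores each
% chromosome by the recognition rate of an LSSVM trained on the selected
% columns, and produces the variables used below:
%   bestchrom   : best binary feature mask found by the GA
%   individuals : final GA population (individuals.chrom)
% It also plots the fitness curves labeled below, e.g. (avg_trace and
% best_trace are hypothetical names):
%   figure; plot(avg_trace); hold on; plot(best_trace)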
xlabel('Iteration')
ylabel('Recognition rate (%)')
legend('Mean fitness','Best fitness')
axis tight
title('GA-LSSVM feature selection')
%% Recognize tobacco leaves with the selected optimal features
L_fold =10;
model = initlssvm(X(:,find(bestchrom==1)),Y,'c',gam,sig2,'RBF_kernel');
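% Refine (gam,sig2) by Nelder-Mead simplex search, scoring candidates by
% L_fold-fold cross-validated misclassification; 'code_OneVsOne' encodes
% the multiclass problem as pairwise binary classifiers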
model = tunelssvm(model,'simplex','crossvalidatelssvm',{L_fold,'misclass'},'code_OneVsOne');
model = trainlssvm(model);
%% Training samples
Y_sim = simlssvm(model,X(:,find(bestchrom==1)));
accuracy_1=100*length(find(Y==Y_sim))/length(Y);
figure
plot(1:length(Y),Y,'bo','linewidth',1)
hold on
plot(1:length(Y_sim),Y_sim,'r*','linewidth',1)
xlabel('Training sample','FontSize',12);
ylabel('Tobacco leaf class','FontSize',12);
legend('Actual class','Predicted class');
set(gca,'YTick',[0:1:6])
string={'GA-LSSVM recognition rate';
['accuracy=' num2str(accuracy_1) '%']};
title(string);
grid on
axis tight
%% Test samples
Yt_sim = simlssvm(model,Xt(:,find(bestchrom==1)));
accuracy_2=100*length(find(Yt==Yt_sim))/length(Yt);
figure
plot(1:length(Yt),Yt,'bo','linewidth',1)
hold on
plot(1:length(Yt_sim),Yt_sim,'r*','linewidth',1)
xlabel('Test sample','FontSize',12);
ylabel('Tobacco leaf class','FontSize',12);
legend('Actual class','Predicted class');
set(gca,'YTick',[0:1:6])
string={'GA-LSSVM recognition rate';
['accuracy=' num2str(accuracy_2) '%']};
title(string);
grid on
axis tight
%% Output results
XX=individuals.chrom(end,:);
[m,n]=find(XX==1);
disp(['Indices of the input variables selected after optimization: ' num2str(n)]);
3 Simulation Results
4 References
[1] 田珂, 常华俊. 基于遗传算法优化LSSVM的着靶速度建模与预测[J]. 兵器装备工程学报, 2021, 42(S02): 5.