1. SVM
(1) Here I chose the penalty coefficient C = 1 for the experiment; a different penalty coefficient C may lead to different results.
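For reference, this C is the penalty coefficient of the standard soft-margin SVM primal problem, where a larger C penalizes margin violations more heavily:

$$
\min_{w,b,\xi}\ \frac{1}{2}\lVert w\rVert^2 + C\sum_{i=1}^{n}\xi_i
\qquad \text{s.t.}\quad y_i(w^\top x_i + b) \ge 1-\xi_i,\quad \xi_i \ge 0,\quad i=1,\dots,n.
$$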
- Kernel function kernel.m
function K = kernel(X,Y,type,gamma)
    switch type
        case 'linear'  % linear kernel
            K = X*Y';
        case 'rbf'     % RBF (Gaussian) kernel
            m = size(X,1);
            n = size(Y,1);
            K = zeros(m,n);  % X and Y may have different numbers of rows
            for i = 1:m
                for j = 1:n
                    K(i,j) = exp(-gamma*norm(X(i,:)-Y(j,:))^2);
                end
            end
    end
end
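A quick sanity check of kernel on toy data (the points and gamma value below are hypothetical, chosen only for illustration): the linear branch should return the plain inner-product matrix X*X', and the RBF branch should produce a matrix with ones on its diagonal.

% Hypothetical toy data, not from the original experiment
X = [0 0; 1 0; 0 1];
K_lin = kernel(X, X, 'linear', []);  % same as X*X'; gamma is unused here
K_rbf = kernel(X, X, 'rbf', 0.5);    % diag(K_rbf) should be all ones
disp(K_lin);
disp(K_rbf);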
- Training function svmTrain.m
function svm = svmTrain(X,Y,kertype,gamma,C)
    % solve the dual as a quadratic program with quadprog; see `help quadprog` for details
    n = length(Y);
    H = (Y*Y').*kernel(X,X,kertype,gamma);
    f = -ones(n,1);
    A = [];
    b = [];
    Aeq = Y';
    beq = 0;
    lb = zeros(n,1);
    ub = C*ones(n,1);
    a = quadprog(H,f,A,b,Aeq,beq,lb,ub);
    epsilon = 3e-5;  % the threshold can be chosen to suit your needs
    % pick out the support vectors (multipliers above the threshold)
    svm_index = find(abs(a) > epsilon);
    svm.sva = a(svm_index);
    svm.Xsv = X(svm_index,:);
    svm.Ysv = Y(svm_index);
    svm.svnum = length(svm_index);
    svm.a = a;
end
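To see why the quadprog arguments are set this way: quadprog minimizes (1/2)·a'*H*a + f'*a subject to Aeq*a = beq and lb ≤ a ≤ ub, and the SVM dual problem is

$$
\min_{\alpha}\ \frac{1}{2}\sum_{i,j}\alpha_i\alpha_j y_i y_j K(x_i,x_j) - \sum_{i}\alpha_i
\qquad \text{s.t.}\quad \sum_{i}\alpha_i y_i = 0,\quad 0 \le \alpha_i \le C,
$$

so H(i,j) = y_i*y_j*K(x_i,x_j) is exactly (Y*Y').*kernel(X,X,...), f = -ones matches the linear term, Aeq = Y' with beq = 0 encodes the equality constraint, and lb/ub encode the box constraint.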
- Prediction function predict1.m
function test = predict1(train_data_name,test_data_name,kertype,gamma,C)
    %(1)-------------------training data ready-------------------
    train_data = load(train_data_name);
    n = size(train_data,2);  % number of columns
    train_x = train_data(:,1:n-1);
    train_y = train_data(:,n);
    % find the positions of the positive and negative labels
    pos = find(train_y == 1);
    neg = find(train_y == -1);
    figure('Position',[400 400 1000 400]);
    subplot(1,2,1);
    plot(train_x(pos,1),train_x(pos,2),'k+');
    hold on;
    plot(train_x(neg,1),train_x(neg,2),'bs');
    hold on;
    %(2)-----------------decision boundary-------------------
    train_svm = svmTrain(train_x,train_y,kertype,gamma,C);
    % plot the support vectors
    plot(train_svm.Xsv(:,1),train_svm.Xsv(:,2),'ro');
    train_a = train_svm.a;
    % recover w = sum_i a_i*y_i*x_i from the dual solution (valid for the linear kernel)
    train_w = [sum(train_a.*train_y.*train_x(:,1));sum(train_a.*train_y.*train_x(:,2))];
    % average b over the support vectors: b = mean(Ysv - Xsv*w)
    train_b = sum(train_svm.Ysv-train_svm.Xsv*train_w)/size(train_svm.Xsv,1);
    train_x_axis = 0:1:200;
    % decision boundary w(1)*x + w(2)*y + b = 0, i.e. y = -(b + w(1)*x)/w(2)
    plot(train_x_axis,-(train_b+train_w(1,1)*train_x_axis)/train_w(2,1),'-');
    legend('1','-1','support vector','decision boundary');
    title('training data');
    hold on;
    %(3)-------------------testing data ready----------------------
    test_data = load(test_data_name);