% Generate two Gaussian classes with common covariance C and means m1, m2.
N=100;                 % samples per class
C=[2,1;1,2];           % shared class covariance
T=chol(C);             % upper-triangular T with T'*T = C, so z*T ~ N(0,C)
% Correlate the zero-mean noise FIRST, then shift by the class mean.
% (Shifting before multiplying by T would move the true class mean to
% m*T, making the Bayes boundary computed below from m1, m2 and C
% inconsistent with the data.)
x1_original=randn(N,2);
m1=[0 2];
x1=x1_original*T+repmat(m1,N,1);
% Class 2
x2_original=randn(N,2);
m2=[1.5 0.0];
x2=x2_original*T+repmat(m2,N,1);
% Scatter both classes and overlay the linear decision boundary
% w'*x + b = 0 for two Gaussians sharing covariance C (equal priors).
plot(x1(:,1),x1(:,2),'mx');
hold on;
plot(x2(:,1),x2(:,2),'o');
axis([-10 10 -10 10]);
grid on;
Cinv = C^-1;                       % hoist the inverse used by both w and b
w = 2*Cinv*(m2-m1)';
b = (m1*Cinv*m1')-(m2*Cinv*m2');
% Solve w(1)*x + w(2)*y + b = 0 for y at the left/right plot edges.
xEdges = [-10, 10];
yEdges = -(b + w(1)*xEdges) / w(2);
plot(xEdges, yEdges, 'b', 'LineWidth', 2);
pause(2);
%close();
% Build the labelled dataset: stack BOTH classes (the original used only
% x1 and assigned labels at random, so the perceptron below had nothing
% meaningful to learn) and append a column of ones for the bias term.
Xall = [x1; x2];
M = size(Xall,1);          % total sample count (2*N)
X = [Xall ones(M,1)];
% True class labels: +1 for class 1, -1 for class 2.
y = [ones(N,1); -ones(N,1)];
disp(X);
% Separate into training and test sets (check: >> doc randperm)
ii = randperm(M);          % random permutation so the split mixes classes
Xtr = X(ii(1:M/2),:);
ytr = y(ii(1:M/2),:);
Xts = X(ii(M/2+1:M),:);
yts = y(ii(M/2+1:M),:);
% initialize weights (2 features + bias)
w = randn(3,1);
disp(w);
% Error-correcting (perceptron) learning on the training set.
eta = 0.001;               % learning rate
Ntr = size(Xtr,1);         % derive the training-set size (was hard-coded N/2)
for iter=1:500
    j = randi(Ntr);        % pick a random training sample (was ceil(rand*N/2))
    % Update only when sample j is on the wrong side of the boundary.
    if ( ytr(j)*Xtr(j,:)*w < 0 )
        w = w + eta*ytr(j)*Xtr(j,:)';
    end
end
% Performance on test data: yts.*yhts < 0 marks a sign disagreement,
% i.e. a misclassified test sample.
yhts = Xts*w;
disp([yts yhts])
disp(yts.*yhts);
% Percentage error on the test set (completes the truncated
% "%PercentageErro" computation the original left unfinished).
pct_error = 100*mean(yts.*yhts < 0);
fprintf('Percentage error: %.1f%%\n', pct_error);