Step-by-Step Tutorial: Andrew Ng's Machine Learning ex3, Handwritten Digit Recognition (MATLAB Code Walkthrough)

%%Machine Learning, from Andrew Ng's course
%%By youknowwho3_3 on CSDN #GirlsHelpGirls#DOUBANEZU
%%Multi-class Classification and Neural Networks
%%Implement one-vs-all logistic regression and neural networks to recognize hand-written digits
%%Use one-vs-all logistic regression and neural networks to predict which digit each image shows (y holds labels 1-10, where 10 stands for the digit 0)
%%This exercise is multi-class logistic regression: predict the true digit for 5000 handwritten-digit images. Each image has 400 features and can be displayed as a 20*20 grid, giving 5000 rows of data.
%This dataset is a .mat file, so loading it differs from the txt imports used in earlier exercises.
%%Look up "one-vs-all logistic regression" for its definition.
%1.1 Multi-class Classification: load dataset
%1.2 Visualizing the data
%1.3 Vectorizing logistic regression
%1.3.1 Vectorizing the cost function
%1.3.2 Vectorizing the gradient
%1.3.3 Vectorizing regularized logistic regression
%1.4 One-vs-all classification
%1.4.1 One-vs-all prediction
%2. Neural Networks
%2.1 Model representation
%2.2 Feedforward propagation and prediction

%%%%%%%%%%%%%%%%%%%%1. Multi-class Classification: load data%%%%%%%%%%%%%%
load("ex3data1.mat"); % loads X and y (both nonempty)

%%%%1.2 Visualizing the data
m = size(X, 1); % size(X)=5000*400; m=5000
% Randomly select 100 data points to display
rand_indices = randperm(m); % random permutation of 1..5000 (a 1*5000 row vector)
sel = X(rand_indices(1:100), :); 
%sel is a new matrix built from the rows of X indexed by rand_indices(1:100)
%e.g. if rand_indices(1)=2004, then the 400 columns of row 2004 of X become row 1 of sel; size(sel)=100*400
displayData(sel);
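% Quick sanity check (a hedged sketch, not part of the original exercise):
% confirm the shapes described above and count examples per label. Assumes
% y uses labels 1-10, with 10 standing for the digit 0.
fprintf('size(X) = %d x %d, size(y) = %d x %d\n', size(X,1), size(X,2), size(y,1), size(y,2));
for k = 1:10
    fprintf('label %2d: %4d examples\n', k, sum(y == k)); % expect 500 each
end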

%%%%%%%%%%%%%%%%%%%%1.3Vectorizing logistic regression%%%%%%%%%%%%%%%%%%%%
%%%1.3.1 Vectorizing the cost function of logistic regression
%%%1.3.2 Vectorizing the gradient
%%%1.3.3 Vectorizing regularized logistic regression
theta_t = [-2; -1; 1; 2]; %4*1
X_t = [ones(5,1) reshape(1:15,5,3)/10]; %5*4
y_t = ([1;0;1;0;1] >= 0.5);
lambda_t = 3;
[J, grad] = lrCostFunction(theta_t, X_t, y_t, lambda_t);

fprintf('Cost: %f | Expected cost: 2.534819\n', J);
fprintf('Gradients:\n'); fprintf(' %f\n', grad);
fprintf('Expected gradients:\n 0.146561\n -0.548558\n 0.724722\n 1.398003\n');
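% Optional numerical gradient check (a hedged sketch, not in the original
% exercise script): approximate each partial derivative with central
% differences and compare against the analytic gradient returned above.
epsilon = 1e-4;
numgrad = zeros(size(theta_t));
for j = 1:numel(theta_t)
    e = zeros(size(theta_t));
    e(j) = epsilon;
    numgrad(j) = (lrCostFunction(theta_t + e, X_t, y_t, lambda_t) - ...
                  lrCostFunction(theta_t - e, X_t, y_t, lambda_t)) / (2 * epsilon);
end
fprintf('Max |numgrad - grad|: %e (should be around 1e-9 or smaller)\n', max(abs(numgrad - grad)));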


%%%%%%%%%%%%%%%%%%%%%%%%%%%%1.4 One-vs-all classification%%%%%%%%%%%%%%%%%%%
a = 1:10; % a = 1  2  3  4  5  6  7  8  9  10
b = 3;
disp(a == b) %  0   0   1   0   0   0   0   0   0   0
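% The same comparison builds the binary labels for one-vs-all training:
% (y == c) is 1 for examples whose label is c and 0 otherwise. A hedged
% illustration using the y vector loaded above:
c = 3;
y_c = (y == c); % 5000*1 logical vector: "is this example a 3?"
fprintf('Examples with label %d: %d\n', c, sum(y_c));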

num_labels = 10; % 10 labels, from 1 to 10 
lambda = 0.1;
[all_theta] = oneVsAll(X, y, num_labels, lambda);
%size(all_theta)=10*401
%one row of parameters per classifier (10 classifiers in total)


%%%1.4.1 One-vs-all prediction
pred = predictOneVsAll(all_theta, X);
%all_theta holds 10 classifiers; in each row of h, the column with the largest value is the most likely digit.
fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100);
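% Optional diagnostic (a hedged sketch, not in the original exercise): build
% a 10*10 confusion matrix showing, for each true label (row), how often
% each label was predicted (column).
confusion = accumarray([y pred], 1, [num_labels num_labels]);
disp(confusion);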


%%%%%%%%%%%%%%%%%%%%%%%%%%%%2. Neural Networks%%%%%%%%%%%%%%%%%%%%%%%%%%%%
load('ex3data1.mat'); % reload X and y
% Randomly select 100 data points to display
m = size(X,1); % m=5000
sel = randperm(size(X,1)); % size(sel)=1*5000, a random permutation
sel = sel(1:100); % keep the first 100 entries; size(sel)=1*100
displayData(X(sel,:)); % display the 100 randomly selected rows of X


% Load saved matrices from file
load('ex3weights.mat'); % loads Theta1 and Theta2
% Theta1 has size 25 x 401
% Theta2 has size 10 x 26
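% Quick shape check (a hedged sketch, not in the original script): confirm
% the sizes noted above.
fprintf('Theta1: %d x %d, Theta2: %d x %d\n', size(Theta1,1), size(Theta1,2), size(Theta2,1), size(Theta2,2));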


%%%2.2 Feedforward propagation and prediction
pred = predict(Theta1, Theta2, X);
fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100);
% expected accuracy: about 97.5%
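% Optional (hedged sketch): display up to 25 of the examples the network
% misclassifies, to see which digits it struggles with.
wrong = find(pred ~= y);
if ~isempty(wrong)
    fprintf('Misclassified examples: %d of %d\n', numel(wrong), m);
    displayData(X(wrong(1:min(25, numel(wrong))), :));
end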



%  Randomly select one example to predict and display
rp = randi(m);
% Predict
pred = predict(Theta1, Theta2, X(rp,:));
fprintf('\nNeural Network Prediction: %d (digit %d)\n', pred, mod(pred, 10));
% Display 
displayData(X(rp, :));   


%%function predict
function pred=predict(Theta1,Theta2,X)
%size(X)=5000*400
%Theta1=25*401
%Theta2=10*26
m=size(X,1);
p = zeros(size(X, 1), 1); % template placeholder; the value returned is pred below
a1=[ones(m,1),X]; %size(a1)=5000*401

%formula (per example): z2 = Theta1*a1; vectorized over all rows as a1*Theta1'
z2=a1*Theta1'; 
%size(z2)=5000*25 

a2=sigmoid(z2);
%size(sigmoid(z2))=5000*25
a2=[ones(size(a2,1),1),a2];
%size(a2)=5000*26

%formula (per example): z3 = Theta2*a2; vectorized over all rows as a2*Theta2'
z3=a2*Theta2'; 
%size(z3)=5000*10

h=sigmoid(z3);%size(h)=5000*10
%a3 = h = sigmoid(a2*Theta2'), where a2 = [ones sigmoid(a1*Theta1')] already includes the bias column

[maxval, indices] = max(h,[],2);
%maxval: the row-wise maximum of h (the winning hypothesis value)
%indices: the column of each row's maximum, i.e. the predicted label
pred=indices;
end



%%function predictOneVsAll
function p = predictOneVsAll(all_theta, X)
m = size(X, 1); %m=5000
num_labels = size(all_theta, 1); %size=10

% You need to return the following variables correctly 
p = zeros(size(X, 1), 1); %5000*1

% Add ones to the X data matrix
X = [ones(m, 1) X]; %size(X)=5000*401

predictions=sigmoid(X*all_theta');%size(all_theta)=10*401
%size(predictions)=5000*10
%compute predictions from the learned all_theta
[maxval,indices]=max(predictions,[],2);
%the largest value marks the most likely digit, i.e. the one matching y.
%maxval: the row-wise maximum of predictions
%indices: the column of each row's maximum, i.e. the predicted label

%size(maxval)=5000*1
%size(indices)=5000*1
%If A is a matrix, max(A,[],2) is a column vector containing the maximum value of each row
p=indices; 
end




%%function oneVsAll
function [all_theta] = oneVsAll(X, y, num_labels, lambda)
%ONEVSALL trains multiple logistic regression classifiers and returns all
%the classifiers in a matrix all_theta, where the i-th row of all_theta 
%corresponds to the classifier for label i
%   [all_theta] = ONEVSALL(X, y, num_labels, lambda) trains num_labels
%   logistic regression classifiers and returns each of these classifiers
%   in a matrix all_theta, where the i-th row of all_theta corresponds 
%   to the classifier for label i

% Some useful variables
m = size(X, 1); %m=5000
n = size(X, 2); %n=400
% You need to return the following variables correctly 
all_theta = zeros(num_labels, n + 1);%zeros(10,401)
% Add ones to the X data matrix
X = [ones(m, 1) X]; %size(X)=5000*401

initial_theta = zeros(n + 1, 1);

options = optimset('GradObj', 'on', 'MaxIter', 50);

for c = 1:num_labels
    all_theta(c,:)=fmincg(@(t)(lrCostFunction(t, X, (y == c), lambda)),initial_theta,options);
%y==c builds the binary labels for classifier c: the 10 classifiers in turn treat each label 1-10 as the positive class when computing the cost, gradient, and theta.
%Substituting the learned thetas back into h gives a 5000*10 matrix of hypothesis values for labels 1-10; the largest h in each row is the most likely match for y.
end
end
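% Note (an assumption, not from the original exercise): fmincg below is the
% course-provided minimizer. MATLAB's own fminunc (Optimization Toolbox)
% accepts the same cost-function handle and options, e.g.
%   all_theta(c,:) = fminunc(@(t) lrCostFunction(t, X, (y == c), lambda), ...
%                            initial_theta, options);
% fmincg is used here because it handles these 401-parameter problems
% efficiently without requiring the toolbox.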



%%function fmincg (provided)
function [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)
% Minimize a continuous differentiable multivariate function. Starting point
% is given by "X" (D by 1), and the function named in the string "f", must
% return a function value and a vector of partial derivatives. The Polack-
% Ribiere flavour of conjugate gradients is used to compute search directions,
% and a line search using quadratic and cubic polynomial approximations and the
% Wolfe-Powell stopping criteria is used together with the slope ratio method
% for guessing initial step sizes. Additionally a bunch of checks are made to
% make sure that exploration is taking place and that extrapolation will not
% be unboundedly large. The "length" gives the length of the run: if it is
% positive, it gives the maximum number of line searches, if negative its
% absolute gives the maximum allowed number of function evaluations. You can
% (optionally) give "length" a second component, which will indicate the
% reduction in function value to be expected in the first line-search (defaults
% to 1.0). The function returns when either its length is up, or if no further
% progress can be made (ie, we are at a minimum, or so close that due to
% numerical problems, we cannot get any closer). If the function terminates
% within a few iterations, it could be an indication that the function value
% and derivatives are not consistent (ie, there may be a bug in the
% implementation of your "f" function). The function returns the found
% solution "X", a vector of function values "fX" indicating the progress made
% and "i" the number of iterations (line searches or function evaluations,
% depending on the sign of "length") used.
%
% Usage: [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)
%
% See also: checkgrad 
%
% Copyright (C) 2001 and 2002 by Carl Edward Rasmussen. Date 2002-02-13
%
%
% (C) Copyright 1999, 2000 & 2001, Carl Edward Rasmussen
% 
% Permission is granted for anyone to copy, use, or modify these
% programs and accompanying documents for purposes of research or
% education, provided this copyright notice is retained, and note is
% made of any changes that have been made.
% 
% These programs and documents are distributed without any warranty,
% express or implied.  As the programs were written for research
% purposes only, they have not been tested to the degree that would be
% advisable in any important application.  All use of these programs is
% entirely at the user's own risk.
%
% [ml-class] Changes Made:
% 1) Function name and argument specifications
% 2) Output display
%

% Read options
if exist('options', 'var') && ~isempty(options) && isfield(options, 'MaxIter')
    length = options.MaxIter;
else
    length = 100;
end


RHO = 0.01;                            % a bunch of constants for line searches
SIG = 0.5;       % RHO and SIG are the constants in the Wolfe-Powell conditions
INT = 0.1;    % don't reevaluate within 0.1 of the limit of the current bracket
EXT = 3.0;                    % extrapolate maximum 3 times the current bracket
MAX = 20;                         % max 20 function evaluations per line search
RATIO = 100;                                      % maximum allowed slope ratio

argstr = ['feval(f, X'];                      % compose string used to call function
for i = 1:(nargin - 3)
  argstr = [argstr, ',P', int2str(i)];
end
argstr = [argstr, ')'];

if max(size(length)) == 2, red=length(2); length=length(1); else red=1; end
S=['Iteration '];

i = 0;                                            % zero the run length counter
ls_failed = 0;                             % no previous line search has failed
fX = [];
[f1 df1] = eval(argstr);                      % get function value and gradient
i = i + (length<0);                                            % count epochs?!
s = -df1;                                        % search direction is steepest
d1 = -s'*s;                                                 % this is the slope
z1 = red/(1-d1);                                  % initial step is red/(|s|+1)

while i < abs(length)                                      % while not finished
  i = i + (length>0);                                      % count iterations?!

  X0 = X; f0 = f1; df0 = df1;                   % make a copy of current values
  X = X + z1*s;                                             % begin line search
  [f2 df2] = eval(argstr);
  i = i + (length<0);                                          % count epochs?!
  d2 = df2'*s;
  f3 = f1; d3 = d1; z3 = -z1;             % initialize point 3 equal to point 1
  if length>0, M = MAX; else M = min(MAX, -length-i); end
  success = 0; limit = -1;                     % initialize quantities
  while 1
    while ((f2 > f1+z1*RHO*d1) || (d2 > -SIG*d1)) && (M > 0) 
      limit = z1;                                         % tighten the bracket
      if f2 > f1
        z2 = z3 - (0.5*d3*z3*z3)/(d3*z3+f2-f3);                 % quadratic fit
      else
        A = 6*(f2-f3)/z3+3*(d2+d3);                                 % cubic fit
        B = 3*(f3-f2)-z3*(d3+2*d2);
        z2 = (sqrt(B*B-A*d2*z3*z3)-B)/A;       % numerical error possible - ok!
      end
      if isnan(z2) || isinf(z2)
        z2 = z3/2;                  % if we had a numerical problem then bisect
      end
      z2 = max(min(z2, INT*z3),(1-INT)*z3);  % don't accept too close to limits
      z1 = z1 + z2;                                           % update the step
      X = X + z2*s;
      [f2 df2] = eval(argstr);
      M = M - 1; i = i + (length<0);                           % count epochs?!
      d2 = df2'*s;
      z3 = z3-z2;                    % z3 is now relative to the location of z2
    end
    if f2 > f1+z1*RHO*d1 || d2 > -SIG*d1
      break;                                                % this is a failure
    elseif d2 > SIG*d1
      success = 1; break;                                             % success
    elseif M == 0
      break;                                                          % failure
    end
    A = 6*(f2-f3)/z3+3*(d2+d3);                      % make cubic extrapolation
    B = 3*(f3-f2)-z3*(d3+2*d2);
    z2 = -d2*z3*z3/(B+sqrt(B*B-A*d2*z3*z3));        % num. error possible - ok!
    if ~isreal(z2) || isnan(z2) || isinf(z2) || z2 < 0 % num prob or wrong sign?
      if limit < -0.5                               % if we have no upper limit
        z2 = z1 * (EXT-1);                 % then extrapolate the maximum amount
      else
        z2 = (limit-z1)/2;                                   % otherwise bisect
      end
    elseif (limit > -0.5) && (z2+z1 > limit)         % extrapolation beyond max?
      z2 = (limit-z1)/2;                                               % bisect
    elseif (limit < -0.5) && (z2+z1 > z1*EXT)       % extrapolation beyond limit
      z2 = z1*(EXT-1.0);                           % set to extrapolation limit
    elseif z2 < -z3*INT
      z2 = -z3*INT;
    elseif (limit > -0.5) && (z2 < (limit-z1)*(1.0-INT))  % too close to limit?
      z2 = (limit-z1)*(1.0-INT);
    end
    f3 = f2; d3 = d2; z3 = -z2;                  % set point 3 equal to point 2
    z1 = z1 + z2; X = X + z2*s;                      % update current estimates
    [f2 df2] = eval(argstr);
    M = M - 1; i = i + (length<0);                             % count epochs?!
    d2 = df2'*s;
  end                                                      % end of line search

  if success                                         % if line search succeeded
    f1 = f2; fX = [fX' f1]';
    %fprintf('%s %4i | Cost: %4.6e\r', S, i, f1);
    s = (df2'*df2-df1'*df2)/(df1'*df1)*s - df2;      % Polack-Ribiere direction
    tmp = df1; df1 = df2; df2 = tmp;                         % swap derivatives
    d2 = df1'*s;
    if d2 > 0                                      % new slope must be negative
      s = -df1;                              % otherwise use steepest direction
      d2 = -s'*s;    
    end
    z1 = z1 * min(RATIO, d1/(d2-realmin));          % slope ratio but max RATIO
    d1 = d2;
    ls_failed = 0;                              % this line search did not fail
  else
    X = X0; f1 = f0; df1 = df0;  % restore point from before failed line search
    if ls_failed || i > abs(length)          % line search failed twice in a row
      break;                             % or we ran out of time, so we give up
    end
    tmp = df1; df1 = df2; df2 = tmp;                         % swap derivatives
    s = -df1;                                                    % try steepest
    d1 = -s'*s;
    z1 = 1/(1-d1);                     
    ls_failed = 1;                                    % this line search failed
  end
  if exist('OCTAVE_VERSION')
    fflush(stdout);
  end
end
fprintf('\n');
end



%%function lrCostFunction
%{
Formulas: %lrCostFunction
          J = 1/m * sum(i=1:m)[ -y_i*log(h(x_i)) - (1-y_i)*log(1-h(x_i)) ] + lambda/(2m) * sum(j=1:n) theta_j^2
          %gradient
          j=0:  dJ/dtheta_0 = 1/m * sum(i=1:m)(h(x_i)-y_i)*x_i0
          j>=1: dJ/dtheta_j = 1/m * sum(i=1:m)(h(x_i)-y_i)*x_ij + lambda/m * theta_j
code:   h = sigmoid(z)
        z = X*theta
        J = 1/m*sum(-y.*log(h)-(1-y).*log(1-h)) + lambda/(2*m)*sum(theta(2:end).^2)
        %Note that you should not regularize theta(1), which is used for the
        %bias term.
        %In the math the bias index is j=0, but MATLAB indexing is 1-based:
        %j=0 is theta(1) in code, and j>=1 corresponds to theta(2:end).
        %%%%%%%%%%%%%%%%general form of grad%%%%%%%%%%%%%
        j=0:  grad = (1/m) * (X_t(:,1))'*(h-y) + (lambda/m)*0
        j>=1: grad = (1/m) * (X_t(:,2:end))'*(h-y) + (lambda/m)*theta_t(2:end,:)  %j>=1 uses columns 2:end of every row of X
        Combining both cases:
        grad = (1/m) * X_t'*(h-y) + (lambda/m)*[0; theta(2:end)]
             = (1/m) * X_t'*(h-y) + [0; (lambda_t/m)*theta_t(2:end,:)]

        For j=0 the regularization term of the derivative is 0; for j>=1 it is present.
        So X_t keeps all columns (j=0,1,2,3,...) and the regularization term is [0; (lambda_t/m)*theta_t(2:end,:)].
%}
function [J, grad] = lrCostFunction(theta_t, X_t, y_t, lambda_t)
m=length(y_t);
J = 0;
grad = zeros(size(theta_t));
z = X_t * theta_t;      
h = sigmoid(z);   
J=1/m*sum(-y_t.*log(h)-(1-y_t).*log(1-h))+lambda_t/(2*m)*sum(theta_t(2:end).^2);
grad = (1/m)* (X_t)'*(h-y_t)+[0;(lambda_t/m)*theta_t(2:end,:)];  
end

%%sigmoid function
%Formula:h(X)=g(theta'*X)=1/(1+e^(-theta'*X))
%   Coding:h=1./(1+exp(-X*theta))
function g=sigmoid(z)
g=zeros(size(z)); 
g=1./(1+exp(-z)); 
end

%%function displayData (provided)
function [h, display_array] = displayData(X, example_width)
%DISPLAYDATA Display 2D data in a nice grid
%   [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data
%   stored in X in a nice grid. It returns the figure handle h and the 
%   displayed array if requested.

% Set example_width automatically if not passed in
if ~exist('example_width', 'var') || isempty(example_width) 
	example_width = round(sqrt(size(X, 2)));
end

% Gray Image
colormap(gray);

% Compute rows, cols
[m n] = size(X);
example_height = (n / example_width);

% Compute number of items to display
display_rows = floor(sqrt(m));
display_cols = ceil(m / display_rows);

% Between images padding
pad = 1;

% Setup blank display
display_array = - ones(pad + display_rows * (example_height + pad), ...
                       pad + display_cols * (example_width + pad));

% Copy each example into a patch on the display array
curr_ex = 1;
for j = 1:display_rows
	for i = 1:display_cols
		if curr_ex > m, 
			break; 
		end
		% Copy the patch
		
		% Get the max value of the patch
		max_val = max(abs(X(curr_ex, :)));
		display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ...
		              pad + (i - 1) * (example_width + pad) + (1:example_width)) = ...
						reshape(X(curr_ex, :), example_height, example_width) / max_val;
		curr_ex = curr_ex + 1;
	end
	if curr_ex > m, 
		break; 
	end
end

% Display Image
h = imagesc(display_array, [-1 1]);

% Do not show axis
axis image off

drawnow; 

end
