Neural Network Example

clear ; close all; clc
input_layer_size  = 400;  % 20x20 input images of digits
hidden_layer_size = 25;   % 25 hidden units
num_labels = 10;          % 10 labels, from 1 to 10 -- these are the unit counts per layer
                          % (note that we have mapped "0" to label 10)
fprintf('Loading and Visualizing Data ...\n')
load('ex4data1.mat');
m = size(X, 1);  % number of training examples (rows of X)
sel = randperm(size(X, 1));
sel = sel(1:100);
displayData(X(sel, :));
fprintf('\nLoading Saved Neural Network Parameters ...\n')
load('ex4weights.mat');
nn_params = [Theta1(:) ; Theta2(:)];  % unroll both layers' parameters into one vector
lambda = 0;
J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, ...
                   num_labels, X, y, lambda);
fprintf(['Cost at parameters (loaded from ex4weights): %f '...
         '\n(this value should be about 0.287629)\n'], J);
fprintf('\nProgram paused. Press enter to continue.\n');
pause;
fprintf('\nChecking Cost Function (w/ Regularization) ... \n')
lambda = 1;
J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, ...
                   num_labels, X, y, lambda);
fprintf(['Cost at parameters (loaded from ex4weights): %f '...
         '\n(this value should be about 0.383770)\n'], J);
fprintf('Program paused. Press enter to continue.\n');
pause;
g = sigmoidGradient([-1 -0.5 0 0.5 1]);
fprintf('Sigmoid gradient evaluated at [-1 -0.5 0 0.5 1]:\n  ');
fprintf('%f ', g);
fprintf('\n\n');

fprintf('Program paused. Press enter to continue.\n');
pause;


fprintf('\nInitializing Neural Network Parameters ...\n')

initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size);
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels);

initial_nn_params = [initial_Theta1(:) ; initial_Theta2(:)];

fprintf('\nChecking Backpropagation... \n');
checkNNGradients;

fprintf('\nProgram paused. Press enter to continue.\n');
pause;
fprintf('\nChecking Backpropagation (w/ Regularization) ... \n')
checkNNGradients(lambda);
debug_J = nnCostFunction(nn_params, input_layer_size, ...
                         hidden_layer_size, num_labels, X, y, lambda);
fprintf('\nCost at (loaded) parameters (w/ lambda = %g): %f\n', lambda, debug_J);
% train the network with fmincg, limited to 50 iterations
options = optimset('MaxIter', 50);
lambda = 1;
costFunction = @(p) nnCostFunction(p, ...
                                   input_layer_size, ...
                                   hidden_layer_size, ...
                                   num_labels, X, y, lambda);
[nn_params, cost] = fmincg(costFunction, initial_nn_params, options);
% re-roll the optimized parameter vector back into the two weight matrices
Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
                 hidden_layer_size, (input_layer_size + 1));

Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
                 num_labels, (hidden_layer_size + 1));

displayData(Theta1(:, 2:end));  % visualize the learned hidden-layer features (bias column dropped)
fprintf('\nProgram paused. Press enter to continue.\n');
pause;
pred = predict(Theta1, Theta2, X);

fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100);
........................................................................
function g = sigmoidGradient(z)
% gradient of the sigmoid evaluated element-wise at z: g'(z) = g(z) .* (1 - g(z))
g = sigmoid(z) .* (1 - sigmoid(z));
end
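
sigmoidGradient (and the cost function below) call a sigmoid helper that is not listed in this post; a minimal sketch of the standard logistic function they assume:

function g = sigmoid(z)
% element-wise logistic function: g(z) = 1 ./ (1 + e.^(-z))
g = 1.0 ./ (1.0 + exp(-z));
end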

........................................................................
function [h, display_array] = displayData(X, example_width)
if ~exist('example_width', 'var') || isempty(example_width) 
    example_width = round(sqrt(size(X, 2)));
end
colormap(gray);
[m, n] = size(X);  % number of examples and pixels per example
example_height = (n / example_width);
display_rows = floor(sqrt(m));
display_cols = ceil(m / display_rows);
pad = 1;
display_array = - ones(pad + display_rows * (example_height + pad), ...
                       pad + display_cols * (example_width + pad));
% Copy each example into a patch on the display array
curr_ex = 1;
for j = 1:display_rows
    for i = 1:display_cols
        if curr_ex > m
            break;
        end
        max_val = max(abs(X(curr_ex, :)));  % scale each patch by its max magnitude
        display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ...
                      pad + (i - 1) * (example_width + pad) + (1:example_width)) = ...
                        reshape(X(curr_ex, :), example_height, example_width) / max_val;
        curr_ex = curr_ex + 1;
    end
    if curr_ex > m
        break;
    end
end
h = imagesc(display_array, [-1 1]);
axis image off
drawnow;
end
...............................................................................................................
function [J, grad] = nnCostFunction(nn_params, ...
                                   input_layer_size, ...
                                   hidden_layer_size, ...
                                   num_labels, ...
                                   X, y, lambda)
Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
                 hidden_layer_size, (input_layer_size + 1));

Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
                 num_labels, (hidden_layer_size + 1));
% the network has just two weight layers: input -> hidden -> output
m = size(X, 1);  % number of training examples
J = 0;
Theta1_grad = zeros(size(Theta1));  % gradient accumulators
Theta2_grad = zeros(size(Theta2));

    % vectorized forward propagation for the cost term
    a1 = [ones(m, 1) X];     % add bias units
    z2 = a1 * Theta1';
    a2 = sigmoid(z2);
    a2 = [ones(m, 1) a2];    % add bias units
    z3 = a2 * Theta2';
    h = sigmoid(z3);         % m x num_labels matrix of hypotheses
    % recode the labels as one-hot row vectors
    yk = zeros(m, num_labels);
    for i = 1:m
        yk(i, y(i)) = 1;     % y holds class indices 1..num_labels
    end
    % unregularized cost:
    % J = (1/m) * sum_i sum_k [ -yk.*log(h) - (1 - yk).*log(1 - h) ]
    J = (1/m) * sum(sum((-yk) .* log(h) - (1 - yk) .* log(1 - h)));
    % add the regularization term; the bias columns of Theta1 and Theta2 are excluded
    Theta1_new = Theta1(:, 2:end);
    Theta2_new = Theta2(:, 2:end);
    J = J + lambda/(2*m) * (Theta1_new(:)'*Theta1_new(:) + Theta2_new(:)'*Theta2_new(:));

    
    
% backpropagation, one training example at a time
for i = 1:m
    y_new = zeros(num_labels, 1);
    y_new(y(i)) = 1;                    % one-hot column vector for example i
    a1 = [1; X(i, :)'];                 % forward pass with bias units
    z2 = Theta1 * a1;
    a2 = [1; sigmoid(z2)];
    a3 = sigmoid(Theta2 * a2);
    det3 = a3 - y_new;                  % output-layer error
    det2 = Theta2' * det3 .* sigmoidGradient([1; z2]);
    det2 = det2(2:end);                 % drop the bias error term
    Theta1_grad = Theta1_grad + det2 * a1';
    Theta2_grad = Theta2_grad + det3 * a2';
end

% after the loop, average the accumulated gradients and add regularization
% (the bias column is not regularized)
Theta1_grad(:, 1) = Theta1_grad(:, 1) / m;
Theta1_grad(:, 2:end) = Theta1_grad(:, 2:end) / m + lambda * Theta1(:, 2:end) / m;
Theta2_grad(:, 1) = Theta2_grad(:, 1) / m;
Theta2_grad(:, 2:end) = Theta2_grad(:, 2:end) / m + lambda * Theta2(:, 2:end) / m;

% unroll the gradients into a single vector for the optimizer
grad = [Theta1_grad(:) ; Theta2_grad(:)];
end
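
The gradient check in the main script (checkNNGradients) is part of the course code and is not reproduced in this post. At its core is a central-difference numerical gradient; a minimal sketch, assuming the conventional computeNumericalGradient interface (a cost handle J and an unrolled parameter vector theta):

function numgrad = computeNumericalGradient(J, theta)
% approximate each partial derivative with a central difference:
% numgrad(p) ~ (J(theta + e*u_p) - J(theta - e*u_p)) / (2*e)
numgrad = zeros(size(theta));
perturb = zeros(size(theta));
e = 1e-4;
for p = 1:numel(theta)
    perturb(p) = e;                        % perturb one parameter at a time
    loss1 = J(theta - perturb);
    loss2 = J(theta + perturb);
    numgrad(p) = (loss2 - loss1) / (2*e);
    perturb(p) = 0;
end
end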
...................................................................................
function W = randInitializeWeights(L_in, L_out)
% randomly initialize the weights of a layer with L_in incoming connections
% and L_out units, so that symmetry is broken during training
epsilon_init = 0.12;
W = rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init;  % uniform in [-epsilon, epsilon]
end
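
The script's final step calls predict, which is also not listed above; a minimal sketch, assuming the usual forward pass with the predicted label taken as the index of the largest output unit:

function p = predict(Theta1, Theta2, X)
% feed-forward prediction: label = arg-max over the output-layer activations
m = size(X, 1);
h1 = sigmoid([ones(m, 1) X] * Theta1');    % hidden-layer activations
h2 = sigmoid([ones(m, 1) h1] * Theta2');   % output-layer activations
[~, p] = max(h2, [], 2);                   % p(i) is in 1..num_labels
end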
