Coursera-MachineLearning-NeuralNetwork(2)

Coursera Machine Learning, taught by Andrew Ng

The previous post implemented only forward propagation for the neural network; this one adds backpropagation.
Background: the task is again handwritten digit recognition.

% 1. Load and visualize the data

%% Initialization
clear ; close all; clc

%% Set up the parameters
input_layer_size  = 400;  % 20x20 Input Images of Digits
hidden_layer_size = 25;   % 25 hidden units
num_labels = 10;          % 10 labels, from 1 to 10   
                          % (note that we have mapped "0" to label 10)
                         
% Load the training set
load('ex4data1.mat');
m = size(X, 1);

% Randomly select 100 examples to display
sel = randperm(size(X, 1));
sel = sel(1:100);
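
The assignment's starter code ships a displayData helper that renders rows of X as a grid of digit images; assuming that helper is on the path, the selected examples can be shown with:

displayData(X(sel, :));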
% 2. Load the pretrained neural network parameters

load('ex4weights.mat');

% Unroll the parameters into a single vector
nn_params = [Theta1(:) ; Theta2(:)];
% 3. Compute the cost function (neural network forward propagation)
% It is recommended to compute the cost without regularization first,
% i.e. with lambda set to 0

lambda = 0;
J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, ...
                   num_labels, X, y, lambda);
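
As a sanity check: with the pretrained weights and lambda = 0, the exercise says the cost should come out to about 0.287629.

fprintf('Cost at loaded parameters (lambda = 0): %f\n', J);  % expected: about 0.287629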

% Function nnCostFunction:
function [J grad] = nnCostFunction(nn_params, ...
                                   input_layer_size, ...
                                   hidden_layer_size, ...
                                   num_labels, ...
                                   X, y, lambda)

% Reshape nn_params back into Theta1 and Theta2
% Theta1 has size 25 x 401  
% Theta2 has size 10 x 26  
Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
                 hidden_layer_size, (input_layer_size + 1));
Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
                 num_labels, (hidden_layer_size + 1));

m = size(X, 1); 
J = 0;
Theta1_grad = zeros(size(Theta1));
Theta2_grad = zeros(size(Theta2));


% Part 1: forward propagation and cost computation
% (the dimensions of X and y are listed after the function)

% These two statements turn y (5000x1, labels 1-10) into a 5000x10 one-hot 0/1 matrix
h = eye(num_labels);
y = h(y, :); 

% Add a column of ones (bias units) to form a1: 5000x401 (m = 5000)
a1 = [ones(m, 1) X];

% Propagate layer by layer: z2, a2, ...
z2 = a1 * Theta1' ;  	
a2 = sigmoid(z2);           

% Add a column of ones to a2 as well, then compute z3 and a3
n = size(a2,1);  
a2 = [ones(n, 1) a2] ;   
z3 = a2 * Theta2';
a3 = sigmoid(z3); 

% Compute the cost J (see the formula after the function listing):
J = sum( sum( -y .* log(a3) -  (1-y) .* log(1-a3) ))/ m; 

% Add the regularization term
% Note the " Theta1(:, 2:end) ": the bias column that was added must be excluded
regularized = lambda / (2 * m) * (sum(sum(Theta1(:, 2:end) .^ 2)) + ...
                                  sum(sum(Theta2(:, 2:end) .^ 2)));
J = J + regularized;  


% Part 2: backpropagation to compute the gradients
% (the exercise suggests a for loop over the examples for a first
% implementation; the version below is vectorized)

% delta3 is the difference between a3 and y; delta2 is then (delta3 * Theta2) .* g'(z2):
delta3 = a3 - y;  
delta2 = delta3 * Theta2; 

% Drop the bias column, leaving 5000x25
delta2 = delta2(:, 2 : end);
delta2 = delta2 .* sigmoidGradient(z2);  

Delta_1 = zeros(size(Theta1));
Delta_2 = zeros(size(Theta2));

Delta_1 = Delta_1 + delta2' * a1;
Delta_2 = Delta_2 + delta3' * a2;

Theta1_grad = ((1 / m) * Delta_1) + ((lambda / m) * Theta1); 
Theta2_grad = ((1 / m) * Delta_2) + ((lambda / m) * Theta2);

% The first column (bias weights) of Theta1_grad and Theta2_grad must not be
% regularized, so subtract that term back off:
Theta1_grad(:, 1) = Theta1_grad(:, 1) - ((lambda / m) * (Theta1(:, 1)));
Theta2_grad(:, 1) = Theta2_grad(:, 1) - ((lambda / m) * (Theta2(:, 1)));  

% Unroll gradients
grad = [Theta1_grad(:) ; Theta2_grad(:)];
end

Sizes of X and y: X is 5000 x 400 (each row is an unrolled 20x20 pixel image) and y is 5000 x 1 with labels 1-10.

The regularized cost function:

$$J(\Theta) = \frac{1}{m}\sum_{i=1}^{m}\sum_{k=1}^{K}\Big[-y_k^{(i)}\log\big((h_\Theta(x^{(i)}))_k\big) - \big(1-y_k^{(i)}\big)\log\big(1-(h_\Theta(x^{(i)}))_k\big)\Big] + \frac{\lambda}{2m}\Big[\sum_{j,k}\big(\Theta^{(1)}_{j,k}\big)^2 + \sum_{j,k}\big(\Theta^{(2)}_{j,k}\big)^2\Big]$$

(the bias columns are excluded from the regularization sums)

% 4. Compute the cost function, this time with regularization:

lambda = 1;
J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, ...
                   num_labels, X, y, lambda);
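
With lambda = 1 and the same pretrained weights, the expected cost per the exercise is about 0.383770.

fprintf('Cost at loaded parameters (lambda = 1): %f\n', J);  % expected: about 0.383770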
% 5. Compute the gradient of the sigmoid function (Sigmoid Gradient)

g = sigmoidGradient([-1 -0.5 0 0.5 1]);

% Function sigmoidGradient:
function g = sigmoidGradient(z)
g = zeros(size(z));
% g'(z) = g(z) .* (1 - g(z)), where g is the sigmoid:
g = sigmoid(z) .* (1 - sigmoid(z));
end

The sigmoid gradient: $g'(z) = g(z)\,(1 - g(z))$, with $g(z) = \frac{1}{1+e^{-z}}$.
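For the test vector [-1 -0.5 0 0.5 1] this evaluates to approximately [0.1966 0.2350 0.2500 0.2350 0.1966]; the gradient peaks at 0.25 when z = 0.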

% 6. Randomly initialize the parameters

initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size);
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels);

% Unroll parameters
initial_nn_params = [initial_Theta1(:) ; initial_Theta2(:)];

% Function randInitializeWeights:
function W = randInitializeWeights(L_in, L_out)
W = zeros(L_out, 1 + L_in);
epsilon_init = 0.12;
W = rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init;
end
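
The fixed epsilon_init = 0.12 works for this architecture; the exercise PDF also suggests deriving it from the sizes of the adjacent layers, roughly:

epsilon_init = sqrt(6) / sqrt(L_in + L_out);  % about 0.12 for the 400-unit input to 25-unit hidden layer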
% 7. Gradient checking (check gradients)
% (I'm not entirely clear on this part)

function checkNNGradients(lambda)

% If lambda is not supplied, default to 0 (no regularization on the first run)
if ~exist('lambda', 'var') || isempty(lambda)
    lambda = 0;
end

% Set up the layer sizes of a small test network; there are 3 labels to predict
input_layer_size = 3;
hidden_layer_size = 5;
num_labels = 3;
m = 5;

% Generate some pseudo-random weights for testing
Theta1 = debugInitializeWeights(hidden_layer_size, input_layer_size);
Theta2 = debugInitializeWeights(num_labels, hidden_layer_size);

% Function debugInitializeWeights:
function W = debugInitializeWeights(fan_out, fan_in)
W = zeros(fan_out, 1 + fan_in);
W = reshape(sin(1:numel(W)), size(W)) / 10;  % deterministic values in [-0.1, 0.1]
end

% Reuse the same function to generate a training set X
X  = debugInitializeWeights(m, input_layer_size - 1);
y  = 1 + mod(1:m, num_labels)';

% Unroll the parameters
nn_params = [Theta1(:) ; Theta2(:)];

% Short hand for cost function
costFunc = @(p) nnCostFunction(p, input_layer_size, hidden_layer_size, ...
                               num_labels, X, y, lambda);

[cost, grad] = costFunc(nn_params);
numgrad = computeNumericalGradient(costFunc, nn_params);

% Function computeNumericalGradient:
function numgrad = computeNumericalGradient(J, theta)
numgrad = zeros(size(theta));
perturb = zeros(size(theta));
e = 1e-4;
for p = 1:numel(theta)
    % Set perturbation vector
    perturb(p) = e;
    loss1 = J(theta - perturb);
    loss2 = J(theta + perturb);
    % Compute Numerical Gradient
    numgrad(p) = (loss2 - loss1) / (2*e);
    perturb(p) = 0;
end
end


% The two columns should be nearly identical
disp([numgrad grad]);
% Relative difference; the exercise expects this to be very small (less than 1e-9)
diff = norm(numgrad - grad) / norm(numgrad + grad);
end
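
In the course's ex4.m driver the check is run twice, first without and then with regularization (lambda = 3 there), roughly:

checkNNGradients;          % lambda defaults to 0
lambda = 3;
checkNNGradients(lambda);  % re-check once regularization is implemented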
% 8. Train the neural network

options = optimset('MaxIter', 50);
lambda = 1;

% Create "short hand" for the cost function to be minimized
costFunction = @(p) nnCostFunction(p, ...
                                   input_layer_size, ...
                                   hidden_layer_size, ...
                                   num_labels, X, y, lambda);

[nn_params, cost] = fmincg(costFunction, initial_nn_params, options);
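
fmincg is an optimizer supplied with the exercise; it behaves much like MATLAB's fminunc but handles the large unrolled parameter vector more efficiently. Raising MaxIter beyond 50 (and tuning lambda) generally improves the final accuracy at the cost of longer training.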

Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
	hidden_layer_size, (input_layer_size + 1));

Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
	num_labels, (hidden_layer_size + 1));
% 9. Make predictions

pred = predict(Theta1, Theta2, X);

% Function predict:
function p = predict(Theta1, Theta2, X)
m = size(X, 1);
num_labels = size(Theta2, 1);
p = zeros(size(X, 1), 1);

% Forward propagate with the sigmoid hypothesis:
h1 = sigmoid([ones(m, 1) X] * Theta1');
h2 = sigmoid([ones(m, 1) h1] * Theta2');
[dummy, p] = max(h2, [], 2);  % the predicted label is the index of the largest output
end

% Print the result:
fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100);
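
With 50 iterations and lambda = 1, the exercise says to expect a training accuracy of about 95.3% (this can vary by roughly 1% due to the random initialization).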

