关闭

文章标题

标签: 深度学习NN
13人阅读 评论(0) 收藏 举报
分类:

 先看test_example_NN.m(依次调用nnsetup.m、nntrain.m、nntest.m)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
function test_example_NN
%TEST_EXAMPLE_NN Trains and tests feed-forward nets on MNIST.
% Runs six small experiments (vanilla net, L2 weight decay, dropout,
% sigmoid activation, softmax output with plotting, and a validation
% split), each built with nnsetup, trained with nntrain and evaluated
% with nntest. Each experiment asserts the test error is below a bound.
clear;close all;clc;
disp('当前正在执行的程序是:');
disp([mfilename('fullpath'),'.m']);

addpath(genpath('../data/'));
addpath(genpath('../NN/'));
addpath(genpath('../Util/'));
load mnist_uint8;

% train_x / test_x are the sample matrices, train_y / test_y the labels.
% Pixel values are scaled from uint8 [0,255] to double [0,1].
train_x = double(train_x) / 255;
test_x  = double(test_x)  / 255;
train_y = double(train_y);
test_y  = double(test_y);

% normalize
% zscore standardizes each column to zero mean and unit standard
% deviation, returning the mean (mu) and scale (sigma) it used.
[train_x, mu, sigma] = zscore(train_x);
% Apply the SAME mu/sigma to the test set so both share one scaling.
test_x = normalize(test_x, mu, sigma);

%% ex1 vanilla neural net
rand('state',0)
nn = nnsetup([784 100 10]);           % initialize network weights
opts.numepochs =  1;   % Number of full sweeps through data
opts.batchsize = 100;  % Take a mean gradient step over this many samples
[nn, L] = nntrain(nn, train_x, train_y, opts);  % train the network

[er, bad] = nntest(nn, test_x, test_y);         % evaluate on test set

assert(er < 0.08, 'Too big error');

%% ex2 neural net with L2 weight decay
rand('state',0)
nn = nnsetup([784 100 10]);

nn.weightPenaltyL2 = 1e-4;  %  L2 weight decay
opts.numepochs =  1;        %  Number of full sweeps through data
opts.batchsize = 100;       %  Take a mean gradient step over this many samples

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

%% ex3 neural net with dropout
rand('state',0)
nn = nnsetup([784 100 10]);

nn.dropoutFraction = 0.5;   %  Dropout fraction
opts.numepochs = 1;         %  Number of full sweeps through data
opts.batchsize = 100;       %  Take a mean gradient step over this many samples

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

%% ex4 neural net with sigmoid activation function
% The whole workflow boils down to three functions:
% nnsetup, nntrain and nntest.
rand('state',0)
nn = nnsetup([784 100 10]);

nn.activation_function = 'sigm';  %  Sigmoid activation function
nn.learningRate = 1;              %  Sigm require a lower learning rate
opts.numepochs = 1;               %  Number of full sweeps through data
opts.batchsize = 100;             %  Take a mean gradient step over this many samples

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

%% ex5 plotting functionality
rand('state',0)
nn = nnsetup([784 20 10]);
opts.numepochs = 5;      %  Number of full sweeps through data
nn.output = 'softmax';   %  use softmax output
opts.batchsize = 1000;   %  Take a mean gradient step over this many samples
opts.plot = 1;           %  enable plotting

nn = nntrain(nn, train_x, train_y, opts);

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');

%% ex6 neural net with sigmoid activation and plotting of validation and training error
% split training data into training and validation data
vx = train_x(1:10000,:);
tx = train_x(10001:end,:);
vy = train_y(1:10000,:);
ty = train_y(10001:end,:);

rand('state',0)
nn = nnsetup([784 20 10]);
nn.output = 'softmax';   %  use softmax output
opts.numepochs = 5;      %  Number of full sweeps through data
opts.batchsize = 1000;   %  Take a mean gradient step over this many samples
opts.plot = 1;           %  enable plotting
nn = nntrain(nn, tx, ty, opts, vx, vy);  %  nntrain takes validation set as last two arguments (optionally)

[er, bad] = nntest(nn, test_x, test_y);
assert(er < 0.1, 'Too big error');
  nnsetup.m

  • View Code
      nntrain.m(依次调用nnff.m、nnbp.m、nnapplygrads.m)

  • View Code
      nnff.m

  • View Code
      nnbp.m

  • View Code
      nnapplygrads.m

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
function nn = nnapplygrads(nn)
%NNAPPLYGRADS updates weights and biases with calculated gradients
% nn = nnapplygrads(nn) returns a neural network structure whose
% weight matrices nn.W{i} have been moved one step along the
% (optionally penalized, optionally momentum-smoothed) gradients nn.dW{i}.

for layer = 1 : (nn.n - 1)
    grad = nn.dW{layer};

    if nn.weightPenaltyL2 > 0
        % Add the L2 weight-decay term. The first column of W holds the
        % biases, which are excluded from the penalty (zero column).
        penalized = [zeros(size(nn.W{layer},1),1) nn.W{layer}(:,2:end)];
        grad = grad + nn.weightPenaltyL2 * penalized;
    end

    % Scale by the learning rate.
    grad = grad * nn.learningRate;

    if nn.momentum > 0
        % Blend with the running velocity and step along the velocity.
        nn.vW{layer} = nn.momentum * nn.vW{layer} + grad;
        grad = nn.vW{layer};
    end

    nn.W{layer} = nn.W{layer} - grad;
end

end
  nntest.m

1
2
3
4
5
6
7
function [er, bad] = nntest(nn, x, y)
%NNTEST evaluates classification error on a labelled set.
% Runs nnpredict on x, compares the predicted class indices against the
% one-hot labels y, and returns the error rate er together with the
% indices (bad) of the misclassified samples.
predicted = nnpredict(nn, x);
[~, truth] = max(y, [], 2);     % one-hot label -> class index per row
bad = find(predicted ~= truth); % rows where prediction disagrees
er = numel(bad) / size(x, 1);   % fraction misclassified
end

0
0

查看评论
* 以上用户言论只代表其个人观点,不代表CSDN网站的观点或立场
    个人资料
    • 访问:15次
    • 积分:2
    • 等级:
    • 排名:千里之外
    • 原创:0篇
    • 转载:1篇
    • 译文:0篇
    • 评论:0条
    文章分类
    文章存档
    阅读排行
    评论排行