【MATLAB深度学习】多层神经网络：多隐含层的设计与反向传播实现

1.反向传播算法

MATLAB代码实现：

function [W1, W2] = BackpropXOR(W1, W2, X, D)
% BackpropXOR  One full pass of per-sample gradient descent (SGD) for a
% single-hidden-layer sigmoid network trained with the sum-of-squared-error
% cost. Takes the network weights and training data, returns updated weights.
%
%   W1 : hidden-layer weight matrix (hiddenUnits x inputDim)
%   W2 : output-layer weight matrix (1 x hiddenUnits)
%   X  : training inputs, one sample per row
%   D  : desired (target) outputs, one entry per sample

alpha = 0.9;                      % learning rate

N = size(X, 1);                   % number of samples (was hard-coded 4)
for k = 1:N
    x = X(k, :)';                 % k-th input as a column vector
    d = D(k);                     % corresponding target

    v1 = W1*x;                    % hidden-layer pre-activation
    y1 = Sigmoid(v1);             % hidden-layer output
    v  = W2*y1;                   % output-layer pre-activation
    y  = Sigmoid(v);              % network output

    e     = d - y;                % output error
    delta = y.*(1-y).*e;          % output delta: sigmoid derivative * error

    e1     = W2'*delta;           % back-propagate the error to the hidden layer
    delta1 = y1.*(1-y1).*e1;      % hidden-layer delta

    dW1 = alpha*delta1*x';        % hidden-layer weight update
    W1  = W1 + dW1;

    dW2 = alpha*delta*y1';        % output-layer weight update
    W2  = W2 + dW2;
end
end

Sigmoid函数定义如下：

function y = Sigmoid(x)
% Sigmoid  Element-wise logistic activation: y = 1 / (1 + e^(-x)).
% Accepts scalars, vectors, or matrices; output has the same size as x.
denom = 1 + exp(-x);
y = 1 ./ denom;
end

clear all

% XOR training data: each row of X is one input sample; the constant third
% column acts as a bias input. D holds the target output for each row.
X = [ 0 0 1;
      0 1 1;
      1 0 1;
      1 1 1 ];

D = [ 0;
      1;
      1;
      0 ];

% Random weight initialization in [-1, 1].
W1 = 2*rand(4, 3) - 1;            % hidden layer: 4 units, 3 inputs
W2 = 2*rand(1, 4) - 1;            % output layer: 1 unit, 4 hidden inputs

for epoch = 1:10000               % train
    [W1, W2] = BackpropXOR(W1, W2, X, D);
end

N = size(X, 1);                   % inference: print the output per sample
for k = 1:N
    x  = X(k, :)';
    v1 = W1*x;
    y1 = Sigmoid(v1);
    v  = W2*y1;
    y  = Sigmoid(v)               % no semicolon: display the prediction
end

2.动量

function [W1, W2] = BackpropMmt(W1, W2, X, D)
% BackpropMmt  One pass of per-sample gradient descent with momentum for a
% single-hidden-layer sigmoid network (sum-of-squared-error cost).
%
%   W1 : hidden-layer weight matrix (hiddenUnits x inputDim)
%   W2 : output-layer weight matrix (1 x hiddenUnits)
%   X  : training inputs, one sample per row
%   D  : desired (target) outputs, one entry per sample

alpha = 0.9;                      % learning rate
beta  = 0.9;                      % momentum coefficient

% NOTE(review): the momentum buffers are re-zeroed on every call, so momentum
% only accumulates within one pass over the data, not across epochs. This
% follows the original tutorial; to carry momentum across epochs these would
% need to be persistent or passed in/out -- confirm intended behavior.
mmt1 = zeros(size(W1));
mmt2 = zeros(size(W2));

N = size(X, 1);                   % number of samples (was hard-coded 4)
for k = 1:N
    x = X(k, :)';                 % k-th input as a column vector
    d = D(k);                     % corresponding target

    v1 = W1*x;
    y1 = Sigmoid(v1);
    v  = W2*y1;
    y  = Sigmoid(v);

    e     = d - y;
    delta = y.*(1-y).*e;          % output delta

    e1     = W2'*delta;           % back-propagate the error
    delta1 = y1.*(1-y1).*e1;      % hidden-layer delta

    % Momentum update: blend the new gradient step with the previous one.
    dW1  = alpha*delta1*x';
    mmt1 = dW1 + beta*mmt1;
    W1   = W1 + mmt1;

    dW2  = alpha*delta*y1';
    mmt2 = dW2 + beta*mmt2;
    W2   = W2 + mmt2;
end
end

clear all

% XOR training data: each row of X is one input sample (constant third
% column is a bias input); D holds the target output per row.
X = [ 0 0 1;
      0 1 1;
      1 0 1;
      1 1 1 ];

D = [ 0;
      1;
      1;
      0 ];

% Random weight initialization in [-1, 1].
W1 = 2*rand(4, 3) - 1;
W2 = 2*rand(1, 4) - 1;

for epoch = 1:10000               % train with momentum
    [W1, W2] = BackpropMmt(W1, W2, X, D);
end

N = size(X, 1);                   % inference: print the output per sample
for k = 1:N
    x  = X(k, :)';
    v1 = W1*x;
    y1 = Sigmoid(v1);
    v  = W2*y1;
    y  = Sigmoid(v)               % no semicolon: display the prediction
end

3.代价函数与学习规则

function [W1, W2] = BackpropCE(W1, W2, X, D)
% BackpropCE  One pass of per-sample gradient descent for a single-hidden-
% layer sigmoid network trained with the CROSS-ENTROPY cost. With cross
% entropy and a sigmoid output, the output delta simplifies to the raw
% error e = d - y (the sigmoid-derivative factor cancels).
%
%   W1 : hidden-layer weight matrix (hiddenUnits x inputDim)
%   W2 : output-layer weight matrix (1 x hiddenUnits)
%   X  : training inputs, one sample per row
%   D  : desired (target) outputs, one entry per sample

alpha = 0.9;                      % learning rate

N = size(X, 1);                   % number of samples (was hard-coded 4)
for k = 1:N
    x = X(k, :)';                 % k-th input as a column vector
    d = D(k);                     % corresponding target

    v1 = W1*x;
    y1 = Sigmoid(v1);
    v  = W2*y1;
    y  = Sigmoid(v);

    e     = d - y;
    delta = e;                    % cross-entropy delta: just the error

    e1     = W2'*delta;           % back-propagate the error
    delta1 = y1.*(1-y1).*e1;      % hidden layer still uses the sigmoid derivative

    dW1 = alpha*delta1*x';
    W1  = W1 + dW1;

    dW2 = alpha*delta*y1';
    W2  = W2 + dW2;
end
end

clear all

% XOR training data: each row of X is one input sample (constant third
% column is a bias input); D holds the target output per row.
X = [ 0 0 1;
      0 1 1;
      1 0 1;
      1 1 1 ];

D = [ 0;
      1;
      1;
      0 ];

% Random weight initialization in [-1, 1].
W1 = 2*rand(4, 3) - 1;
W2 = 2*rand(1, 4) - 1;

for epoch = 1:10000               % train with the cross-entropy rule
    [W1, W2] = BackpropCE(W1, W2, X, D);
end

N = size(X, 1);                   % inference: print the output per sample
for k = 1:N
    x  = X(k, :)';
    v1 = W1*x;
    y1 = Sigmoid(v1);
    v  = W2*y1;
    y  = Sigmoid(v)               % no semicolon: display the prediction
end

4.代价函数比较

clear all

% Compare training-error curves of the cross-entropy rule (BackpropCE)
% against the sum-of-squared-error rule (BackpropXOR) on the same data,
% starting both networks from IDENTICAL random weights.
X = [ 0 0 1;
      0 1 1;
      1 0 1;
      1 1 1 ];

D = [ 0;
      0;
      1;
      1 ];

E1 = zeros(1000, 1);              % per-epoch mean squared error, cross entropy
E2 = zeros(1000, 1);              % per-epoch mean squared error, SSE

W11 = 2*rand(4, 3) - 1;           % cross-entropy network
W12 = 2*rand(1, 4) - 1;           %
W21 = W11;                        % SSE network starts from the same weights
W22 = W12;                        %

for epoch = 1:1000
    [W11, W12] = BackpropCE(W11, W12, X, D);
    [W21, W22] = BackpropXOR(W21, W22, X, D);

    % Measure the training error of both networks after this epoch.
    es1 = 0;
    es2 = 0;
    N = size(X, 1);
    for k = 1:N
        x = X(k, :)';
        d = D(k);

        v1 = W11*x;               % cross-entropy network forward pass
        y1 = Sigmoid(v1);
        v  = W12*y1;
        y  = Sigmoid(v);
        es1 = es1 + (d - y)^2;

        v1 = W21*x;               % SSE network forward pass
        y1 = Sigmoid(v1);
        v  = W22*y1;
        y  = Sigmoid(v);
        es2 = es2 + (d - y)^2;
    end
    E1(epoch) = es1 / N;
    E2(epoch) = es2 / N;
end

plot(E1, 'r')
hold on
plot(E2, 'b:')
xlabel('Epoch')
ylabel('Average of Training error')
legend('Cross Entropy', 'Sum of Squared Error')

01-18
09-01 1万+
11-25 5610
03-24 2048

“相关推荐”对你有帮助么？

• 非常没帮助
• 没帮助
• 一般
• 有帮助
• 非常有帮助

©️2022 CSDN 皮肤主题：1024 设计师：我叫白小胖

weixin_39836803

¥2 ¥4 ¥6 ¥10 ¥20

1.余额是钱包充值的虚拟货币，按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载，可以购买VIP、C币套餐、付费专栏及课程。