机器学习7:BP神经网络
BP神经网络
原理
![](https://img-blog.csdnimg.cn/20200204125244664.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L21hdGFmZWl5YW5sbA==,size_16,color_FFFFFF,t_70)
![](https://img-blog.csdnimg.cn/2020020412550188.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L21hdGFmZWl5YW5sbA==,size_16,color_FFFFFF,t_70)
![](https://img-blog.csdnimg.cn/20200204125539295.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L21hdGFmZWl5YW5sbA==,size_16,color_FFFFFF,t_70)
激活函数
sigmoid
Tanh函数和Softsign函数
ReLU函数
算法实现
BP网络解决异或问题
import numpy as np
# Training inputs for XOR: the first column is a constant 1 acting as the
# bias term, the remaining two columns are the actual binary inputs.
X = np.array([[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
# Target labels (XOR of the two input bits), one row vector of shape (1, 4).
Y = np.array([[0,1,1,0]])
# Weight initialization, uniformly random in the range -1 to 1.
# V: input-to-hidden weights (3 inputs incl. bias -> 4 hidden units).
# W: hidden-to-output weights (4 hidden units -> 1 output).
V = np.random.random((3,4))*2-1
W = np.random.random((4,1))*2-1
print(V)
print(W)
# Learning rate for gradient descent.
lr = 0.11
def sigmoid(x):
return 1/(1+np.exp(-x))
def dsigmoid(y):
    """Derivative of the sigmoid, expressed in terms of its output y = sigmoid(z)."""
    return y * (1.0 - y)
def update():
    """Perform one full-batch backpropagation step on the global network.

    Reads the module-level data (X, Y), weights (V, W) and learning rate
    (lr); runs a forward pass, back-propagates the output error through
    both layers, and writes the corrected weights back to the globals.
    """
    global X, Y, W, V, lr
    # Forward pass.
    hidden = sigmoid(np.dot(X, V))        # hidden-layer activations, shape (4, 4)
    output = sigmoid(np.dot(hidden, W))   # output-layer activations, shape (4, 1)
    # Backward pass: error deltas via the chain rule (dsigmoid takes the
    # layer's own output, not its pre-activation).
    output_delta = (Y.T - output) * dsigmoid(output)
    hidden_delta = output_delta.dot(W.T) * dsigmoid(hidden)
    # Apply the learning-rate-scaled corrections. Both deltas were computed
    # from the pre-update weights, so the assignment order is immaterial.
    V = V + lr * X.T.dot(hidden_delta)
    W = W + lr * hidden.T.dot(output_delta)
[[ 0.45351321 -0.31894805 -0.7458763 0.4053608 ]
[ 0.712348 -0.75128113 -0.44691941 -0.08700177]
[ 0.4251679 0.43617922 -0.1939801 -0.42499698]]
[[0.4762917 ]
[0.49606807]
[0.65692 ]
[0.80087638]]
# Train for 20000 full-batch epochs, reporting the mean absolute error
# on the training set every 500 epochs.
for epoch in range(20000):
    update()  # one backprop weight update
    if epoch % 500 == 0:
        hidden = sigmoid(np.dot(X, V))
        output = sigmoid(np.dot(hidden, W))
        print('Error:', np.mean(np.abs(Y.T - output)))

# Final forward pass with the trained weights; L2 holds the raw
# sigmoid outputs for each of the four XOR input rows.
L1 = sigmoid(np.dot(X, V))
L2 = sigmoid(np.dot(L1, W))
print(L2)
def judge(x):
    """Threshold a sigmoid output at 0.5: return 1 if x >= 0.5, else 0."""
    return 1 if x >= 0.5 else 0
# Print the thresholded 0/1 prediction for each of the four input rows.
for predicted in map(judge, L2):
    print(predicted)
Error: 0.4992098482462489
Error: 0.5001822489723924
Error: 0.4998436601839132
Error: 0.49942675108491763
Error: 0.49854350219100646
Error: 0.49589862657157363
Error: 0.4863204143221485
Error: 0.45834605307996024
Error: 0.4176151085644827
Error: 0.3804392343251921
Error: 0.34127531423040813
Error: 0.2578139109156126
Error: 0.16934597307497748
Error: 0.12453032431093806
Error: 0.10005365197009156
Error: 0.08481152496371526
Error: 0.0743801633333893
Error: 0.06675369190343491
Error: 0.060905427868935526
Error: 0.05625769620503522
Error: 0.052460736268040556
Error: 0.049290202197416685
Error: 0.04659537076700852
Error: 0.04427107243376421
Error: 0.04224157657696884
Error: 0.040450884048937735
Error: 0.038856636942982185
Error: 0.03742616263244191
Error: 0.036133825826401726
Error: 0.034959209127802385
Error: 0.03388583374158297
Error: 0.03290024143074452
Error: 0.031991323609506406
Error: 0.031149822970656327
Error: 0.03036795778178773
Error: 0.02963913484778234
Error: 0.028957727530054697
Error: 0.028318902157113017
Error: 0.02771848088408489
Error: 0.027152832324249365
[[0.0213492 ]
[0.9738751 ]
[0.96953351]
[0.0285387 ]]
0
1
1
0
In [ ]: