机器学习8 人工神经网络

8 人工神经网络

'''BP
# 1 Initial
# 2 Forward
# 3 Backward
# 4 Gradient
# 5 Train
X[m,n]        y[m,1]
w1[n1,n]      (X,w1.T)    o1[m,n1]
w2[n2,n1]     (o1,w2.T)   o2[m,n2]
w3[1,n2]     (o2,w3.T)   o3[m,1]
L = (o3-y).T @ (o3-y)                      # scalar loss; dL/do3 = 2*(o3-y), factor 2 absorbed into eta
dL/dw3 = ( dL/do3 * o3*(1-o3) ).T @ o2     # [1,n2]
dL/do2 = ( dL/do3 * o3*(1-o3) ) @ w3       # [m,n2]
dL/dw2 = ( dL/do2 * o2*(1-o2) ).T @ o1     # [n2,n1]
dL/do1 = ( dL/do2 * o2*(1-o2) ) @ w2       # [m,n1]
dL/dw1 = ( dL/do1 * o1*(1-o1) ).T @ X      # [n1,n]

'''


import numpy as np
class BP_NerualNet():
    """Fully-connected feed-forward network with sigmoid activations,
    trained by plain back-propagation (full-batch gradient descent).

    Layer sizes are [input_dim] + layers + [out_num]; weight i has shape
    [fan_out, fan_in] so the forward pass is ``o @ w.T``.
    """

    def __init__(self, out_num=1, layers=None, input_dim=3):
        # layers=None avoids the mutable-default pitfall; input_dim replaces
        # the original (buggy) read of a global X inside __init__.
        if layers is None:
            layers = [3, 3]
        self.o = []                           # per-layer outputs [X, o1, ..., y_pre]
        self.layers_num = len(layers) + 1     # number of weight matrices
        self.layers = [input_dim] + layers + [out_num]
        # w[i] : [layers[i+1], layers[i]]
        self.w = [np.random.random([self.layers[i + 1], self.layers[i]])
                  for i in range(self.layers_num)]
        self.do = [0] * self.layers_num       # dL/d(output), deepest layer first
        self.dw = [0] * self.layers_num       # dL/d(weight), deepest layer first

    def sigmoid(self, x):
        """Elementwise logistic function."""
        return 1.0 / (1 + np.exp(-x))

    def forward(self, X):
        """Propagate X through all layers; returns the network output [m, out_num]."""
        self.o = [X]
        for i in range(self.layers_num):
            self.o.append(self.sigmoid(self.o[i] @ self.w[i].T))
        return self.o[-1]

    def backward(self, epoches, eta, y, X=None):
        """Train for `epoches` full-batch gradient steps with learning rate `eta`.

        X defaults to the input cached by the last forward() call, which keeps
        the original call pattern ``forward(X); backward(...)`` working without
        relying on a global X.
        """
        if X is None:
            if not self.o:
                raise ValueError("no input available: call forward(X) first or pass X")
            X = self.o[0]
        for epoch in range(epoches):
            y_pre = self.forward(X)
            # Output-layer error is the elementwise residual (factor 2 of the
            # squared-loss gradient is absorbed into eta) — the original used
            # the scalar loss here, which breaks the gradient shapes.
            self.do[0] = y_pre - y
            for i in range(self.layers_num):            # deepest layer first
                # sigmoid'(a) = o*(1-o) applied to the layer's own output
                z = self.do[i] * self.o[-1 - i] * (1 - self.o[-1 - i])
                self.dw[i] = z.T @ self.o[-2 - i]       # gradient w.r.t. weight
                if i < self.layers_num - 1:
                    # Error for the NEXT (shallower) layer; the original wrote
                    # do[i], leaving do[i+1] at its initial 0 and zeroing all
                    # lower-layer gradients.
                    self.do[i + 1] = z @ self.w[-1 - i]
            # Per-layer update; dw[i] pairs with w[-1-i]. (Subtracting a ragged
            # np.array from the list of weights, as before, fails on modern numpy.)
            for i in range(self.layers_num):
                self.w[-1 - i] -= eta * self.dw[i]
        print('self.w :', self.w)




if __name__ == '__main__':
    # Synthetic regression task: targets y are produced by a fixed 3-layer
    # sigmoid "teacher" network, so the BP net is in principle able to fit them.
    X = np.random.random([100, 3])
    # Fixed teacher weights (used only to synthesise the targets).
    w1 = np.array([[0.11, 0.12, 0.13],
                   [0.21, 0.22, 0.23],
                   [0.31, 0.32, 0.33]])
    w2 = w1 + 0.12
    w3 = np.array([[0.1, 0.2, 0.3]])      # shape [1, 3]
    bp = BP_NerualNet()
    y = bp.sigmoid(bp.sigmoid(bp.sigmoid(X @ w1.T) @ w2.T) @ w3.T)
    bp.forward(X)                         # caches X before training
    bp.backward(epoches=100, eta=1e-5, y=y)


'''
self.w : [array([[0.57376611, 0.15738754, 0.42096851],
       [0.90004375, 0.77413897, 0.09904761],
       [0.80463757, 0.93983183, 0.28247711]])
 array([[0.91310057, 0.12393639, 0.4271968 ],
       [0.37687411, 0.30213972, 0.07207602],
       [0.29266308, 0.94054451, 0.79918474]])
 array([[0.51168146, 0.04212335, 0.88038857]])] 3

Process finished with exit code 0

'''
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值