Implementing a BP neuron, with the backpropagation derivation from the hidden layer to the output layer

import numpy as np
import matplotlib.pyplot as plt
# Cost: mean squared error
def cost(target, pred):
    return np.sum(0.5 * np.power(target - pred, 2))

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# Logistic hypothesis: sigmoid of the weighted input
def log_hypothesis(w, x):
    return sigmoid(w @ x)
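The gradient-descent update used in the loop below is the chain rule applied to this cost. With $z = w^\top x$, $h = \sigma(z)$, and $E = \frac{1}{2}(h - y)^2$:

$$
\frac{\partial E}{\partial w}
= \frac{\partial E}{\partial h}\cdot\frac{\partial h}{\partial z}\cdot\frac{\partial z}{\partial w}
= (h - y)\cdot h(1 - h)\cdot x
$$

This is exactly the expression assigned to g in the training loop.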

x = np.array([1.0, 0.05, 0.10])   # input, with a leading 1 for the bias term
w = np.array([0.35, 0.15, 0.20])  # bias weight followed by the two input weights
y = 0.1                           # target output

iters = 1000  # gradient-descent iterations
alpha = 0.1   # learning rate
costs = []    # cost per iteration, for plotting
for i in range(iters):
    h = log_hypothesis(w, x)       # forward pass
    g = (h - y) * h * (1 - h) * x  # dE/dw from the chain rule above
    w -= alpha * g                 # gradient-descent step
    costs.append(cost(y, h))

print(log_hypothesis(w, x))

plt.plot(range(len(costs)),costs)
#plt.show()
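As a sanity check on the derivation, here is a minimal numerical gradient check I am adding (numeric_grad is a hypothetical helper, not part of the original post): it compares the analytic gradient against a central finite difference of the cost defined above.

def numeric_grad(w, x, y, eps=1e-6):
    # central difference of cost(y, h(w)) in each coordinate of w
    g = np.zeros_like(w)
    for j in range(len(w)):
        w_plus, w_minus = w.copy(), w.copy()
        w_plus[j] += eps
        w_minus[j] -= eps
        g[j] = (cost(y, log_hypothesis(w_plus, x))
                - cost(y, log_hypothesis(w_minus, x))) / (2 * eps)
    return g

w0 = np.array([0.35, 0.15, 0.20])
h0 = log_hypothesis(w0, x)
analytic = (h0 - y) * h0 * (1 - h0) * x
print(np.allclose(analytic, numeric_grad(w0, x, y)))  # should print True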
# Implicit differentiation note: for F(x, y) = 0, F_x + F_y * y' = 0, so y' = -F_x / F_y
# Two-layer network on the same input x. Row k of each matrix holds the
# weights into neuron k of the next layer; column 0 is the bias weight.
w1 = np.array([[0.35, 0.15, 0.20], [0.35, 0.25, 0.30]])  # input -> hidden
w2 = np.array([[0.60, 0.40, 0.45], [0.60, 0.50, 0.55]])  # hidden -> output

y = np.array([0.01, 0.99])  # target outputs

a2 = log_hypothesis(w1, x)   # hidden activations
a2 = np.insert(a2, 0, 1)     # prepend 1 so the bias weights in column 0 apply
a3 = log_hypothesis(w2, a2)  # output activations

# With these weights: a2 ≈ [1, 0.5933, 0.5969], a3 ≈ [0.7514, 0.7729]
print(a2, a3, sep='\n')
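Each per-weight update below is one application of the chain rule. For the output-layer weight w2[0][1], from hidden unit 1 to output 1:

$$
\frac{\partial E_1}{\partial w^{(2)}_{11}}
= \frac{\partial E_1}{\partial a_{31}}\cdot\frac{\partial a_{31}}{\partial z_{31}}\cdot\frac{\partial z_{31}}{\partial w^{(2)}_{11}}
= (a_{31} - y_1)\cdot a_{31}(1 - a_{31})\cdot a_{21}
$$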

# Output-layer weight w2[0][1] (hidden unit 1 -> output 1)
dE1_a31 = a3[0] - y[0]          # dE1/da31 for the squared-error cost
da31_z31 = a3[0] * (1 - a3[0])  # sigmoid derivative
dz31_w11 = a2[1]                # dz31/dw11 = incoming activation
dE1_w11 = dE1_a31 * da31_z31 * dz31_w11
w11_new = w2[0][1] - alpha * dE1_w11
print(w11_new)

# Output-layer weight w2[1][2] (hidden unit 2 -> output 2), same pattern
dE2_a32 = a3[1] - y[1]
da32_z32 = a3[1] * (1 - a3[1])
dz32_w22 = a2[2]
dE2_w22 = dE2_a32 * da32_z32 * dz32_w22
w22_new = w2[1][2] - alpha * dE2_w22
print(w22_new)
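For a hidden-layer weight the error fans in from both outputs through the output-layer weights, which is the sum the code below computes:

$$
\frac{\partial E}{\partial a_{21}}
= \sum_{k=1}^{2} (a_{3k} - y_k)\, a_{3k}(1 - a_{3k})\, w^{(2)}_{k1},
\qquad
\frac{\partial E}{\partial w^{(1)}_{11}}
= \frac{\partial E}{\partial a_{21}}\cdot a_{21}(1 - a_{21})\cdot x_1
$$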

# Hidden-layer weight w1[0][1]: both outputs contribute to dE/da21,
# through the output-layer weights w2[0][1] and w2[1][1]
dE1_a21 = dE1_a31 * da31_z31 * w2[0][1]
dE2_a21 = dE2_a32 * da32_z32 * w2[1][1]
dE_a21 = dE1_a21 + dE2_a21
da21_z21 = a2[1] * (1 - a2[1])  # sigmoid derivative at the hidden unit
dz21_w11 = x[1]                 # dz21/dw11 = the input feeding that weight
dE_w11 = dE_a21 * da21_z21 * dz21_w11
w11_new = w1[0][1] - alpha * dE_w11
print(w11_new)
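The per-weight steps above generalize to one vectorized backward pass. A minimal sketch under the variables already defined (w1, w2, x, y, a2, a3, alpha); delta3 and delta2 are names I am introducing for the layer error signals:

delta3 = (a3 - y) * a3 * (1 - a3)                        # output error signal
dW2 = np.outer(delta3, a2)                               # dE/dw2, bias column included
delta2 = (w2[:, 1:].T @ delta3) * a2[1:] * (1 - a2[1:])  # back through w2, bias column dropped
dW1 = np.outer(delta2, x)                                # dE/dw1
w2_next = w2 - alpha * dW2
w1_next = w1 - alpha * dW1
print(w1_next, w2_next, sep='\n')  # entries agree with the per-weight updates above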
