Backpropagation Algorithm Code

Original article (Chinese): https://www.jianshu.com/p/9bc46738f00c

The two methods below implement the forward pass and the backward (gradient) pass for a fully connected network whose layers are stored as lists of weight matrices (self.weights), bias vectors (self.biases), and activation names (self.activations). Both are methods of the network class and assume numpy is imported as np; samples are stored as columns of the input matrix.
def feedforward(self, x):  # forward pass
    # return the pre-activations z and activations a for input x
    a = np.copy(x)
    z_s = []
    a_s = [a]
    for i in range(len(self.weights)):
        activation_function = self.getActivationFunction(self.activations[i])
        z_s.append(self.weights[i].dot(a) + self.biases[i])
        a = activation_function(z_s[-1])
        a_s.append(a)
    return (z_s, a_s)
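The helpers getActivationFunction and getDerivitiveActivationFunction are referenced above but not shown in this excerpt. A minimal sketch of how they might look, assuming activations are selected by string name; the exact set of supported names ('sigmoid', 'relu', linear fallback) is an assumption, and the misspelled method name is kept as the code calls it:

@staticmethod
def getActivationFunction(name):
    # map an activation name to the elementwise function (assumed interface)
    if name == 'sigmoid':
        return lambda x: 1.0 / (1.0 + np.exp(-x))
    elif name == 'relu':
        return lambda x: np.maximum(0.0, x)
    else:
        # fall back to a linear activation
        return lambda x: x

@staticmethod
def getDerivitiveActivationFunction(name):
    # map an activation name to its elementwise derivative, evaluated at z
    if name == 'sigmoid':
        def sigmoid_prime(z):
            s = 1.0 / (1.0 + np.exp(-z))
            return s * (1.0 - s)
        return sigmoid_prime
    elif name == 'relu':
        return lambda z: (z > 0).astype(float)
    else:
        # derivative of the linear activation is 1 everywhere
        return lambda z: np.ones_like(z)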

def backpropagation(self, y, z_s, a_s):  # backward pass
    dw = []  # dL/dW for each layer
    db = []  # dL/db for each layer
    deltas = [None] * len(self.weights)  # delta = dL/dz, the "error" of each layer
    # error of the output layer: delta_L = (y - a_L) * f'(z_L)
    deltas[-1] = (y - a_s[-1]) * \
        self.getDerivitiveActivationFunction(self.activations[-1])(z_s[-1])
    # propagate the error backwards: delta_l = (W_{l+1}^T delta_{l+1}) * f'(z_l)
    for i in reversed(range(len(deltas) - 1)):
        deltas[i] = self.weights[i + 1].T.dot(deltas[i + 1]) * \
            self.getDerivitiveActivationFunction(self.activations[i])(z_s[i])
    # average the gradients over the mini-batch (samples are columns)
    batch_size = y.shape[1]
    db = [d.dot(np.ones((batch_size, 1))) / float(batch_size) for d in deltas]
    dw = [d.dot(a_s[i].T) / float(batch_size) for i, d in enumerate(deltas)]
    # return the derivatives with respect to the weight matrices and biases
    return dw, db
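A minimal usage sketch of the two methods, assuming a network object nn that carries the weights/biases/activations lists; the layer sizes, data, and learning rate below are illustrative, not from the original:

import numpy as np

# hypothetical 3-4-1 network; samples are stored as columns
nn.weights = [np.random.randn(4, 3), np.random.randn(1, 4)]
nn.biases = [np.random.randn(4, 1), np.random.randn(1, 1)]
nn.activations = ['sigmoid', 'linear']

x = np.random.randn(3, 10)   # batch of 10 inputs
y = np.random.randn(1, 10)   # matching targets
lr = 0.1                     # illustrative learning rate

z_s, a_s = nn.feedforward(x)
dw, db = nn.backpropagation(y, z_s, a_s)

# because the output delta is (y - a) * f'(z), the returned dw/db already
# point in the descent direction for the squared loss, so the update adds them
nn.weights = [w + lr * g for w, g in zip(nn.weights, dw)]
nn.biases = [b + lr * g for b, g in zip(nn.biases, db)]

Note the sign convention: defining the output delta from (y - a) bakes the minus sign of the squared-loss gradient into dw and db, which is why the update uses + rather than -.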
