Backpropagation Algorithm Code

Original article: https://www.jianshu.com/p/9bc46738f00c
Both functions below are methods of a neural-network class that stores its parameters in `self.weights`, `self.biases`, and `self.activations`, and they assume `numpy` has been imported as `np`.

```python
def feedforward(self, x):
    # Forward pass: return the pre-activations z and activations a for input x
    a = np.copy(x)
    z_s = []
    a_s = [a]
    for i in range(len(self.weights)):
        activation_function = self.getActivationFunction(self.activations[i])
        z_s.append(self.weights[i].dot(a) + self.biases[i])
        a = activation_function(z_s[-1])
        a_s.append(a)
    return (z_s, a_s)
```
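The `feedforward` method above and the `backpropagation` method below look up activations by name through `getActivationFunction` and `getDerivitiveActivationFunction`, which this excerpt does not show. A minimal sketch of what those lookups might contain, assuming only sigmoid and linear activations are supported (the supported names and fallbacks here are assumptions, not the article's exact code):

```python
def getActivationFunction(self, name):
    # Assumed lookup: map an activation name to its function
    if name == 'sigmoid':
        return lambda x: 1.0 / (1.0 + np.exp(-x))
    return lambda x: x  # fall back to the identity (linear) activation

def getDerivitiveActivationFunction(self, name):
    # Assumed companion lookup: map a name to the derivative f'(z)
    if name == 'sigmoid':
        def dsigmoid(z):
            s = 1.0 / (1.0 + np.exp(-z))
            return s * (1.0 - s)
        return dsigmoid
    return lambda z: np.ones_like(z)  # derivative of the identity
```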

```python
def backpropagation(self, y, z_s, a_s):
    # Backward pass
    dw = []  # dL/dW
    db = []  # dL/db
    deltas = [None] * len(self.weights)  # delta = dL/dz, the error at each layer
    # Error at the output layer
    deltas[-1] = (y - a_s[-1]) * self.getDerivitiveActivationFunction(self.activations[-1])(z_s[-1])
    # Propagate the error backwards through the hidden layers
    for i in reversed(range(len(deltas) - 1)):
        deltas[i] = self.weights[i + 1].T.dot(deltas[i + 1]) * \
            self.getDerivitiveActivationFunction(self.activations[i])(z_s[i])
    batch_size = y.shape[1]
    # Average the gradients over the mini-batch
    db = [d.dot(np.ones((batch_size, 1))) / float(batch_size) for d in deltas]
    dw = [d.dot(a_s[i].T) / float(batch_size) for i, d in enumerate(deltas)]
    # Return the derivatives with respect to the weight matrices and biases
    return dw, db
```
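Because the output-layer error is built from `(y - a_s[-1])`, i.e. the negative gradient of a squared-error loss, the returned `dw` and `db` already point in the descent direction, so a training step adds them to the parameters. A minimal sketch of such an update loop (the method name `train` and the hyperparameter defaults are assumptions, not taken from the excerpt above):

```python
def train(self, x, y, epochs=100, lr=0.01):
    # Full-batch gradient descent; x and y are shaped (features, batch_size),
    # matching the column-vector convention used by feedforward above
    for _ in range(epochs):
        z_s, a_s = self.feedforward(x)
        dw, db = self.backpropagation(y, z_s, a_s)
        # '+' because dw and db already carry the descent direction
        self.weights = [w + lr * dwi for w, dwi in zip(self.weights, dw)]
        self.biases = [b + lr * dbi for b, dbi in zip(self.biases, db)]
```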

Below is a simple Python implementation of the backpropagation algorithm:

```python
import numpy as np

# Define the network structure and parameters
input_size = 2
hidden_size = 3
output_size = 1

# Weights and biases
W1 = np.random.randn(input_size, hidden_size)
b1 = np.random.randn(hidden_size)
W2 = np.random.randn(hidden_size, output_size)
b2 = np.random.randn(output_size)

# Activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Loss function
def mse_loss(y_true, y_pred):
    return ((y_true - y_pred) ** 2).mean()

# Forward pass
def forward(x):
    z1 = np.dot(x, W1) + b1
    a1 = sigmoid(z1)
    z2 = np.dot(a1, W2) + b2
    a2 = sigmoid(z2)
    return a1, a2

# Backward pass
def backward(x, y_true, a1, a2):
    # Error at the output layer
    delta2 = (a2 - y_true) * (a2 * (1 - a2))
    # Error at the hidden layer
    delta1 = np.dot(delta2, W2.T) * (a1 * (1 - a1))
    # Gradients of the weights and biases
    dW2 = np.dot(a1.T, delta2)
    db2 = delta2.sum(axis=0)
    dW1 = np.dot(x.T, delta1)
    db1 = delta1.sum(axis=0)
    return dW1, db1, dW2, db2

# Training loop
def train(x, y_true, learning_rate=0.1, epochs=1000):
    global W1, b1, W2, b2  # the parameters above are module-level globals
    for i in range(epochs):
        # Forward pass
        a1, a2 = forward(x)
        # Compute the loss
        loss = mse_loss(y_true, a2)
        # Backward pass
        dW1, db1, dW2, db2 = backward(x, y_true, a1, a2)
        # Gradient-descent update
        W1 -= learning_rate * dW1
        b1 -= learning_rate * db1
        W2 -= learning_rate * dW2
        b2 -= learning_rate * db2
        # Report the loss periodically
        if i % 100 == 0:
            print(f'Epoch {i}, Loss: {loss:.3f}')
    # Return the trained weights and biases
    return W1, b1, W2, b2
```

Note that this is only a simple implementation of the backpropagation algorithm; real applications will likely require further optimization and refinement.
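As a quick smoke test, the standalone functions above can be run on a toy dataset such as XOR (the data and hyperparameters below are illustrative, and convergence depends on the random initialization):

```python
# Toy XOR dataset: 4 samples with 2 features each
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y_true = np.array([[0], [1], [1], [0]], dtype=float)

W1, b1, W2, b2 = train(x, y_true, learning_rate=0.5, epochs=1000)
_, y_pred = forward(x)
print(np.round(y_pred, 2))  # ideally close to [[0], [1], [1], [0]]
```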