BP Neural Network: A Python Implementation

import numpy as np
import matplotlib.pyplot as plt

def relu(x):
    return np.maximum(0, x)

def relu_derivative(x):
    return np.where(x > 0, 1, 0)

def init_data(x_dim, pam, x_test_dim):
    # Generate random inputs and linear targets Y = X @ pam for the train and test sets.
    X = np.random.uniform(0.0, 1.0, x_dim)
    X_test = np.random.uniform(0.0, 1.0, x_test_dim)
    Y = X.dot(np.array(pam))
    Y = np.expand_dims(Y, axis=0).T              # shape (n_samples, 1)
    Y_test = X_test.dot(np.array(pam))
    Y_test = np.expand_dims(Y_test, axis=0).T
    return X, Y, X_test, Y_test

def initialize(input_size, hidden_size, output_size):
    W1 = np.random.randn(input_size, hidden_size)
    b1 = np.zeros(hidden_size)
    W2 = np.random.randn(hidden_size, output_size)
    b2 = np.zeros(output_size)
    return W1, b1, W2, b2

def forward(X, W1, b1, W2, b2):
    # Hidden layer, then output layer; the output layer also uses ReLU,
    # which works here because the targets are non-negative.
    Z1 = np.dot(X, W1) + b1
    A1 = relu(Z1)
    Z2 = np.dot(A1, W2) + b2
    A2 = relu(Z2)
    return Z1, A1, Z2, A2

def loss(Y, Y_pred):
    # Mean squared error
    return np.mean((Y - Y_pred) ** 2)

def backpropagation(X, Y, Z1, A1, Z2, A2, W1, b1, W2, b2, learning_rate):
    # Gradients of the sum of squared errors; the 1/N factor of the mean loss
    # is absorbed into the learning rate.
    dZ2 = 2 * (A2 - Y) * relu_derivative(Z2)       # error at the output layer
    dW2 = np.dot(A1.T, dZ2)
    db2 = np.sum(dZ2, axis=0)
    dZ1 = np.dot(dZ2, W2.T) * relu_derivative(Z1)  # error propagated back to the hidden layer
    dW1 = np.dot(X.T, dZ1)
    db1 = np.sum(dZ1, axis=0)

    # Gradient-descent update
    W1 -= learning_rate * dW1
    b1 -= learning_rate * db1
    W2 -= learning_rate * dW2
    b2 -= learning_rate * db2

    return W1, b1, W2, b2

def train(input_size, output_size, X, Y, hidden_size, learning_rate, epochs):
    W1, b1, W2, b2 = initialize(input_size, hidden_size, output_size)
    b1 = np.expand_dims(b1, axis=0)   # make the biases row vectors so they broadcast over the batch
    b2 = np.expand_dims(b2, axis=0)
    loss_list = []
    for epoch in range(epochs):
        Z1, A1, Z2, A2 = forward(X, W1, b1, W2, b2)
        loss1 = loss(Y, A2)
        W1, b1, W2, b2 = backpropagation(X, Y, Z1, A1, Z2, A2, W1, b1, W2, b2, learning_rate)
        print("epoch {}: loss = {}".format(epoch, loss1))
        loss_list.append(loss1)

    return W1, b1, W2, b2, loss_list

def predict(X_test, Y_test, W1, b1, W2, b2):
    Z1, A1, Z2, A2 = forward(X_test, W1, b1, W2, b2)
    loss1 = loss(Y_test, A2)
    print("Test loss:", loss1)
    return A2

x_dim = (30, 3)        # 30 training samples with 3 features each
x_test_dim = (5, 3)    # 5 test samples
pam = [1.3, 0.5, 1.5]  # true linear coefficients used to generate the targets
X, Y, X_test, Y_test = init_data(x_dim, pam, x_test_dim)
# Train the network
hidden_size = 25
learning_rate = 0.01
epochs = 100
input_size = x_dim[1]
output_size = 1
W1, b1, W2, b2, loss_list = train(input_size, output_size, X, Y, hidden_size, learning_rate, epochs)
# Plot the loss curve
plt.figure()
x = np.arange(0, epochs)
plt.title("loss")
plt.plot(x, loss_list)
plt.show()
# Predict on the test set
predictions = predict(X_test, Y_test, W1, b1, W2, b2)
print("Difference between predictions and true values:\n", predictions - Y_test)
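
# Gradient derivation (for reference)

For reference, the gradients that `backpropagation` computes follow from the chain rule applied to the sum of squared errors (the 1/N factor of the mean-squared-error loss is folded into the learning rate, as in the code above):

```latex
\delta_2 = \frac{\partial L}{\partial Z_2} = 2\,(A_2 - Y)\odot \mathrm{relu}'(Z_2)
\qquad
\delta_1 = \frac{\partial L}{\partial Z_1} = \bigl(\delta_2 W_2^{\top}\bigr)\odot \mathrm{relu}'(Z_1)

\frac{\partial L}{\partial W_2} = A_1^{\top}\delta_2, \qquad
\frac{\partial L}{\partial b_2} = \sum_i \delta_2^{(i)}, \qquad
\frac{\partial L}{\partial W_1} = X^{\top}\delta_1, \qquad
\frac{\partial L}{\partial b_1} = \sum_i \delta_1^{(i)}
```

Here the element-wise product is taken with the ReLU derivative, and the sums over i run over the samples in the batch.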

# Sample output

Test loss: 0.13031508881235027
Difference between predictions and true values:
 [[0.10231436]
 [0.29744002]
 [0.30471985]
 [0.17542227]
 [0.65498816]]
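
# Gradient check (optional)

A quick way to gain confidence in the hand-derived gradients is a finite-difference comparison. Below is a minimal sketch, assuming the script above has already run so that X, Y, the trained parameters, forward and relu_derivative are all in scope; the checked entry (i, j) = (0, 0) of W2 is an arbitrary choice.

```python
# Finite-difference check of one entry of dW2 (sketch, not part of the original script).
Z1, A1, Z2, A2 = forward(X, W1, b1, W2, b2)
dZ2 = 2 * (A2 - Y) * relu_derivative(Z2)        # same convention as backpropagation
dW2_analytic = np.dot(A1.T, dZ2)

eps = 1e-6
i, j = 0, 0                                     # arbitrary entry of W2 to check
W2_plus, W2_minus = W2.copy(), W2.copy()
W2_plus[i, j] += eps
W2_minus[i, j] -= eps
# Summed squared error at the perturbed weights (forward(...)[3] is A2).
loss_plus = np.sum((forward(X, W1, b1, W2_plus, b2)[3] - Y) ** 2)
loss_minus = np.sum((forward(X, W1, b1, W2_minus, b2)[3] - Y) ** 2)
dW2_numeric = (loss_plus - loss_minus) / (2 * eps)

# The two values should agree closely (small differences are possible near ReLU kinks).
print("analytic:", dW2_analytic[i, j], "numeric:", dW2_numeric)
```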
 

# Loss curve

(figure: training loss over the 100 epochs, as produced by plt.show())

# A second implementation (sigmoid activation)

Here is another simple BP neural network implementation in Python, for reference:

```python
import numpy as np

class NeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.learning_rate = 0.1
        # Initialize weights and biases
        self.weights1 = np.random.randn(self.input_size, self.hidden_size)
        self.bias1 = np.zeros((1, self.hidden_size))
        self.weights2 = np.random.randn(self.hidden_size, self.output_size)
        self.bias2 = np.zeros((1, self.output_size))

    def sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def sigmoid_derivative(self, z):
        return self.sigmoid(z) * (1 - self.sigmoid(z))

    def feedforward(self, X):
        # Hidden layer pre-activation and output
        self.z1 = np.dot(X, self.weights1) + self.bias1
        self.a1 = self.sigmoid(self.z1)
        # Output layer pre-activation and output
        self.z2 = np.dot(self.a1, self.weights2) + self.bias2
        self.a2 = self.sigmoid(self.z2)
        return self.a2

    def backward(self, X, y, output):
        # Output layer error and gradient
        self.output_error = y - output
        self.output_gradient = self.output_error * self.sigmoid_derivative(self.z2)
        # Hidden layer error and gradient
        self.hidden_error = np.dot(self.output_gradient, self.weights2.T)
        self.hidden_gradient = self.hidden_error * self.sigmoid_derivative(self.z1)
        # Update weights and biases
        self.weights2 += self.learning_rate * np.dot(self.a1.T, self.output_gradient)
        self.bias2 += self.learning_rate * np.sum(self.output_gradient, axis=0, keepdims=True)
        self.weights1 += self.learning_rate * np.dot(X.T, self.hidden_gradient)
        self.bias1 += self.learning_rate * np.sum(self.hidden_gradient, axis=0)

    def train(self, X, y):
        output = self.feedforward(X)
        self.backward(X, y, output)

    def predict(self, X):
        return self.feedforward(X)
```

Usage example:

```python
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

nn = NeuralNetwork(2, 3, 1)
for i in range(10000):
    nn.train(X, y)

print(nn.predict(X))
```

This implementation uses sigmoid as the activation function and updates the weights and biases with full-batch gradient descent. Of course, this is only a simple implementation; real applications may need more complex network structures and optimization algorithms.
