A Python Implementation of a BP (Backpropagation) Neural Network

Test code and more machine learning algorithms can be found at https://github.com/WiseDoge/ML-by-Python

```python
import numpy as np


class BPNetWork(object):
    """
    A fully connected neural network, trained with the backpropagation (BP) algorithm.
    """

    def __init__(self, layers, act_func='tanh'):
        """
        :param layers: the structure of the network, as a tuple of layer sizes
        :param act_func: the activation function, 'tanh' (default) or 'sigmoid'
        Examples:
        ann = BPNetWork((2, 3, 1))
        builds a network with one input layer, one hidden layer, and one
        output layer, holding 2, 3, and 1 nodes respectively.
        ann = BPNetWork((2, 3, 3, 1))
        builds a network with one input layer, two hidden layers, and one
        output layer; the input layer has 2 nodes, each hidden layer has
        3 nodes, and the output layer has 1 node.
        """

        # Node (activation) values, one array per layer.
        self.networks = []

        # Weights, initialized uniformly in [-1, 1).
        self.weights = []
        for i in range(len(layers) - 1):
            weight = 2 * np.random.random((layers[i], layers[i + 1])) - 1
            network = np.ones(layers[i])
            self.networks.append(network)
            self.weights.append(weight)
        self.networks.append(np.ones(layers[-1]))
        # Note: the layers have different sizes, so self.networks is kept as
        # a plain list of 1-D arrays (np.array() on a ragged list would fail).

        # Thresholds (biases), initialized uniformly in [-1, 1).
        self.thresholds = []
        for i in range(1, len(layers)):
            threshold = 2 * np.random.random(layers[i]) - 1
            self.thresholds.append(threshold)

        # Choose the activation function and its derivative.
        if act_func == 'tanh':
            self.act_func = self.tanh
            self.dact_func = self.dtanh
        else:
            self.act_func = self.sigmoid
            self.dact_func = self.dsigmoid

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def dsigmoid(self, a):
        # Derivative expressed in terms of the activation a = sigmoid(x):
        # sigmoid'(x) = a * (1 - a). back_propagate passes in the node
        # values (activations), not the pre-activation inputs.
        return a * (1 - a)

    def tanh(self, x):
        return np.tanh(x)

    def dtanh(self, a):
        # Derivative in terms of the activation a = tanh(x): tanh'(x) = 1 - a^2.
        return 1.0 - a ** 2

    def fit(self, train_x, train_y, epochs, learn_rate):
        """
        :param train_x: training inputs
        :param train_y: training targets
        :param epochs: number of iterations
        :param learn_rate: learning rate (step size)
        :return: None
        Fit the network by stochastic gradient descent: each iteration
        picks one random training sample, runs a forward pass, then
        backpropagates the error.
        """
        for _ in range(epochs):
            i = np.random.randint(train_x.shape[0])
            self.update(train_x[i])
            self.back_propagate(train_y[i], learn_rate)

    def predict(self, test_x):
        """
        :param test_x: a test input
        :return: the predicted value (the output-layer activations)
        """
        self.update(test_x)
        return self.networks[-1].copy()

    def update(self, inputs):
        """
        :param inputs: the input vector X
        :return: None
        Run one forward pass, updating every node value in the network.
        """
        self.networks[0] = np.array(inputs, dtype=float)
        for i in range(len(self.weights)):
            # Each node computes act(x . W - threshold).
            count = np.dot(self.networks[i], self.weights[i]) - self.thresholds[i]
            self.networks[i + 1] = self.act_func(count)

    def back_propagate(self, y, rate):
        """
        :param y: target value
        :param rate: learning rate
        :return: None
        The BP step: propagate the output error backwards and update
        every weight matrix and threshold vector.
        """
        # Gradient at the output layer: error * act'(activation).
        errors = y - self.networks[-1]
        gradients = [self.dact_func(self.networks[-1]) * errors]

        # A node computes act(x . W - threshold), hence the minus sign
        # in the threshold updates.
        self.thresholds[-1] += (-1) * rate * gradients[-1]
        for i in range(len(self.weights) - 1, 0, -1):
            # Propagate the gradient back through weight layer i.
            gradients.append(gradients[-1].dot(self.weights[i].T) * self.dact_func(self.networks[i]))
            self.thresholds[i - 1] += (-1) * rate * gradients[-1]

        # gradients was built from the output layer backwards; reverse it
        # so that gradients[i] lines up with weights[i].
        gradients.reverse()
        for i in range(len(self.weights)):
            # Outer product of layer-i activations and layer-(i+1) gradients.
            self.weights[i] += rate * self.networks[i].reshape((-1, 1)) * gradients[i]
```
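
A minimal usage sketch (not part of the original post; the layer sizes, epoch count, and learning rate are arbitrary illustrative choices) that trains the class above on the XOR problem:

```python
import numpy as np

# Hypothetical usage: fit BPNetWork on XOR and inspect the predictions.
train_x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
train_y = np.array([[0], [1], [1], [0]], dtype=float)

ann = BPNetWork((2, 3, 1), act_func='tanh')
ann.fit(train_x, train_y, epochs=10000, learn_rate=0.1)

for x in train_x:
    print(x, '->', ann.predict(x))
```

With enough iterations the outputs should approach 0 and 1, although convergence depends on the random initialization.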
For comparison, here is another simple BP neural network implementation in Python:

```python
import numpy as np

class NeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.learning_rate = 0.1
        # Initialize the weights and biases.
        self.weights1 = np.random.randn(self.input_size, self.hidden_size)
        self.bias1 = np.zeros((1, self.hidden_size))
        self.weights2 = np.random.randn(self.hidden_size, self.output_size)
        self.bias2 = np.zeros((1, self.output_size))

    def sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def sigmoid_derivative(self, z):
        return self.sigmoid(z) * (1 - self.sigmoid(z))

    def feedforward(self, X):
        # Hidden layer input and output.
        self.z1 = np.dot(X, self.weights1) + self.bias1
        self.a1 = self.sigmoid(self.z1)
        # Output layer input and output.
        self.z2 = np.dot(self.a1, self.weights2) + self.bias2
        self.a2 = self.sigmoid(self.z2)
        return self.a2

    def backward(self, X, y, output):
        # Output-layer error and gradient.
        self.output_error = y - output
        self.output_gradient = self.output_error * self.sigmoid_derivative(self.z2)
        # Hidden-layer error and gradient.
        self.hidden_error = np.dot(self.output_gradient, self.weights2.T)
        self.hidden_gradient = self.hidden_error * self.sigmoid_derivative(self.z1)
        # Update the weights and biases.
        self.weights2 += self.learning_rate * np.dot(self.a1.T, self.output_gradient)
        self.bias2 += self.learning_rate * np.sum(self.output_gradient, axis=0, keepdims=True)
        self.weights1 += self.learning_rate * np.dot(X.T, self.hidden_gradient)
        self.bias1 += self.learning_rate * np.sum(self.hidden_gradient, axis=0)

    def train(self, X, y):
        output = self.feedforward(X)
        self.backward(X, y, output)

    def predict(self, X):
        return self.feedforward(X)
```

Usage example:

```python
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

nn = NeuralNetwork(2, 3, 1)
for i in range(10000):
    nn.train(X, y)

print(nn.predict(X))
```

This implementation uses sigmoid as the activation function and updates the weights and biases by full-batch gradient descent (each call to train uses the whole training set). Of course, this is only a simple implementation; real applications may need more complex network structures and optimization algorithms.
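
Backpropagation code like the two classes above is easy to get subtly wrong, so a numerical gradient check is a useful sanity test. The sketch below is not part of the original post; the loss definition and the finite-difference epsilon are assumptions for illustration. It compares the analytic gradient of weights2 in NeuralNetwork against a central finite-difference estimate:

```python
import numpy as np

# Hypothetical gradient check for the NeuralNetwork class above.
# Assumed loss: L = 0.5 * sum((y - a2)^2). Its gradient w.r.t. weights2 is
# -a1.T . ((y - a2) * sigmoid'(z2)); backward() adds the negative of this
# gradient (scaled by the learning rate), i.e. gradient descent on L.

np.random.seed(0)
nn = NeuralNetwork(2, 3, 1)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)

def loss():
    out = nn.feedforward(X)
    return 0.5 * np.sum((y - out) ** 2)

# Analytic gradient of L with respect to weights2.
out = nn.feedforward(X)
delta2 = (y - out) * nn.sigmoid_derivative(nn.z2)
analytic = -np.dot(nn.a1.T, delta2)

# Central finite-difference estimate, one weight at a time.
eps = 1e-5
numeric = np.zeros_like(nn.weights2)
for i in range(nn.weights2.shape[0]):
    for j in range(nn.weights2.shape[1]):
        nn.weights2[i, j] += eps
        l_plus = loss()
        nn.weights2[i, j] -= 2 * eps
        l_minus = loss()
        nn.weights2[i, j] += eps  # restore the original weight
        numeric[i, j] = (l_plus - l_minus) / (2 * eps)

# The two estimates should agree to several decimal places.
print('max abs difference:', np.abs(analytic - numeric).max())
```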