BP Neural Network - Python Source Code

We build the following simple BP (back-propagation) neural network in Python, in two steps:
Step 1, FP: propagate the inputs forward to obtain the outputs and a total loss, e.g. the squared error E = ½ Σ (target - output)²;
Step 2, BP: propagate the error backward and correct each weight w.
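For example, with the initial values in loadDataSet() below, the first forward-pass hidden activation works out to h1 = sigmoid(w[0]*l[0] + w[1]*l[1] + b[0]) = sigmoid(0.1*5 + 0.15*10 + 0.35) = sigmoid(2.35) ≈ 0.913, a worked instance of the code that follows.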


Python from-scratch code:

import numpy as np
'''
A BP neural network with a single hidden layer.
Input layer:  two neurons l1, l2
Hidden layer: h1 / h2 / h3
Output layer: o1 / o2
'''

# Load the data
def loadDataSet():
    w = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65]  # all 12 weights: w[0:6] input->hidden, w[6:12] hidden->output
    b = [0.35, 0.65]  # biases: b[0] for the hidden layer, b[1] for the output layer
    l = [5, 10]  # input features
    return w, b, l


# Activation function
def sigmoid(z):
    return 1.0 / (1 + np.exp(-z))
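# Note: the sigmoid derivative, written in terms of the activation a = sigmoid(z),
# is a * (1 - a); this identity is why the update rules below contain the factors
# o * (1 - o) and h * (1 - h). A hypothetical helper (not called by the original
# code) that makes it explicit:
def sigmoid_derivative(a):
    return a * (1 - a)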


w, b, l = loadDataSet()


def f1(w, b, l):
    '''
    One round of forward propagation followed by back-propagation.
    :param w: the 12 weights (w[0:6] input->hidden, w[6:12] hidden->output)
    :param b: the 2 biases (b[0] hidden layer, b[1] output layer)
    :param l: the 2 input features
    :return: the outputs o1, o2 and the updated weight list w
    '''
    # FP: forward pass through the hidden and output layers
    h1 = sigmoid(w[0] * l[0] + w[1] * l[1] + b[0])
    h2 = sigmoid(w[2] * l[0] + w[3] * l[1] + b[0])
    h3 = sigmoid(w[4] * l[0] + w[5] * l[1] + b[0])
    o1 = sigmoid(w[6] * h1 + w[8] * h2 + w[10] * h3 + b[1])
    o2 = sigmoid(w[7] * h1 + w[9] * h2 + w[11] * h3 + b[1])
    c1 = 0.1  # target values
    c2 = 0.99
    # BP: output-layer deltas, t = -(target - o) * o * (1 - o)
    t1 = -(c1 - o1) * o1 * (1 - o1)
    t2 = -(c2 - o2) * o2 * (1 - o2)
    # Update the input->hidden weights first: their gradients must use the
    # hidden->output weights from *before* this round's update (learning rate 0.5)
    w[0] = w[0] - 0.5 * (t1 * w[6] + t2 * w[7]) * h1 * (1 - h1) * l[0]
    w[1] = w[1] - 0.5 * (t1 * w[6] + t2 * w[7]) * h1 * (1 - h1) * l[1]
    w[2] = w[2] - 0.5 * (t1 * w[8] + t2 * w[9]) * h2 * (1 - h2) * l[0]
    w[3] = w[3] - 0.5 * (t1 * w[8] + t2 * w[9]) * h2 * (1 - h2) * l[1]
    w[4] = w[4] - 0.5 * (t1 * w[10] + t2 * w[11]) * h3 * (1 - h3) * l[0]
    w[5] = w[5] - 0.5 * (t1 * w[10] + t2 * w[11]) * h3 * (1 - h3) * l[1]
    # Then update the hidden->output weights
    w[6] = w[6] - 0.5 * (t1 * h1)
    w[8] = w[8] - 0.5 * (t1 * h2)
    w[10] = w[10] - 0.5 * (t1 * h3)
    w[7] = w[7] - 0.5 * (t2 * h1)
    w[9] = w[9] - 0.5 * (t2 * h2)
    w[11] = w[11] - 0.5 * (t2 * h3)

    return o1, o2, w

def t_test():
    w, b, l = loadDataSet()
    for i in range(1001):
        r1, r2, w = f1(w, b, l)
        print("Iteration {}: outputs = ({}, {}), updated weights: {}".format(i, r1, r2, w))


t_test()
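
The element-by-element updates above spell out the math; the same step can be written more compactly with NumPy matrix operations. Below is a minimal vectorized sketch, assuming the layout W1 = the 3x2 input-to-hidden matrix and W2 = the 2x3 hidden-to-output matrix built from the weight list w; the name step and both matrix names are illustrative, not part of the original code.

import numpy as np

def step(W1, W2, b, x, target, lr=0.5):
    h = 1.0 / (1 + np.exp(-(W1 @ x + b[0])))  # hidden activations, shape (3,)
    o = 1.0 / (1 + np.exp(-(W2 @ h + b[1])))  # output activations, shape (2,)
    t = -(target - o) * o * (1 - o)           # output-layer deltas
    g = (W2.T @ t) * h * (1 - h)              # hidden-layer deltas (uses W2 before the update)
    W2 -= lr * np.outer(t, h)                 # hidden->output update
    W1 -= lr * np.outer(g, x)                 # input->hidden update
    return o, W1, W2

# Wiring against loadDataSet(): rows of W1 are (w[0], w[1]), (w[2], w[3]), (w[4], w[5]);
# columns of W2 are (w[6], w[7]), (w[8], w[9]), (w[10], w[11]).
w, b, l = loadDataSet()
W1 = np.array(w[:6]).reshape(3, 2)
W2 = np.array(w[6:]).reshape(3, 2).T
o, W1, W2 = step(W1, W2, b, np.array(l, dtype=float), np.array([0.1, 0.99]))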

TensorFlow code (TensorFlow 1.x graph API):

import tensorflow as tf
import numpy as np

# Input data and target data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]  # 300 evenly spaced x values between -1 and 1, as a column vector
noise = np.random.normal(0, 0.05, x_data.shape)  # add Gaussian noise
y_data = np.square(x_data) - 0.5 + noise  # y = x^2 - 0.5 (plus noise)

# Define the graph inputs (placeholders)
xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])

def add_layer(inputs, in_size, out_size, activation_function=None):
    # One fully connected layer: y = x @ W + b, with an optional activation
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([out_size]) + 0.1)  # small positive bias init
    Wx_plus_b = tf.matmul(inputs, weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
# Forward pass
h1 = add_layer(xs, 1, 20, activation_function=tf.nn.relu)  # ReLU activation, i.e. max(features, 0): every value below 0 is clamped to 0
prediction = add_layer(h1, 20, 1, activation_function=None)
# Backward pass: squared-error loss minimized by gradient descent
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))  # reduction_indices is the TF1 alias of axis
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    sess.run(train_op, feed_dict={xs:x_data, ys:y_data})
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs:x_data, ys:y_data}))  # print the loss every 50 iterations
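
The listing above targets the TensorFlow 1.x graph API (tf.placeholder, tf.Session, tf.train.GradientDescentOptimizer). Under TensorFlow 2.x it only runs through the v1 compatibility layer; a minimal adjustment, assuming TF 2.x is installed, is to replace the tensorflow import with:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores placeholders, sessions and the tf.train optimizers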
Below is a Python source example that uses a BP neural network for regression:

```python
import numpy as np
import matplotlib.pyplot as plt

# Define the BP neural network class
class BPNeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        # Randomly initialize weights and biases
        self.weights_ih = np.random.randn(hidden_size, input_size)
        self.weights_ho = np.random.randn(output_size, hidden_size)
        self.bias_h = np.random.randn(hidden_size, 1)
        self.bias_o = np.random.randn(output_size, 1)

    # Sigmoid activation for the hidden layer
    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    # Forward pass; the output layer is linear so the network can reach
    # regression targets outside [0, 1]
    def forward(self, inputs):
        hidden = self.sigmoid(self.weights_ih @ inputs + self.bias_h)
        output = self.weights_ho @ hidden + self.bias_o
        return output

    # Backward pass: one gradient step on a single sample
    def backward(self, inputs, targets, learning_rate):
        # Forward pass
        hidden = self.sigmoid(self.weights_ih @ inputs + self.bias_h)
        output = self.weights_ho @ hidden + self.bias_o
        # Output error
        error = targets - output
        # Deltas: identity derivative at the linear output,
        # sigmoid derivative at the hidden layer
        gradient_output = error
        gradient_hidden = hidden * (1 - hidden) * (self.weights_ho.T @ gradient_output)
        # Update weights and biases
        self.weights_ho += learning_rate * gradient_output @ hidden.T
        self.weights_ih += learning_rate * gradient_hidden @ inputs.T
        self.bias_o += learning_rate * gradient_output
        self.bias_h += learning_rate * gradient_hidden

    # Training loop: per-sample (stochastic) updates
    def train(self, inputs, targets, epochs, learning_rate):
        for i in range(epochs):
            for j in range(len(inputs)):
                # each sample is reshaped into a column vector
                self.backward(inputs[j].reshape(-1, 1), targets[j].reshape(-1, 1), learning_rate)

    # Prediction for a single sample
    def predict(self, inputs):
        return self.forward(inputs.reshape(-1, 1))

# Example inputs and targets (y = x^2)
inputs = np.array([[0], [1], [2], [3], [4], [5]], dtype=float)
targets = np.array([[0], [1], [4], [9], [16], [25]], dtype=float)

# Create the BP neural network
network = BPNeuralNetwork(input_size=1, hidden_size=4, output_size=1)

# Train the network (a small learning rate keeps the linear-output updates stable)
network.train(inputs, targets, epochs=10000, learning_rate=0.01)

# Predict and print the results
for x in inputs:
    y = network.predict(x)
    print(f"Input: {x}, Predicted Output: {y.item()}")

# Plot the fitted curve
plt.plot(inputs, targets, 'ro', label='True')
plt.plot(inputs, [network.predict(x).item() for x in inputs], label='Predicted')
plt.legend()
plt.show()
```

This code implements a simple BP neural network for regression. The BPNeuralNetwork class bundles weight initialization, the activation function, forward propagation, back-propagation, a training loop and a prediction method. A set of sample inputs and target outputs is created, a network object is built and trained on them, and the trained network is then used to make predictions and plot the fitted curve.