BP Neural Network Algorithm: A Python Implementation

BP Neural Network Algorithm
Experiment requirements:

  1. Understand and implement the BP (backpropagation) neural network algorithm.
  2. Use a BP neural network to classify sample data, and observe how training time and the number of iterations affect the results (the update rules implemented by the code are summarized below).

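For reference, the rules the code below implements are the standard delta-rule form of backpropagation, restated here directly from the source. θ are the neuron thresholds, h0 the sigmoid temperature, η the learning rate, d the desired output vector (1 for the target class, -1 for the others), and L the index of the output layer; note that θ itself is never updated in this implementation.

net_j^{(i)} = \sum_k w_{jk}^{(i)} y_k^{(i-1)}, \qquad y_j^{(i)} = \frac{1}{1 + e^{(\theta_j^{(i)} - net_j^{(i)})/h_0}}

E_p = \frac{1}{2} \sum_j \bigl(d_j - y_j^{(L)}\bigr)^2

\delta_j^{(L)} = y_j^{(L)} \bigl(1 - y_j^{(L)}\bigr) \bigl(d_j - y_j^{(L)}\bigr), \qquad \delta_j^{(i)} = y_j^{(i)} \bigl(1 - y_j^{(i)}\bigr) \sum_q \delta_q^{(i+1)} w_{qj}^{(i+1)}

w_{jk}^{(i)} \leftarrow w_{jk}^{(i)} + \eta \, \delta_j^{(i)} \, y_k^{(i-1)}

Training stops as soon as one presented sample satisfies E_p \le 0.50001 (with ±1 targets a perfectly classified two-class sample still contributes E_p = 0.5) or when the iteration cap is reached.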
Python code

# -*- coding: utf-8 -*-
# @Time    : 2022/10/25 22:48
# @Author  : Norman Mises
# @FileName: train.py
# @Software: PyCharm

import math
import time

import numpy as np
import pandas as pd
import yaml


def train(x, target_id):
    """Forward pass for one sample; returns True once its error Ep is small enough."""
    global d
    d = [-1 for _ in range(num_layers[-1])]  # desired output: 1 for the target class, -1 otherwise
    d[target_id] = 1

    global y
    y[0] = x
    for i in range(1, n):  # for each layer
        for j in range(num_layers[i]):  # for each neuron in the layer
            net = sum(weights[i][j][k] * y[i - 1][k] for k in range(num_layers[i - 1]))
            f = lambda net_: 1 / (1 + math.e ** ((theta[i][j] - net_) / h0))  # sigmoid with threshold theta, temperature h0
            y[i][j] = f(net)

    # squared error of this sample; with targets of -1/1 a perfectly classified
    # two-class sample still has Ep = 0.5, so 0.50001 acts as the convergence threshold
    Ep = 0.5 * sum((d[i] - y[-1][i]) ** 2 for i in range(num_layers[-1]))
    return Ep <= 0.50001


def correct():
    """Backpropagate the error of the last sample and update the connection weights."""
    global delta
    for i in range(n - 1, 0, -1):  # from the output layer back towards the input
        for j in range(num_layers[i]):
            if i == n - 1:
                # output layer: delta from the difference between desired and actual output
                delta[i][j] = y[i][j] * (1 - y[i][j]) * (d[j] - y[-1][j])
            else:
                # hidden layer: delta propagated back from the layer above
                delta[i][j] = y[i][j] * (1 - y[i][j]) * sum(
                    delta[i + 1][q] * weights[i + 1][q][j] for q in range(num_layers[i + 1]))
            for k in range(num_layers[i - 1]):
                weights[i][j][k] += eta * delta[i][j] * y[i - 1][k]  # delta-rule update with learning rate eta


def test(x):
    """Forward pass on one test sample; prints the output of every class neuron."""
    out = y  # reuse the activation buffers from training
    out[0] = x
    for i in range(1, n):  # for each layer
        for j in range(num_layers[i]):  # for each neuron in the layer
            net = sum(weights[i][j][k] * out[i - 1][k] for k in range(num_layers[i - 1]))
            f = lambda net_: 1 / (1 + math.e ** ((theta[i][j] - net_) / h0))  # same sigmoid as in train()
            out[i][j] = f(net)

    # note: the input is used as-is here, whereas the training data was min-max normalized
    for i in range(num_layers[-1]):
        print(f'Class {i}: {out[-1][i]:.6f}')


if __name__ == "__main__":
    with open('testcases/training_data.yaml', 'r', encoding='utf-8') as f:
        data_yaml = yaml.load(f, yaml.FullLoader)
        print(data_yaml['data'][0])

    label = data_yaml['label'][0]
    df = pd.DataFrame(data_yaml['data'][0])
    data = (df - df.min()) / (df.max() - df.min())
    data = data.values
    print(data)
    print(label)

    h0 = float(input('h0 (sigmoid temperature): '))
    eta = float(input('eta (learning rate): '))
    n = int(input('Number of layers: '))
    num_layers = []
    weights = []
    theta = []  # neuron thresholds
    delta = []
    d = []
    y = []
    print('Number of neurons in each layer:')
    for i in range(n):
        num_layers.append(int(input(f'Layer {i}: ')))
        theta.append(np.random.rand(num_layers[i]))
        delta.append(np.zeros(num_layers[i]))
        y.append(np.zeros(num_layers[i]))
        if i > 0:
            weights.append(np.random.rand(num_layers[i], num_layers[i - 1]))
        else:
            weights.append([])

    print('Connection weights between each layer and the layer below it:')
    for i in weights:
        print(i)

    print('Neuron thresholds:')
    for i in theta:
        print(i)
    times = int(input('Maximum number of training passes (-1 for no limit): '))
    start = time.time()
    cnt = 1
    # present the samples cyclically, correcting the weights after each one,
    # until one sample's error falls below the threshold or the iteration cap is hit
    while not train(data[cnt % len(data)], label[cnt % len(data)]):
        if times != -1 and cnt >= times:
            break
        cnt += 1
        correct()
    end = time.time()

    print('Connection weights between each layer and the layer below after training:')
    for i in weights:
        print(i)
    print(f'Training iterations: {cnt}')
    print(f'Training time: {(end - start):.6f} s')

    try:
        while True:
            print('Enter test data (comma-separated):')
            t = input().split(',')
            t = [float(i.strip()) for i in t]
            print(t)
            test(t)
    except (EOFError, KeyboardInterrupt, ValueError):
        print('Testing finished')

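For the two-feature, two-class data set below, the input layer must have 2 neurons and the output layer 2 neurons. Everything else in the following sample session (the hidden-layer size, h0, eta, and the iteration cap) is an arbitrary illustrative choice, not a value from the original experiment:

h0 (sigmoid temperature): 1
eta (learning rate): 0.5
Number of layers: 3
Number of neurons in each layer:
Layer 0: 2
Layer 1: 3
Layer 2: 2
Maximum number of training passes (-1 for no limit): 10000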

Data files

# .\testcases\training_data.yaml
data:
- [[99, 63], [128, 71], [175, 88], [206, 102], [143, 146], [188, 114], [206, 129], [110, 99], [142, 91], [188, 94], [982, 390], [1007, 388], [990, 414], [1000, 441], [1044, 400], [1057, 419], [985, 444], [1011, 424], [1052, 425], [1056, 427]]
label:
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]

# .\testcases\test_data.yaml
data:
- [[100, 65], [110, 60], [180, 70], [220, 110], [130, 150], [200, 110], [190, 135], [114, 90], [150, 90], [200, 80], [950, 400], [1010, 370], [980, 450], [1000, 410], [1077, 500], [1007, 450], [1001, 454], [1022, 450], [1055, 450], [1076, 440]]
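
As written, the interactive loop at the end of the script feeds raw feature values into test(), whereas the training data was min-max normalized. The following is a minimal sketch (an assumption, not part of the original script) of how the samples in test_data.yaml could be pushed through the trained network with the same scaling; it is meant to be pasted at the end of the __main__ block, where yaml, df and test() are already in scope, hence the 4-space indentation:

    # Sketch (assumption): classify every sample in test_data.yaml,
    # scaled with the column-wise min/max of the training DataFrame df.
    with open('testcases/test_data.yaml', 'r', encoding='utf-8') as f:
        test_yaml = yaml.load(f, yaml.FullLoader)
    for sample in test_yaml['data'][0]:
        scaled = [(sample[k] - df.min()[k]) / (df.max()[k] - df.min()[k])
                  for k in range(len(sample))]
        print(sample, '->')
        test(scaled)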
