Extending a BP Neural Network to N Layers

Following on from the previous article, this extends the BP neural network to n layers.
It is not of much practical use, but it satisfied my curiosity; the background material can be found in the previous article.
Note:
the more layers there are, the slower training becomes, and the chance of something going wrong grows considerably.
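
A note on why this happens (my addition, not something derived in the post): the sigmoid derivative is bounded,

    f'(x) = f(x) * (1 - f(x)) <= 0.25

so each extra layer multiplies one more factor of at most 0.25 into the chain-rule product, and the gradient reaching the first layer of an m-layer network can shrink on the order of 0.25^m. With many layers the early-layer updates all but vanish, which matches the slow, unreliable training described above.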

import pandas as pd
import matplotlib.pyplot as plt
import numpy as np


def sigmoid(x):
    # Sigmoid: f(x) = 1 / (1 + e^(-x))
    return 1 / (1 + np.exp(-x))


def deriv_sigmoid(x):
    # Derivative of the sigmoid: f'(x) = f(x) * (1 - f(x))
    fx = sigmoid(x)
    return fx * (1 - fx)


def mse_loss(y_true, y_pred):
    # Mean squared error: L = mean((y_true - y_pred)^2).
    # Defined for reference; the training loop below uses its derivative directly.
    return np.mean((y_true - y_pred) ** 2)


# Load the data
def getdata(row):
    # Read 10 consecutive samples of each of two species from Iris.csv.
    # Assumes the usual Kaggle Iris.csv layout, where each species occupies a
    # block of 50 consecutive rows, so the second block starts 50 rows later.
    df_Iris = pd.read_csv('Iris.csv')
    cols = ['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']
    data = np.vstack([df_Iris.loc[row:row + 9, cols].to_numpy(),
                      df_Iris.loc[row + 50:row + 59, cols].to_numpy()])
    # Labels: the first 10 samples are class 0, the next 10 are class 1
    Species = np.array([[0]] * 10 + [[1]] * 10)
    return data, Species
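
# Quick shape check for getdata (hypothetical usage; the main loop at the
# bottom of the post calls it the same way):
#     data, Species = getdata(5)
#     data.shape    -> (20, 4)   20 samples, 4 features each
#     Species.shape -> (20, 1)   one 0/1 label per sample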


class test():
    # n: input dimension (also the width of every hidden layer);
    # m: number of hidden layers
    def __init__(self, n: int, m: int):
        self.m = m
        self.n = n
        self.w = np.random.normal(0, 1, [n, m, n])  # w[j, i, k]: weight from unit k of layer i-1 (or input k) to neuron j of layer i
        self.b = np.random.normal(0, 1, [n, m])     # hidden-layer biases
        self.w_o = np.random.normal(0, 1, [n, 1])   # output-layer weights
        self.b_o = np.random.normal(0, 1, [1, 1])   # output-layer bias
        self.h_sum = np.zeros([n, m])               # pre-activation sums
        self.h = np.zeros([n, m])                   # activations
        self.h_d = np.zeros([n, m])                 # sigmoid derivatives at h_sum
        self.ypred_sum = 0
        self.pred = 0

    # data: a 1-D array of n inputs
    def feedforward(self, data: np.ndarray):
        # First hidden layer, connected to the inputs
        # (sum over axis 1 so each neuron gets its own weighted sum)
        self.h_sum[:, 0] = np.sum(np.multiply(data, self.w[:, 0, :]), axis=1) + self.b[:, 0]
        self.h[:, 0] = sigmoid(self.h_sum[:, 0])
        self.h_d[:, 0] = deriv_sigmoid(self.h_sum[:, 0])

        # Layers 2 through the last
        for i in range(1, self.m):
            for j in range(self.n):
                self.h_sum[j, i] = np.sum(np.multiply(self.h[:, i - 1], self.w[j, i, :])) + self.b[j, i]
            self.h[:, i] = sigmoid(self.h_sum[:, i])
            self.h_d[:, i] = deriv_sigmoid(self.h_sum[:, i])
        # Output layer (w_o[:, 0] keeps the product element-wise)
        self.ypred_sum = np.sum(np.multiply(self.w_o[:, 0], self.h[:, self.m - 1])) + self.b_o[0, 0]
        self.pred = sigmoid(self.ypred_sum)
        return self.pred
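    # In formula form, the forward pass above computes, for hidden layer i and
    # neuron j (layer 0 reads the raw inputs in place of h[:, i-1]):
    #     h[j, i] = sigmoid( sum_k w[j, i, k] * h[k, i-1] + b[j, i] )
    #     y_pred  = sigmoid( sum_j w_o[j, 0] * h[j, m-1] + b_o )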

    def train(self, data, Species):
        learn_rate = 0.3
        epochs = 1000  # number of passes over the whole dataset
        for epoch in range(epochs):
            for x, y in zip(data, Species):
                # Forward pass; for the squared-error loss, x1 = dL/dy_pred = -2 * (y - y_pred)
                y_pred = self.feedforward(x)
                x1 = -2 * (y[0] - y_pred)
                # First layer (the one connected to the inputs)
                for i in range(self.n):  # neuron i
                    tmp = learn_rate * x1 * self.w_o[i, 0] * deriv_sigmoid(self.ypred_sum)
                    for k in range(self.m - 1):  # diagonal chain through the later layers
                        tmp *= self.w[i, k + 1, i] * self.h_d[i, k + 1]
                    for j in range(self.n):  # input index j
                        self.w[i, 0, j] -= tmp * x[j] * self.h_d[i, 0]
                    self.b[i, 0] -= tmp * self.h_d[i, 0]
                # Hidden layers after the first
                for i in range(1, self.m):  # layer i
                    for j in range(self.n):  # neuron j
                        tmp = learn_rate * x1 * self.w_o[j, 0] * deriv_sigmoid(self.ypred_sum)
                        for k in range(i + 1, self.m):  # diagonal chain through the layers after layer i
                            tmp *= self.w[j, k, j] * self.h_d[j, k]
                        for k in range(self.n):  # the input to w[j, i, k] is unit k of layer i-1
                            self.w[j, i, k] -= tmp * self.h[k, i - 1] * self.h_d[j, i]
                        self.b[j, i] -= tmp * self.h_d[j, i]
                # Output layer
                for i in range(self.n):
                    self.w_o[i, 0] -= learn_rate * x1 * self.h[i, self.m - 1] * deriv_sigmoid(self.ypred_sum)
                self.b_o[0, 0] -= learn_rate * x1 * deriv_sigmoid(self.ypred_sum)
            # Progress report: residual of the last sample of the epoch
            if epoch % 20 == 0:
                print(epoch, "-----", y[0] - self.pred)
                # plt.scatter(epoch, y[0] - self.pred)
                # plt.pause(0.1)
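
# Note on the updates above: the output-layer update applies the exact gradient
# dL/dw_o[i] = x1 * sigmoid'(ypred_sum) * h[i, m-1].  For the hidden layers,
# though, the chain-rule product is taken only along the "diagonal" path
# w[j, k, j] (neuron j of one layer to neuron j of the next); full
# backpropagation would sum over all n downstream neurons at every layer.
# That approximation is very likely why deeper configurations train
# unreliably, as noted at the end of the post.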


while True:
    data, Species = getdata(5)
    # print(data[2])
    # print(Species[2])
    # plt.axis([0, 1000, 0, 1])

    network = test(4, 3)
    # Predictions before training
    for i in range(20):
        print(network.feedforward(data[i]))
    network.train(data, Species)
    print("-------------------------------------------------")
    # Predictions after training, on a fresh slice of the data
    data, Species = getdata(15)
    for i in range(20):
        print(network.feedforward(data[i]))
    print("-------------------------------------------------")

    # Start over with fresh random weights until the two classes separate clearly
    if (network.feedforward(data[18]) - network.feedforward(data[1])) >= 0.8:
        break
It does not seem all that useful, and it probably still contains errors. I would recommend sticking to 1-3 layers; the one time 4 layers worked for me may well have been luck.
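
For reference, here is a minimal fully vectorized sketch of the same forward pass. This is my own rewrite, not part of the original code; it assumes the array layout used by the class above (w of shape [n, m, n], b of shape [n, m], w_o of shape [n, 1], b_o of shape [1, 1]) and reuses the sigmoid defined at the top.

def feedforward_vectorized(x, w, b, w_o, b_o):
    # x: 1-D input of length n; returns a scalar prediction in (0, 1)
    m = w.shape[1]
    h = sigmoid(w[:, 0, :] @ x + b[:, 0])      # first hidden layer
    for i in range(1, m):
        h = sigmoid(w[:, i, :] @ h + b[:, i])  # layer i from layer i-1
    return sigmoid(w_o[:, 0] @ h + b_o[0, 0])  # output layer

Given the same network object, feedforward_vectorized(data[0], network.w, network.b, network.w_o, network.b_o) should return the same value as network.feedforward(data[0]).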
