A Python Implementation of a BP Neural Network

Overall layout

(Figure: overall layout of the network; the image is not reproduced here.)

Activation and loss functions

Sigmoid function:

f(x) = \frac{1}{1 + \exp(-x)}

Its derivative, used in the backward pass, is f'(x) = f(x)(1 - f(x)); de_sigmoid computes it from the cached activation h = f(z).

import numpy as np


# Sigmoid: forward and backward
def sigmoid(z):
    h = 1. / (1 + np.exp(-z))
    return h


def de_sigmoid(z, h):
    # Derivative expressed through the cached activation h = sigmoid(z)
    return h * (1 - h)


# ReLU activation: forward and backward
def relu(z):
    h = np.maximum(z, 0)
    return h


def de_relu(z, h):
    # Work on a copy so the cached pre-activation z is not mutated in place
    dz = np.zeros_like(z)
    dz[z > 0] = 1.0
    return dz


# Identity (no activation): forward and backward
def no_active(z):
    h = z
    return h


def de_no_active(z, h):
    return np.ones(h.shape)


# L2 loss: forward and backward
def loss_L2(o, lab):
    diff = lab - o
    sqrDiff = diff ** 2
    return 0.5 * np.sum(sqrDiff)


def de_loss_L2(o, lab):
    return o - lab
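
The main function further down selects loss_CE / de_loss_CE, which the post never defines. A minimal sketch that fits the same conventions (the forward pass returns a summed scalar loss, the backward pass returns dL/do) is a softmax cross-entropy; since the last layer uses no_active, the gradient p - lab passes through unchanged. The names match how main() uses them; the body itself is an assumption:

# Softmax cross-entropy: forward and backward (a sketch; not defined in the
# original post, reconstructed to match how main() uses it)
def loss_CE(o, lab):
    # Shift by the row max for numerical stability before exponentiating
    p = np.exp(o - np.max(o, axis=1, keepdims=True))
    p = p / np.sum(p, axis=1, keepdims=True)
    # Cross-entropy against one-hot labels, summed over the batch
    return -np.sum(lab * np.log(p + 1e-12))


def de_loss_CE(o, lab):
    # With softmax folded into the loss, dL/do = softmax(o) - lab
    p = np.exp(o - np.max(o, axis=1, keepdims=True))
    p = p / np.sum(p, axis=1, keepdims=True)
    return p - lab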

Building the network

def bulid_net(dim_in, list_num_hidden, list_act_funs, list_de_act_funs):
    """
    Build the network and initialize the weights w and biases b.
    :param dim_in: dimensionality of the input features
    :param list_num_hidden: number of output nodes in each layer
    :param list_act_funs: activation function of each layer
    :param list_de_act_funs: corresponding derivative used in backpropagation
    :return: list of layer dicts
    """
    layers = []

    # Build the network layer by layer
    for i in range(len(list_num_hidden)):
        layer = {}

        # Weights of this layer
        if i == 0:
            # layer["w"] = 0.2 * np.random.randn(dim_in, list_num_hidden[i]) - 0.1  # for sigmoid activations
            layer["w"] = 0.01 * np.random.randn(dim_in, list_num_hidden[i])    # for relu activations
        else:
            # layer["w"] = 0.2 * np.random.randn(list_num_hidden[i - 1], list_num_hidden[i]) - 0.1  # for sigmoid activations
            layer["w"] = 0.01 * np.random.randn(list_num_hidden[i - 1], list_num_hidden[i])  # for relu activations

        # Bias of this layer
        layer["b"] = 0.2 * np.ones([1, list_num_hidden[i]])
        layer["act_fun"] = list_act_funs[i]
        layer["de_act_fun"] = list_de_act_funs[i]
        layers.append(layer)

    return layers
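
A quick sanity check of the construction, using hypothetical toy sizes:

# Build a 4 -> 10 -> 5 -> 3 network and inspect the parameter shapes
layers = bulid_net(4, [10, 5, 3],
                   [relu, relu, no_active],
                   [de_relu, de_relu, de_no_active])
for k, layer in enumerate(layers):
    print(k, layer["w"].shape, layer["b"].shape)
# expected: (4, 10) (1, 10), then (10, 5) (1, 5), then (5, 3) (1, 3)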

The forward pass

def fead_forward(datas, layers):
    """
    Forward pass: returns the input of every layer and the output of the last layer.
    :param datas: input data
    :param layers: the network
    :return: per-layer inputs, per-layer pre-activations, final output
    """
    input_layers = []
    input_acfun = []

    for i in range(len(layers)):
        layer = layers[i]

        # The first layer consumes the raw data; every later layer consumes
        # the previous layer's activation h
        if i == 0:
            inputs = datas
        else:
            inputs = h

        z = np.dot(inputs, layer["w"]) + layer["b"]
        h = layer["act_fun"](z)
        input_layers.append(inputs)
        input_acfun.append(z)
    return input_layers, input_acfun, h
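
Continuing the toy example above, a random batch can be pushed through to verify the shapes (hypothetical sizes):

# Feed a batch of 8 random 4-dimensional samples through the toy network
X = np.random.randn(8, 4)
input_layers, input_acfun, out = fead_forward(X, layers)
print(out.shape)             # (8, 3): one row of 3 scores per sample
print(len(input_layers))     # 3: one cached input per layer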

Updating w, b

def updata_wb(datas, labs, layers, loss_fun, de_loss_fun, alpha=0.01):
    """
    Update the parameters w and b with one gradient step.
    :param datas: inputs
    :param labs: labels
    :param layers: the network
    :param loss_fun: loss function (forward)
    :param de_loss_fun: derivative of the loss used in backpropagation
    :param alpha: gradient-descent learning rate
    :return: updated layers and the batch loss
    """
    N, D = np.shape(datas)
    # Forward pass
    inputs, input_acfun, output = fead_forward(datas, layers)

    # Compute the loss
    loss = loss_fun(output, labs)

    # Gradient of the loss w.r.t. the network output
    deltas0 = de_loss_fun(output, labs)

    # Propagate the error from the last layer backwards
    deltas = []
    for i in range(len(layers)):
        index = -i - 1
        if i == 0:
            h = output
            z = input_acfun[index]
            delta = deltas0 * layers[index]["de_act_fun"](z, h)
        else:
            # The activation of layer `index` is the cached input of the next layer
            h = inputs[index + 1]
            z = input_acfun[index]
            delta = np.dot(delta, layers[index + 1]["w"].T) * layers[index]["de_act_fun"](z, h)

        deltas.insert(0, delta)

    # Use the per-layer errors to correct the weights of every layer
    for i in range(len(layers)):
        # Compute dw and db
        dw = np.dot(inputs[i].T, deltas[i])
        db = np.sum(deltas[i], axis=0, keepdims=True)
        # Gradient descent
        layers[i]['w'] = layers[i]["w"] - alpha * dw
        layers[i]["b"] = layers[i]["b"] - alpha * db

    return layers, loss
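
The two loops implement the standard backpropagation recursion. Writing z^{(l)} for the pre-activation, h^{(l)} = f(z^{(l)}) for the activation, and \delta^{(l)} for the error of layer l, with h^{(0)} the batch of input data:

\delta^{(L)} = \frac{\partial \mathcal{L}}{\partial o} \odot f'\!\left(z^{(L)}\right), \qquad
\delta^{(l)} = \left(\delta^{(l+1)} \, W^{(l+1)\top}\right) \odot f'\!\left(z^{(l)}\right)

\frac{\partial \mathcal{L}}{\partial W^{(l)}} = h^{(l-1)\top} \delta^{(l)}, \qquad
\frac{\partial \mathcal{L}}{\partial b^{(l)}} = \sum_{n} \delta^{(l)}_{n,:}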

Testing

# Evaluate the network; despite the name, this returns the error rate
def test_accuracy(datas, labs_true, layers):
    _, _, output = fead_forward(datas, layers)
    # Predicted and true classes are the argmax of the scores / one-hot labels
    lab_det = np.argmax(output, axis=1)
    labs_true = np.argmax(labs_true, axis=1)
    N_error = np.where(np.abs(labs_true - lab_det) > 0)[0].shape[0]

    error_rate = N_error / np.shape(datas)[0]
    return error_rate

Loading the data

def load_dataset_iris(file_data, N_train):
    # Read the data: columns 1-4 are the features, column 5 is the class label
    datas = np.loadtxt(file_data, dtype=float, delimiter=',', usecols=(1, 2, 3, 4))
    labs = np.loadtxt(file_data, dtype=str, delimiter=',', usecols=(5,))
    # print(datas)
    N, D = np.shape(datas)
    N_test = N - N_train
    unique_labs = np.unique(labs).tolist()  # np.unique removes duplicates and returns the elements sorted in ascending order

    # Maps between label strings and class indices
    dic_str2index = {}
    dic_index2str = {}
    for i in range(len(unique_labs)):
        lab_str = unique_labs[i]
        dic_str2index[lab_str] = i
        dic_index2str[i] = lab_str

    # One-hot encode the labels
    labs_onehot = np.zeros([N, len(unique_labs)])
    for i in range(N):
        labs_onehot[i, dic_str2index[labs[i]]] = 1

    # Random train/test split
    perm = np.random.permutation(N)
    index_train = perm[:N_train]
    index_test = perm[N_train:]

    data_train = datas[index_train, :]
    lab_train_onehot = labs_onehot[index_train, :]

    data_test = datas[index_test, :]
    lab_test_onehot = labs_onehot[index_test]

    return data_train, lab_train_onehot, data_test, lab_test_onehot, dic_index2str
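
For the Iris data listed at the end of the post (150 rows, 4 features, 3 classes), the split comes out as follows; the file path here matches the one used in the main function:

# 100 training samples, the remaining 50 for testing
data_train, lab_train_onehot, data_test, lab_test_onehot, dic_index2str = \
    load_dataset_iris('iris.data', 100)
print(data_train.shape, lab_train_onehot.shape)   # (100, 4) (100, 3)
print(data_test.shape, lab_test_onehot.shape)     # (50, 4) (50, 3)
print(dic_index2str)  # {0: 'Iris-setosa', 1: 'Iris-versicolor', 2: 'Iris-virginica'}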


The main function

if __name__ == "__main__":

    file_data = 'iris.data'

    data_train, lab_train_onehot, data_test, lab_test_onehot, dic_index2str = load_dataset_iris(file_data, 100)

    N, dim_in = np.shape(data_train)
    # Define the network structure
    list_num_hidden = [10, 5, 3]
    list_act_funs = [sigmoid, sigmoid, no_active]
    list_de_act_funs = [de_sigmoid, de_sigmoid, de_no_active]

    # Choose the loss function
    loss_fun = loss_CE
    de_loss_fun = de_loss_CE

    # loss_fun = loss_L2
    # de_loss_fun = de_loss_L2

    layers = bulid_net(dim_in, list_num_hidden,
                       list_act_funs, list_de_act_funs)

    # Training
    n_epoch = 50
    batchsize = 4
    N_batch = N // batchsize
    for i in range(n_epoch):
        # Shuffle the data
        rand_index = np.random.permutation(N).tolist()
        # Update the weights once per batch
        loss_sum = 0
        for j in range(N_batch):
            index = rand_index[j * batchsize:(j + 1) * batchsize]
            batch_datas = data_train[index]
            batch_labs = lab_train_onehot[index]
            layers, loss = updata_wb(batch_datas, batch_labs, layers, loss_fun, de_loss_fun, alpha=0.2)
            loss_sum = loss_sum + loss

        error = test_accuracy(data_train, lab_train_onehot, layers)
        print("epoch %d  error  %.2f%%  loss_all %.2f" % (i, error * 100, loss_sum))

    # Evaluate on the test set
    error = test_accuracy(data_test, lab_test_onehot, layers)
    print(error * 100)

Dataset

Iris.data

1,5.1,3.5,1.4,0.2,Iris-setosa
2,4.9,3.0,1.4,0.2,Iris-setosa
3,4.7,3.2,1.3,0.2,Iris-setosa
4,4.6,3.1,1.5,0.2,Iris-setosa
5,5.0,3.6,1.4,0.2,Iris-setosa
6,5.4,3.9,1.7,0.4,Iris-setosa
7,4.6,3.4,1.4,0.3,Iris-setosa
8,5.0,3.4,1.5,0.2,Iris-setosa
9,4.4,2.9,1.4,0.2,Iris-setosa
10,4.9,3.1,1.5,0.1,Iris-setosa
11,5.4,3.7,1.5,0.2,Iris-setosa
12,4.8,3.4,1.6,0.2,Iris-setosa
13,4.8,3.0,1.4,0.1,Iris-setosa
14,4.3,3.0,1.1,0.1,Iris-setosa
15,5.8,4.0,1.2,0.2,Iris-setosa
16,5.7,4.4,1.5,0.4,Iris-setosa
17,5.4,3.9,1.3,0.4,Iris-setosa
18,5.1,3.5,1.4,0.3,Iris-setosa
19,5.7,3.8,1.7,0.3,Iris-setosa
20,5.1,3.8,1.5,0.3,Iris-setosa
21,5.4,3.4,1.7,0.2,Iris-setosa
22,5.1,3.7,1.5,0.4,Iris-setosa
23,4.6,3.6,1.0,0.2,Iris-setosa
24,5.1,3.3,1.7,0.5,Iris-setosa
25,4.8,3.4,1.9,0.2,Iris-setosa
26,5.0,3.0,1.6,0.2,Iris-setosa
27,5.0,3.4,1.6,0.4,Iris-setosa
28,5.2,3.5,1.5,0.2,Iris-setosa
29,5.2,3.4,1.4,0.2,Iris-setosa
30,4.7,3.2,1.6,0.2,Iris-setosa
31,4.8,3.1,1.6,0.2,Iris-setosa
32,5.4,3.4,1.5,0.4,Iris-setosa
33,5.2,4.1,1.5,0.1,Iris-setosa
34,5.5,4.2,1.4,0.2,Iris-setosa
35,4.9,3.1,1.5,0.1,Iris-setosa
36,5.0,3.2,1.2,0.2,Iris-setosa
37,5.5,3.5,1.3,0.2,Iris-setosa
38,4.9,3.1,1.5,0.1,Iris-setosa
39,4.4,3.0,1.3,0.2,Iris-setosa
40,5.1,3.4,1.5,0.2,Iris-setosa
41,5.0,3.5,1.3,0.3,Iris-setosa
42,4.5,2.3,1.3,0.3,Iris-setosa
43,4.4,3.2,1.3,0.2,Iris-setosa
44,5.0,3.5,1.6,0.6,Iris-setosa
45,5.1,3.8,1.9,0.4,Iris-setosa
46,4.8,3.0,1.4,0.3,Iris-setosa
47,5.1,3.8,1.6,0.2,Iris-setosa
48,4.6,3.2,1.4,0.2,Iris-setosa
49,5.3,3.7,1.5,0.2,Iris-setosa
50,5.0,3.3,1.4,0.2,Iris-setosa
51,7.0,3.2,4.7,1.4,Iris-versicolor
52,6.4,3.2,4.5,1.5,Iris-versicolor
53,6.9,3.1,4.9,1.5,Iris-versicolor
54,5.5,2.3,4.0,1.3,Iris-versicolor
55,6.5,2.8,4.6,1.5,Iris-versicolor
56,5.7,2.8,4.5,1.3,Iris-versicolor
57,6.3,3.3,4.7,1.6,Iris-versicolor
58,4.9,2.4,3.3,1.0,Iris-versicolor
59,6.6,2.9,4.6,1.3,Iris-versicolor
60,5.2,2.7,3.9,1.4,Iris-versicolor
61,5.0,2.0,3.5,1.0,Iris-versicolor
62,5.9,3.0,4.2,1.5,Iris-versicolor
63,6.0,2.2,4.0,1.0,Iris-versicolor
64,6.1,2.9,4.7,1.4,Iris-versicolor
65,5.6,2.9,3.6,1.3,Iris-versicolor
66,6.7,3.1,4.4,1.4,Iris-versicolor
67,5.6,3.0,4.5,1.5,Iris-versicolor
68,5.8,2.7,4.1,1.0,Iris-versicolor
69,6.2,2.2,4.5,1.5,Iris-versicolor
70,5.6,2.5,3.9,1.1,Iris-versicolor
71,5.9,3.2,4.8,1.8,Iris-versicolor
72,6.1,2.8,4.0,1.3,Iris-versicolor
73,6.3,2.5,4.9,1.5,Iris-versicolor
74,6.1,2.8,4.7,1.2,Iris-versicolor
75,6.4,2.9,4.3,1.3,Iris-versicolor
76,6.6,3.0,4.4,1.4,Iris-versicolor
77,6.8,2.8,4.8,1.4,Iris-versicolor
78,6.7,3.0,5.0,1.7,Iris-versicolor
79,6.0,2.9,4.5,1.5,Iris-versicolor
80,5.7,2.6,3.5,1.0,Iris-versicolor
81,5.5,2.4,3.8,1.1,Iris-versicolor
82,5.5,2.4,3.7,1.0,Iris-versicolor
83,5.8,2.7,3.9,1.2,Iris-versicolor
84,6.0,2.7,5.1,1.6,Iris-versicolor
85,5.4,3.0,4.5,1.5,Iris-versicolor
86,6.0,3.4,4.5,1.6,Iris-versicolor
87,6.7,3.1,4.7,1.5,Iris-versicolor
88,6.3,2.3,4.4,1.3,Iris-versicolor
89,5.6,3.0,4.1,1.3,Iris-versicolor
90,5.5,2.5,4.0,1.3,Iris-versicolor
91,5.5,2.6,4.4,1.2,Iris-versicolor
92,6.1,3.0,4.6,1.4,Iris-versicolor
93,5.8,2.6,4.0,1.2,Iris-versicolor
94,5.0,2.3,3.3,1.0,Iris-versicolor
95,5.6,2.7,4.2,1.3,Iris-versicolor
96,5.7,3.0,4.2,1.2,Iris-versicolor
97,5.7,2.9,4.2,1.3,Iris-versicolor
98,6.2,2.9,4.3,1.3,Iris-versicolor
99,5.1,2.5,3.0,1.1,Iris-versicolor
100,5.7,2.8,4.1,1.3,Iris-versicolor
101,6.3,3.3,6.0,2.5,Iris-virginica
102,5.8,2.7,5.1,1.9,Iris-virginica
103,7.1,3.0,5.9,2.1,Iris-virginica
104,6.3,2.9,5.6,1.8,Iris-virginica
105,6.5,3.0,5.8,2.2,Iris-virginica
106,7.6,3.0,6.6,2.1,Iris-virginica
107,4.9,2.5,4.5,1.7,Iris-virginica
108,7.3,2.9,6.3,1.8,Iris-virginica
109,6.7,2.5,5.8,1.8,Iris-virginica
110,7.2,3.6,6.1,2.5,Iris-virginica
111,6.5,3.2,5.1,2.0,Iris-virginica
112,6.4,2.7,5.3,1.9,Iris-virginica
113,6.8,3.0,5.5,2.1,Iris-virginica
114,5.7,2.5,5.0,2.0,Iris-virginica
115,5.8,2.8,5.1,2.4,Iris-virginica
116,6.4,3.2,5.3,2.3,Iris-virginica
117,6.5,3.0,5.5,1.8,Iris-virginica
118,7.7,3.8,6.7,2.2,Iris-virginica
119,7.7,2.6,6.9,2.3,Iris-virginica
120,6.0,2.2,5.0,1.5,Iris-virginica
121,6.9,3.2,5.7,2.3,Iris-virginica
122,5.6,2.8,4.9,2.0,Iris-virginica
123,7.7,2.8,6.7,2.0,Iris-virginica
124,6.3,2.7,4.9,1.8,Iris-virginica
125,6.7,3.3,5.7,2.1,Iris-virginica
126,7.2,3.2,6.0,1.8,Iris-virginica
127,6.2,2.8,4.8,1.8,Iris-virginica
128,6.1,3.0,4.9,1.8,Iris-virginica
129,6.4,2.8,5.6,2.1,Iris-virginica
130,7.2,3.0,5.8,1.6,Iris-virginica
131,7.4,2.8,6.1,1.9,Iris-virginica
132,7.9,3.8,6.4,2.0,Iris-virginica
133,6.4,2.8,5.6,2.2,Iris-virginica
134,6.3,2.8,5.1,1.5,Iris-virginica
135,6.1,2.6,5.6,1.4,Iris-virginica
136,7.7,3.0,6.1,2.3,Iris-virginica
137,6.3,3.4,5.6,2.4,Iris-virginica
138,6.4,3.1,5.5,1.8,Iris-virginica
139,6.0,3.0,4.8,1.8,Iris-virginica
140,6.9,3.1,5.4,2.1,Iris-virginica
141,6.7,3.1,5.6,2.4,Iris-virginica
142,6.9,3.1,5.1,2.3,Iris-virginica
143,5.8,2.7,5.1,1.9,Iris-virginica
144,6.8,3.2,5.9,2.3,Iris-virginica
145,6.7,3.3,5.7,2.5,Iris-virginica
146,6.7,3.0,5.2,2.3,Iris-virginica
147,6.3,2.5,5.0,1.9,Iris-virginica
148,6.5,3.0,5.2,2.0,Iris-virginica
149,6.2,3.4,5.4,2.3,Iris-virginica
150,5.9,3.0,5.1,1.8,Iris-virginica