Machine Learning and Data Mining, Lab 5: Implementing the Error Backpropagation (BP) Algorithm

 

import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import random
seed = 2020
np.random.seed(seed)  # NumPy RNG
random.seed(seed)  # Python random module
plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese (CJK) labels correctly
plt.rcParams['axes.unicode_minus'] = False  # display the minus sign correctly
plt.close('all')

# Data preprocessing
def preprocess(data):
    # Encode non-numeric columns as integers
    for title in data.columns:
        if data[title].dtype=='object':
            encoder = LabelEncoder()
            data[title] = encoder.fit_transform(data[title])
    # Standardize the features to zero mean and unit variance
    ss = StandardScaler()
    X = data.drop('好瓜',axis=1)  # '好瓜' (good melon) is the binary label
    Y = data['好瓜']
    X = ss.fit_transform(X)
    x,y = np.array(X),np.array(Y).reshape(Y.shape[0],1)
    return x,y
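# A quick illustration of what preprocess() returns. This helper and the toy
# column names (other than '好瓜') are additions for illustration only:
# categorical columns are integer-encoded, the features are standardized, and
# the label comes back as an (n, 1) column vector.
def demo_preprocess():
    toy = pd.DataFrame({'色泽': ['青绿', '乌黑', '浅白'],
                        '密度': [0.697, 0.774, 0.634],
                        '好瓜': ['是', '否', '是']})
    x_toy, y_toy = preprocess(toy)
    print(x_toy.shape, y_toy.shape)  # (3, 2) (3, 1)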
# Sigmoid activation
def sigmoid(x):
    return 1/(1+np.exp(-x))
# Derivative of the sigmoid, written in terms of its output:
# if out = sigmoid(u), then d(out)/d(u) = out*(1-out), which is why out1/out2 are passed in below
def d_sigmoid(x):
    return x*(1-x)
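# Sanity check (an addition, not part of the original assignment): d_sigmoid is
# fed the sigmoid *output*, so the analytic derivative should match a numerical one.
def check_d_sigmoid(eps=1e-6):
    u = np.linspace(-3, 3, 7)
    num = (sigmoid(u + eps) - sigmoid(u - eps)) / (2 * eps)  # numerical derivative w.r.t. u
    return np.allclose(num, d_sigmoid(sigmoid(u)), atol=1e-6)  # expected: True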


#累积BP算法

def accumulate_BP(x,y,dim=10,eta=0.8,max_iter=500):
    n_samples = x.shape[0]
    # Small random initialization: with all-zero weights every hidden unit would
    # receive identical gradients and stay identical (the symmetry problem).
    w1 = np.random.randn(x.shape[1],dim)*0.1
    b1 = np.zeros((1,dim))  # biases are shared by all samples
    w2 = np.random.randn(dim,1)*0.1
    b2 = np.zeros((1,1))
    losslist = []
    for ite in range(max_iter):
        ## Forward pass
        u1 = np.dot(x,w1)+b1
        out1 = sigmoid(u1)
        u2 = np.dot(out1,w2)+b2
        out2 = sigmoid(u2)
        loss = np.mean(np.square(y - out2))/2
        losslist.append(loss)
        print('iter:%d  loss:%.4f'%(ite,loss))
        ## Backward pass: gradients of the squared error summed over the batch
        ## (the 1/n factor of the mean loss is omitted and absorbed into eta)
        d_out2 = -(y - out2)
        d_u2 = d_out2*d_sigmoid(out2)
        d_w2 = np.dot(np.transpose(out1),d_u2)
        d_b2 = np.sum(d_u2,axis=0,keepdims=True)  # biases are shared, so sum over samples
        d_out1 = np.dot(d_u2,np.transpose(w2))
        d_u1 = d_out1*d_sigmoid(out1)
        d_w1 = np.dot(np.transpose(x),d_u1)
        d_b1 = np.sum(d_u1,axis=0,keepdims=True)

        w1 = w1 - eta*d_w1
        w2 = w2 - eta*d_w2
        b1 = b1 - eta*d_b1
        b2 = b2 - eta*d_b2

    ## Plot the loss curve
    plt.figure()
    plt.plot([i+1 for i in range(max_iter)],losslist)
    plt.legend(['accumulated BP'])
    plt.xlabel('iteration')
    plt.ylabel('loss')
    plt.show()
    return w1,w2,b1,b2
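# Optional finite-difference check of the analytic gradient used above (a sketch;
# the helper names mse_loss and check_grad_w2 are additions). It rebuilds the
# forward pass, keeps the 1/n factor of the mean loss that the training loop
# folds into the learning rate, and compares d_w2 with a numerical gradient.
def mse_loss(x, y, w1, b1, w2, b2):
    out1 = sigmoid(np.dot(x, w1) + b1)
    out2 = sigmoid(np.dot(out1, w2) + b2)
    return np.mean(np.square(y - out2))/2

def check_grad_w2(x, y, dim=3, eps=1e-6):
    rng = np.random.RandomState(0)
    w1 = rng.randn(x.shape[1], dim)*0.1
    b1 = np.zeros((1, dim))
    w2 = rng.randn(dim, 1)*0.1
    b2 = np.zeros((1, 1))
    # analytic gradient of the mean squared error w.r.t. w2
    out1 = sigmoid(np.dot(x, w1) + b1)
    out2 = sigmoid(np.dot(out1, w2) + b2)
    d_u2 = -(y - out2)*d_sigmoid(out2)/x.shape[0]
    d_w2 = np.dot(np.transpose(out1), d_u2)
    # numerical gradient, one entry of w2 at a time
    num = np.zeros_like(w2)
    for i in range(w2.shape[0]):
        w_hi, w_lo = w2.copy(), w2.copy()
        w_hi[i, 0] += eps
        w_lo[i, 0] -= eps
        num[i, 0] = (mse_loss(x, y, w1, b1, w_hi, b2)
                     - mse_loss(x, y, w1, b1, w_lo, b2))/(2*eps)
    return np.max(np.abs(num - d_w2))  # should be tiny (around 1e-9 or less)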

# Standard (per-sample) BP algorithm: one parameter update per training sample
def standard_BP(x,y,dim=10,eta=0.8,max_iter=500):
    n_samples = 1  # each update is computed from a single sample
    # Small random initialization, for the same symmetry reason as in accumulate_BP
    w1 = np.random.randn(x.shape[1],dim)*0.1
    b1 = np.zeros((n_samples,dim))
    w2 = np.random.randn(dim,1)*0.1
    b2 = np.zeros((n_samples,1))
    losslist = []
    # Training loop: each iteration (epoch) visits every sample once

    for ite in range(max_iter):
        loss_per_ite = []
        for m in range(x.shape[0]):
            xi,yi = x[m,:],y[m,:]
            xi,yi = xi.reshape(1,xi.shape[0]),yi.reshape(1,yi.shape[0])
            ## Forward pass for this sample
            u1 = np.dot(xi,w1)+b1
            out1 = sigmoid(u1)
            u2 = np.dot(out1,w2)+b2
            out2 = sigmoid(u2)
            loss = (np.square(yi - out2)/2).item()  # scalar loss for this sample
            loss_per_ite.append(loss)
            print('iter:%d  loss:%.4f'%(ite,loss))
            ## Backward pass for this sample
            d_out2 = -(yi - out2)
            d_u2 = d_out2*d_sigmoid(out2)
            d_w2 = np.dot(np.transpose(out1),d_u2)
            d_b2 = d_u2
            d_out1 = np.dot(d_u2,np.transpose(w2))
            d_u1 = d_out1*d_sigmoid(out1)
            d_w1 = np.dot(np.transpose(xi),d_u1)
            d_b1 = d_u1
            ## Gradient-descent update
            w1 = w1 - eta*d_w1
            w2 = w2 - eta*d_w2
            b1 = b1 - eta*d_b1
            b2 = b2 - eta*d_b2
        losslist.append(np.mean(loss_per_ite))

    ## Plot the loss curve
    plt.figure()
    plt.plot([i+1 for i in range(max_iter)],losslist)
    plt.legend(['standard BP'])
    plt.xlabel('iteration')
    plt.ylabel('loss')
    plt.show()

    return w1,w2,b1,b2
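# The two variants differ only in update granularity: standard_BP takes one
# gradient step per training sample (x.shape[0] steps per iteration), while
# accumulate_BP takes a single step per iteration on the whole batch. A
# hypothetical side-by-side run (reusing mse_loss from the gradient check above):
# np.random.seed(seed); w1s,w2s,b1s,b2s = standard_BP(x, y, dim=10)
# np.random.seed(seed); w1a,w2a,b1a,b2a = accumulate_BP(x, y, dim=10)
# print(mse_loss(x, y, w1s, b1s, w2s, b2s), mse_loss(x, y, w1a, b1a, w2a, b2a))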

# Test

def main():
    data = pd.read_table('watermelon30.txt',delimiter=',')  # replace with your own dataset
    data.drop('编号',axis=1,inplace=True)  # drop the ID column ('编号')
    x,y = preprocess(data)
    dim = 10
    w1,w2,b1,b2 = standard_BP(x,y,dim)
    # w1,w2,b1,b2 = accumulate_BP(x,y,dim)

    ## Forward pass with the trained parameters; threshold the output at 0.5
    u1 = np.dot(x,w1)+b1
    out1 = sigmoid(u1)
    u2 = np.dot(out1,w2)+b2
    out2 = sigmoid(u2)
    y_pred = np.round(out2)

    # Save the ground truth ('真值') and the prediction ('预测') side by side
    result = pd.DataFrame(np.hstack((y,y_pred)),columns=['真值','预测'] )
    result.to_excel('result_numpy.xlsx',index=False)

# Test: predict the class of every sample in x (done at the end of main above)
if __name__=='__main__':
    main()
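To put a number on the fit, the training accuracy can be computed from the arrays already produced in main. A minimal sketch (the helper name print_accuracy is an addition; if used, define it before main and call it right after y_pred is computed):

def print_accuracy(y, y_pred):
    # fraction of training samples whose thresholded output matches the label
    acc = np.mean(y.ravel() == y_pred.ravel())
    print('training accuracy: %.2f%%' % (100*acc))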

Final experimental results: the training loss curve plotted by the chosen BP routine, and the table of true vs. predicted labels written to result_numpy.xlsx.
