BP网络搭建和例题测试

本文介绍了一种优化后的代码实现,通过使用数组替代DataFrame,显著提高了训练速度。作者详细展示了如何使用sigmoid函数、梯度下降和BP算法训练一个多层感知器,并在训练集和测试集上计算误差。最后,文章着重于提升预测阶段的性能和效率。
摘要由CSDN通过智能技术生成

1.代码如下:

# -*- coding: utf-8 -*-
"""
Created on Mon Mar 14 18:58:05 2022

@author: L
"""
import math
import pandas as pd
import numpy  as np
import matplotlib.pyplot as plt
from pandas import DataFrame,Series
#定义函数:
def sigmoid(x):
    """Logistic activation 1 / (1 + e^-x), numerically stable.

    The naive form ``1/(1+math.exp(-x))`` raises OverflowError once
    -x exceeds ~709 (float64 exp limit).  Branching on the sign keeps
    the exponent non-positive, so exp() can only underflow to 0.0.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    # x < 0: rewrite as e^x / (1 + e^x); e^x is in (0, 1], never overflows
    z = math.exp(x)
    return z / (1 + z)
# Load data: each row holds two input features and one target value.
data_tr=pd.read_csv('D:\\text_class\\data_tr.txt')
data_te=pd.read_csv('D:\\text_class\\data_te.txt')
# Learning rate
yita=0.3
# Network containers: 2 inputs (+bias "theata") -> 4 hidden (+bias) -> 1 output.
# NOTE: containers that receive float values are initialised with 0.0, not
# int 0 — assigning a float into an int64 column via .iloc truncates or
# emits a FutureWarning in modern pandas.
Err = []  # mean absolute training error per epoch
Net_in=DataFrame(0.6,index=['input1','input2','theata'],columns=['a'])
Out_in=DataFrame(0.0,index=['input1','input2','input3','input4','theata'],columns=['a'])
Net_in.iloc[2,0]=-1   # bias input of the hidden layer
Out_in.iloc[4,0]=-1   # bias input of the output layer
W_mid=DataFrame(0.5,index=['input1','input2','theata'],columns=['mid1','mid2','mid3','mid4'])
W_out=DataFrame(0.5,index=['input1','input2','input3','input4','theata'],columns=['a'])
W_mid_delta=DataFrame(0.0,index=['input1','input2','theata'],columns=['mid1','mid2','mid3','mid4'])
W_out_delta=DataFrame(0.0,index=['input1','input2','input3','input4','theata'],columns=['a'])
# Training: online (per-sample) gradient descent for 1000 epochs.
n=len(data_tr)
for j in range(1000):
    error = []
    for i in range(n):
        Net_in.iloc[0,0]=data_tr.iloc[i,0]
        Net_in.iloc[1,0]=data_tr.iloc[i,1]
        real=data_tr.iloc[i,2]
        # Forward pass: hidden activations, then the single output unit.
        for t in range(0,4):
            Out_in.iloc[t,0]=sigmoid(sum(W_mid.iloc[:,t]*Net_in.iloc[:,0]))
        res=sigmoid(sum(Out_in.iloc[:,0]*W_out.iloc[:,0]))
        error.append(abs(res-real))
        print(i,':',res,real)
        # Backward pass: compute BOTH delta matrices from the CURRENT
        # weights, then apply the updates.  (The original updated W_out
        # first and back-propagated with the already-updated weights,
        # which deviates from standard backpropagation.)
        W_out_delta.iloc[:,0]=yita*res*(1-res)*(real-res)*Out_in.iloc[:,0]
        W_out_delta.iloc[4,0]=-(yita*res*(1-res)*(real-res))  # bias row: input is -1
        for g in range(0,4):
            W_mid_delta.iloc[:,g]=yita*Out_in.iloc[g,0]*(1-Out_in.iloc[g,0])*W_out.iloc[g,0]*res*(1-res)*(real-res)*Net_in.iloc[:,0]
            W_mid_delta.iloc[2,g]=-(yita*Out_in.iloc[g,0]*(1-Out_in.iloc[g,0])*W_out.iloc[g,0]*res*(1-res)*(real-res))
        W_out=W_out+W_out_delta
        W_mid=W_mid+W_mid_delta
    Err.append(np.mean(error))
print(W_mid,W_out)
plt.plot(Err)  # mean absolute error on the training set, per epoch
plt.show()
plt.close()
# ---- Evaluation on the held-out test set ----
# One forward pass per test row with the trained weights; record the
# absolute prediction error of every sample.
error_te=[]
k=len(data_te)
Net_in.iloc[2,0]=-1   # re-assert the bias inputs
Out_in.iloc[4,0]=-1
for row in range(k):
    Net_in.iloc[0,0]=data_te.iloc[row,0]
    Net_in.iloc[1,0]=data_te.iloc[row,1]
    real=data_te.iloc[row,2]
    for h in range(4):
        # hidden-unit activation for unit h
        Out_in.iloc[h,0]=sigmoid(sum(W_mid.iloc[:,h]*Net_in.iloc[:,0]))
    res=sigmoid(sum(Out_in.iloc[:,0]*W_out.iloc[:,0]))
    error_te.append(abs(res-real))
plt.plot(error_te)  # per-sample absolute error on the TEST set
plt.show()
plt.close()


        

该代码使用了pandas的DataFrame数据结构,并通过.iloc逐元素读写,运行速度会很慢,训练1000次需要30多分钟;可改用NumPy数组进行向量化计算,效率会快很多。

运行结果如下:

 

 

 

2.求出预测值

题如下:

代码如下:

 

 

# -*- coding: utf-8 -*-
"""
Created on Sun Mar 13 20:56:46 2022

@author: L
"""

import math
import pandas as pd
import numpy  as np
import matplotlib.pyplot as plt
from pandas import DataFrame,Series
#定义函数:
def sigmoid(x):
    """Logistic activation 1 / (1 + e^-x), numerically stable.

    The naive form ``1/(1+math.exp(-x))`` raises OverflowError once
    -x exceeds ~709 (float64 exp limit).  Branching on the sign keeps
    the exponent non-positive, so exp() can only underflow to 0.0.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    # x < 0: rewrite as e^x / (1 + e^x); e^x is in (0, 1], never overflows
    z = math.exp(x)
    return z / (1 + z)
# Load data: each row holds two input features and one target value.
data_tr=pd.read_csv('D:\\text_class\\data_tr_else.txt')
data_te=pd.read_csv('D:\\text_class\\data_te_else.txt')
# Learning rate
yita=0.3
# Network containers: 2 inputs (+bias "theata") -> 4 hidden (+bias) -> 1 output.
# NOTE: containers that receive float values are initialised with 0.0, not
# int 0 — assigning a float into an int64 column via .iloc truncates or
# emits a FutureWarning in modern pandas.
Err = []  # mean absolute training error per epoch
Net_in=DataFrame(0.6,index=['input1','input2','theata'],columns=['a'])
Out_in=DataFrame(0.0,index=['input1','input2','input3','input4','theata'],columns=['a'])
Net_in.iloc[2,0]=-1   # bias input of the hidden layer
Out_in.iloc[4,0]=-1   # bias input of the output layer
W_mid=DataFrame(0.5,index=['input1','input2','theata'],columns=['mid1','mid2','mid3','mid4'])
W_out=DataFrame(0.5,index=['input1','input2','input3','input4','theata'],columns=['a'])
W_mid_delta=DataFrame(0.0,index=['input1','input2','theata'],columns=['mid1','mid2','mid3','mid4'])
W_out_delta=DataFrame(0.0,index=['input1','input2','input3','input4','theata'],columns=['a'])
# Training: online (per-sample) gradient descent for 1000 epochs.
n=len(data_tr)
for j in range(1000):
    error = []
    for i in range(n):
        Net_in.iloc[0,0]=data_tr.iloc[i,0]
        Net_in.iloc[1,0]=data_tr.iloc[i,1]
        real=data_tr.iloc[i,2]
        # Forward pass: hidden activations, then the single output unit.
        for t in range(0,4):
            Out_in.iloc[t,0]=sigmoid(sum(W_mid.iloc[:,t]*Net_in.iloc[:,0]))
        res=sigmoid(sum(Out_in.iloc[:,0]*W_out.iloc[:,0]))
        error.append(abs(res-real))
        print(i,':',res,real)
        # Backward pass: compute BOTH delta matrices from the CURRENT
        # weights, then apply the updates.  (The original updated W_out
        # first and back-propagated with the already-updated weights,
        # which deviates from standard backpropagation.)
        W_out_delta.iloc[:,0]=yita*res*(1-res)*(real-res)*Out_in.iloc[:,0]
        W_out_delta.iloc[4,0]=-(yita*res*(1-res)*(real-res))  # bias row: input is -1
        for g in range(0,4):
            W_mid_delta.iloc[:,g]=yita*Out_in.iloc[g,0]*(1-Out_in.iloc[g,0])*W_out.iloc[g,0]*res*(1-res)*(real-res)*Net_in.iloc[:,0]
            W_mid_delta.iloc[2,g]=-(yita*Out_in.iloc[g,0]*(1-Out_in.iloc[g,0])*W_out.iloc[g,0]*res*(1-res)*(real-res))
        W_out=W_out+W_out_delta
        W_mid=W_mid+W_mid_delta
    Err.append(np.mean(error))
print(W_mid,W_out)
plt.plot(Err)  # mean absolute error on the training set, per epoch
plt.show()
plt.close()
# ---- Prediction on the held-out set ----
# Forward-pass every test row through the trained network and print the
# raw network output (this dataset carries no target column to compare to).
k=len(data_te)
Net_in.iloc[2,0]=-1   # re-assert the bias inputs
Out_in.iloc[4,0]=-1
for i in range(k):
    Net_in.iloc[0,0]=data_te.iloc[i,0]
    Net_in.iloc[1,0]=data_te.iloc[i,1]
    for h in range(4):
        # hidden-unit activation for unit h
        Out_in.iloc[h,0]=sigmoid(sum(W_mid.iloc[:,h]*Net_in.iloc[:,0]))
    res=sigmoid(sum(Out_in.iloc[:,0]*W_out.iloc[:,0]))
    print('第'+str(i)+'个预测值为: '+str(res))

        

 运行结果如下:

 

3008 

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值