公式推导 ![](https://img-blog.csdnimg.cn/20210722163724438.jpg?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L21heWhoaGhoaGg=,size_16,color_FFFFFF,t_70)
挑一个好瓜
导入包
# --- Environment setup: imports, reproducibility, and plotting config ---
import pandas as pd
import numpy as np
np.set_printoptions(suppress=True)  # print arrays without scientific notation
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
seed = 2020
import random
np.random.seed(seed) # Numpy module.
random.seed(seed) # Python random module.
plt.rcParams['font.sans-serif'] = ['SimHei'] # use a CJK-capable font so Chinese labels render
plt.rcParams['axes.unicode_minus'] = False # keep the minus sign rendering correctly with that font
plt.close('all')
def sigmoid(x):
    """Element-wise logistic activation: 1 / (1 + exp(-x)).

    Accepts scalars or array-likes; the input is coerced to a float64
    ndarray before exponentiation.
    """
    z = np.array(x, dtype=np.float64)
    return 1.0 / (1.0 + np.exp(-z))
def d_sigmoid(x):
    """Sigmoid derivative expressed via the sigmoid's OUTPUT.

    If s = sigmoid(u), then ds/du = s * (1 - s); therefore this function
    expects the already-activated value s, not the pre-activation u.
    """
    return x * (1.0 - x)
def preprocess(data):
    """Encode, standardize, and split the watermelon DataFrame.

    data: DataFrame containing a '好瓜' (is-good-melon) label column.
    Returns (x, y): x is the standardized feature matrix,
    y is the label column reshaped to (n_samples, 1).
    Note: mutates `data` in place (object columns are label-encoded).

    Fix: removed a stray blog-citation line that had been pasted into the
    middle of the function body and broke the code.
    """
    # Map each non-numeric (object-dtype) column to integer codes 0..n-1,
    # where n is the number of distinct values in that column.
    for title in data.columns:
        if data[title].dtype == 'object':
            encoder = LabelEncoder()
            data[title] = encoder.fit_transform(data[title])
    # Standardize the features to zero mean / unit variance; the label is
    # kept as its raw encoded values.
    ss = StandardScaler()
    X = data.drop('好瓜', axis=1)
    Y = data['好瓜']
    X = ss.fit_transform(X)
    x, y = np.array(X), np.array(Y).reshape(Y.shape[0], 1)
    return x, y
BP公式
def accumulate_BP(x, y, dim=10, eta=0.8, max_iter=500):
    """Accumulated (batch) BP: one weight update per pass over ALL samples.

    x: (n_samples, n_features) inputs; y: (n_samples, 1) targets in [0, 1].
    dim: hidden-layer width; eta: learning rate; max_iter: training epochs.
    Returns (w1, w2, b1, b2): input->hidden weights, hidden->output weights,
    hidden thresholds of shape (dim,), output threshold of shape (1,).
    Side effects: prints the loss every epoch and shows a loss curve.

    Fix: the original updated b1/b2 with the raw per-sample gradient
    matrices e (n, dim) and g (n, 1); NumPy broadcasting silently turned
    the thresholds into per-sample matrices. The threshold gradients are
    now summed over samples so b1/b2 keep their shapes.
    """
    n_samples = x.shape[0]
    # Random init in [0, 1); use ndarrays so +/- below is element-wise
    # arithmetic rather than Python list operations.
    w1 = np.array([[random.random() for _ in range(dim)] for _ in range(x.shape[1])])
    b1 = np.array([random.random() for _ in range(dim)])
    w2 = np.array([[random.random()] for _ in range(dim)])
    b2 = np.array([random.random()])
    losslist = []
    for ite in range(max_iter):
        ## Forward pass (thresholds are SUBTRACTED, per the book's convention).
        u1 = np.dot(x, w1) - b1
        out1 = sigmoid(u1)
        u2 = np.dot(out1, w2) - b2
        out2 = sigmoid(u2)
        loss = np.mean(np.square(y - out2))/2
        losslist.append(loss)
        print('iter:%d loss:%.4f'%(ite,loss))
        ## Backward pass — equations (5.10)-(5.15) of the book.
        g = out2 * (1 - out2) * (y - out2)       # eq. (5.10): output-layer gradient
        e = out1 * (1 - out1) * np.dot(g, w2.T)  # eq. (5.15): hidden-layer gradient
        d_w1 = -np.dot(x.T, e)                   # eq. (5.13)
        d_w2 = -np.dot(out1.T, g)                # eq. (5.11)
        d_b1 = np.sum(e, axis=0)                 # eq. (5.14), summed over samples
        d_b2 = np.sum(g, axis=0)                 # eq. (5.12), summed over samples
        ## Update
        w1 = w1 - eta * d_w1
        w2 = w2 - eta * d_w2
        b1 = b1 - eta * d_b1
        b2 = b2 - eta * d_b2
    ## Loss curve
    plt.figure()
    plt.plot(losslist)
    plt.show()
    return w1, w2, b1, b2
def standard_BP(x, y, dim=10, eta=0.8, max_iter=500):
    """Standard (stochastic) BP: one weight update per single sample.

    x: (n_samples, n_features) inputs; y: (n_samples, 1) targets in [0, 1].
    dim: hidden-layer width; eta: learning rate; max_iter: epochs.
    Returns (w1, w2, b1, b2) as in accumulate_BP.
    Side effects: prints the per-sample loss and shows a loss curve.

    Fix: the original initialized the weights as Python lists, and
    `list += ndarray` EXTENDS the list (it does not add element-wise),
    so `w1 += ...` / `w2 += ...` doubled the weight containers on the
    first sample and crashed on the next. Weights are now ndarrays.
    """
    w1 = np.array([[random.random() for _ in range(dim)] for _ in range(x.shape[1])])
    b1 = np.array([random.random() for _ in range(dim)])
    w2 = np.array([[random.random()] for _ in range(dim)])
    b2 = np.array([random.random()])
    losslist = []
    for i in range(max_iter):
        for j in range(x.shape[0]):  # standard BP updates after every example
            alpha = np.dot(x[j], w1)       # hidden-layer input
            b = sigmoid(alpha - b1)        # hidden-layer output
            beta = np.dot(b, w2)           # output-layer input
            predictY = sigmoid(beta - b2)  # network output
            E = sum(((predictY-y[j])**2)/2)  # per-sample squared-error loss
            print(E)
            losslist.append(E)
            # Backward pass — equations (5.10)-(5.15) of the book.
            g = predictY * (1 - predictY) * (y[j] - predictY)  # eq. (5.10)
            e = b * (1 - b) * (np.dot(w2, g.T)).T              # eq. (5.15)
            w2 += eta * np.dot(b.reshape((dim, 1)), g.reshape((1, 1)))  # eq. (5.11)
            b2 -= eta * g                                      # eq. (5.12)
            w1 += eta * np.dot(x[j].reshape((x.shape[1], 1)), e.reshape((1, dim)))  # eq. (5.13)
            b1 -= eta * e                                      # eq. (5.14)
    # Loss curve
    plt.plot(losslist)
    plt.show()
    return w1, w2, b1, b2
# --- Script entry: load the watermelon 3.0 dataset (comma-delimited text
# --- file), drop the sample-id column ('编号'), preprocess, and train the
# --- network with accumulated BP.
data = pd.read_table('./watermelon30.txt',delimiter=',')
data.drop('编号',axis=1,inplace=True)
x,y = preprocess(data)
dim = 10
w1,w2,b1,b2 = accumulate_BP(x,y,dim)
# Inference helper: forward-propagate samples x through the trained network.
def predict(x, w1, w2, b1, b2):
    """Return the network's raw outputs (one score per sample) for inputs x,
    given trained weights w1/w2 and thresholds b1/b2.

    Same forward pass as training: thresholds are subtracted before the
    logistic activation (inlined here as 1 / (1 + exp(-z))).
    """
    hidden_in = np.dot(x, w1) - b1                     # hidden-layer input
    hidden_out = 1.0 / (1.0 + np.exp(-np.array(hidden_in, dtype=np.float64)))
    out_in = np.dot(hidden_out, w2) - b2               # output-layer input
    return 1.0 / (1.0 + np.exp(-np.array(out_in, dtype=np.float64)))
# Report the trained network's raw outputs over the full training set.
print(predict(x,w1,w2,b1,b2))
对BP神经网络前传和误差反向传播的概述和理解
BP算法基本思想:学习过程由信号的正向传播(求损失)和误差的反向传播(误差回传)两个过程组成
求误差:根据输入的样本,给定的初始化权重值W和偏置项的值b,计算最终输出值以及输出值与实际值之间的损失值。
回传误差:将输出以某种形式通过隐层向输入层逐层反传,并将误差分摊给各层的所有单元,从而获得各层单元的误差信号,此误差信号即作为修正各单元权值的依据。