# BP (backpropagation) neural network implementation
# made by wanghanpu
import numpy as np
import math
import random
# Layer sizes (set to match the 3-element input and target used in main_way())
InputNum = 3
HiddenNum = 3
OutputNum = 3
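# Architecture: a fully connected InputNum-HiddenNum-OutputNum network with ReLU
# activations. Each Node stores its own activation value plus the weight vector
# connecting it to every node of the NEXT layer, so inLayer[i].W[j] is the weight
# from input node i to hidden node j, and hiddenLayer[j].W[k] is the weight from
# hidden node j to output node k.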
# Definition of a single network node
class Node:
    def __init__(self, Number):
        # Activation value of this node
        self.value = 0
        # Randomly initialized weights to the next layer, drawn from (-0.01, 0.01)
        self.W = (2 * np.random.random_sample(Number) - 1) * 0.01
# Definition of the BP neural network
class Net:
    def __init__(self):
        # Input layer: each input node holds weights to the HiddenNum hidden nodes
        self.inLayer = [Node(HiddenNum) for _ in range(InputNum)]
        # Hidden layer: each hidden node holds weights to the OutputNum output nodes,
        # so the hidden-to-output weight matrix has shape HiddenNum x OutputNum
        self.hiddenLayer = [Node(OutputNum) for _ in range(HiddenNum)]
        # Output layer: output nodes have no outgoing weights
        self.outputLayer = [Node(0) for _ in range(OutputNum)]
        # Bias vectors for the hidden and output layers
        self.b0 = np.zeros(HiddenNum)
        self.b1 = np.zeros(OutputNum)
        self.learning_rate = 0.01
        self.epoches = 1000
    # ReLU activation function
    def relu(self, x):
        if x >= 0:
            return x
        else:
            return 0
    # Loss function: mean squared error over the output nodes
    def loss(self, y_label, y):
        loss = 0
        for i in range(len(y)):
            loss += 1 / 2 * (y_label[i] - y[i]) * (y_label[i] - y[i])
        loss = loss / len(y)
        return loss
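    # Note: train() below does not call loss(); if you want to monitor convergence,
    # loss(real_result, [node.value for node in self.outputLayer]) could be computed
    # after each forward pass (an optional addition, not part of the original flow).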
    # Forward pass
    def forward(self, input):
        # Load the input data into the input layer
        for i in range(InputNum):
            self.inLayer[i].value = input[i]
        # Compute the hidden layer values
        for i in range(HiddenNum):
            k = 0
            for j in range(InputNum):
                # Accumulate the pre-activation value of hidden node i
                k += self.inLayer[j].value * self.inLayer[j].W[i]
            # Apply the activation function and store the result as the next layer's input
            self.hiddenLayer[i].value = self.relu(k)
        # Compute the output layer values
        for i in range(OutputNum):
            p = 0
            for j in range(HiddenNum):
                p += self.hiddenLayer[j].value * self.hiddenLayer[j].W[i]
            self.outputLayer[i].value = self.relu(p)
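    # Note: the bias vectors b0 and b1 defined in __init__ are not added to the
    # pre-activation sums above; the forward pass is purely weighted sums plus ReLU.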
    # Update the weights by backpropagating the error
    def backward(self, real_result):
        # --- Gradients for the hidden-to-output weights ---
        # Step 1: dloss/dyo, derivative of the loss w.r.t. each output value
        dloss_dyo = np.zeros(OutputNum)
        for i in range(OutputNum):
            dloss_dyo[i] = -(real_result[i] - self.outputLayer[i].value)
        # Step 2: dyo/dz2, the ReLU derivative at the output layer (1 if the node is active, else 0)
        dyo_dz2 = np.zeros(OutputNum)
        for i in range(OutputNum):
            if self.outputLayer[i].value > 0:
                dyo_dz2[i] = 1
            else:
                dyo_dz2[i] = 0
        # Step 3: dz2/dw2, derivative of each output pre-activation w.r.t. its incoming
        # weight, i.e. the value of the hidden node the weight starts from
        dyo_dw2 = np.zeros(shape=(HiddenNum, OutputNum))
        for i in range(OutputNum):
            for j in range(HiddenNum):
                dyo_dw2[j][i] = self.hiddenLayer[j].value
        # Step 4: dw2 (shape HiddenNum x OutputNum) via the chain rule
        dw2 = np.zeros(shape=(HiddenNum, OutputNum))
        for i in range(HiddenNum):
            for j in range(OutputNum):
                dw2[i][j] = dloss_dyo[j] * dyo_dz2[j] * dyo_dw2[i][j]
        # --- Gradients for the input-to-hidden weights ---
        # Step 1: dloss/dz2
        dloss_dz2 = np.zeros(OutputNum)
        for i in range(OutputNum):
            dloss_dz2[i] = dloss_dyo[i] * dyo_dz2[i]
        # Step 2: dloss/dh1, summing the contribution of every output node
        dloss_dh1 = np.zeros(HiddenNum)
        for i in range(HiddenNum):
            for j in range(OutputNum):
                dloss_dh1[i] += dloss_dz2[j] * self.hiddenLayer[i].W[j]
        # Step 3: dh1/dz1, the ReLU derivative at the hidden layer
        dh1_dz1 = np.zeros(HiddenNum)
        for i in range(HiddenNum):
            if self.hiddenLayer[i].value > 0:
                dh1_dz1[i] = 1
            else:
                dh1_dz1[i] = 0
        # Step 4: dz1/dw1, the input value the weight starts from
        dz1_dw1 = np.zeros(InputNum)
        for i in range(InputNum):
            dz1_dw1[i] = self.inLayer[i].value
        # Step 5: dloss/dw1 (shape InputNum x HiddenNum) via the chain rule
        dloss_dw1 = np.zeros(shape=(InputNum, HiddenNum))
        for i in range(InputNum):
            for j in range(HiddenNum):
                dloss_dw1[i][j] = dloss_dh1[j] * dh1_dz1[j] * dz1_dw1[i]
        # Gradient descent step: w = w - lr * dloss/dw
        for i in range(HiddenNum):
            for j in range(OutputNum):
                self.hiddenLayer[i].W[j] = self.hiddenLayer[i].W[j] - self.learning_rate * dw2[i][j]
        for i in range(InputNum):
            for j in range(HiddenNum):
                self.inLayer[i].W[j] = self.inLayer[i].W[j] - self.learning_rate * dloss_dw1[i][j]
    # The bias update (b = b - lr * dloss/db) is not implemented, so b0 and b1 stay at zero.
    # Train on the data set
    def train(self, input, real_result, epoches):
        for i in range(0, epoches):
            self.forward(input)
            self.backward(real_result)
            if i % 200 == 0:
                print(i)
    # Print the current input-layer values and weights (called before and after training)
    def printresult(self):
        for i in range(InputNum):
            print(self.inLayer[i].value)
        for i in range(InputNum):
            print(self.inLayer[i].W)
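# A minimal usage sketch (an addition, not part of the original flow): train a fresh
# network on the same data used in main_way() below and print the output layer's
# values after a final forward pass. The name demo_forward is arbitrary.
def demo_forward():
    net = Net()
    net.train([1, 2, 3], [0.2, 0.3, 0.5], 1000)
    net.forward([1, 2, 3])
    print([node.value for node in net.outputLayer])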
# Main entry point
def main_way():
    input = [1, 2, 3]
    real_result = [0.2, 0.3, 0.5]
    BPNN = Net()
    # Print the initial weights
    BPNN.printresult()
    BPNN.train(input, real_result, 1000)
    # Print the weights after training
    BPNN.printresult()
# Run the main entry point when the script is executed directly
if __name__ == "__main__":
    main_way()