Backpropagation
Backpropagation is a method used in artificial neural networks to calculate the gradient of the error with respect to each weight, which is then used to update the weights of the network. It is commonly used to train deep neural networks, a term referring to neural networks with more than one hidden layer.
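Before wiring up the full network below, here is a minimal numeric sketch of the idea for a single tanh neuron with squared error; all variable names and values are illustrative only.
import math
# One-weight instance of the chain rule that backpropagation applies at every layer:
# y = tanh(w * x), error E = 0.5 * (t - y)**2, hence dE/dw = -(t - y) * (1 - y**2) * x.
x, w, t = 0.5, 0.3, 1.0             # hypothetical input, weight, and target
y = math.tanh(w * x)                # forward pass
grad = -(t - y) * (1 - y**2) * x    # backward pass: gradient of E with respect to w
w -= 0.1 * grad                     # gradient-descent step with learning rate 0.1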
import math
import random
import string
import matplotlib.pyplot as plt
import numpy as np
random.seed(0)
rand(a, b) returns a random number in the range [a, b).
def rand(a, b):
    return (b - a) * random.random() + a
print(rand(6, 10))
9.377687406100193
Build an I x J matrix filled with a given value.
def makeMatrix(I, J, fill=0.0):
    m = []
    for i in range(I):
        m.append([fill] * J)
    return m
m = makeMatrix(3, 5, 4.87)
print(m)
[[4.87, 4.87, 4.87, 4.87, 4.87], [4.87, 4.87, 4.87, 4.87, 4.87], [4.87, 4.87, 4.87, 4.87, 4.87]]
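Since NumPy is imported anyway, the same constant matrix could also be built in one call; this is only an aside and is not used by the network code below.
np.full((3, 5), 4.87).tolist()   # NumPy equivalent of makeMatrix(3, 5, 4.87)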
Define the activation function and its derivative.
def sigmoid(x):
    # despite the name, tanh is used as the activation here
    return math.tanh(x)

def dsigmoid(y):
    # derivative of tanh, expressed in terms of the output y = tanh(x)
    return 1.0 - y**2
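Note that dsigmoid takes the activation's output y rather than the raw input x, so the derivative at a point x is dsigmoid(sigmoid(x)). A quick, purely illustrative check:
x = 0.5
y = sigmoid(x)                  # y = tanh(x)
print(dsigmoid(y))              # 1 - tanh(x)**2, the derivative of tanh at x
print(1.0 / math.cosh(x)**2)    # same value, written as sech(x)**2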
Plot them to have a look.
numpy.arange() takes a start, a stop, and a step.
numpy.linspace() takes a start, a stop, and the number of points.
Xaxis = np.arange(-10., 10., 0.2)
Yaxis = []
Zaxis = []
for i in Xaxis:
    Yaxis.append(sigmoid(i))
    Zaxis.append(dsigmoid(sigmoid(i)))  # dsigmoid expects the activation's output, not the raw input
plt.plot(Xaxis, Yaxis)
(figure: the tanh activation curve over [-10, 10])
plt.plot(Xaxis, Zaxis)
(figure: the derivative of tanh over [-10, 10])
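As noted above, numpy.linspace() takes a point count instead of a step; a sketch of an equivalent grid with the same 0.2 spacing (not used further):
Xaxis2 = np.linspace(-10., 10., 101)   # 101 evenly spaced points, endpoints included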
Create the NN
The network's weights start out as random numbers.
class neuralNetwork:
    """A three-layer back-propagation network."""

    def __init__(self, ni, nh, no):
        # number of input, hidden, and output nodes (+1 input node for the bias)
        self.ni = ni + 1
        self.nh = nh
        self.no = no
        # activations for all nodes (vectors)
        self.ai = [1.0] * self.ni
        self.ah = [1.0] * self.nh
        self.ao = [1.0] * self.no
        # weight matrices
        self.wi = makeMatrix(self.ni, self.nh)
        self.wo = makeMatrix(self.nh, self.no)
        # set them to random values
        for i in range(self.ni):
            for j in range(self.nh):
                self.wi[i][j] = rand(-0.2, 0.2)
        for j in range(self.nh):
            for k in range(self.no):
                self.wo[j][k] = rand(-2.0, 2.0)
        # finally, matrices for the momentum terms (last weight changes)
        self.ci = makeMatrix(self.ni, self.nh)
        self.co = makeMatrix(self.nh, self.no)

    def weights(self):
        print('Input-layer weights:')
        for i in range(self.ni):
            print(self.wi[i])
        print()
        print('Output-layer weights:')
        for j in range(self.nh):
            print(self.wo[j])

    def nodes(self):
        print('Activations per layer:')
        print(self.ai)
        print(self.ah)
        print(self.ao)
n = neuralNetwork(2, 2, 1)
n.weights()
Input-layer weights:
[0.10318176117612099, -0.031771367667662004]
[-0.09643329988281467, 0.004509888547443414]
[-0.03802634501983429, 0.11351943561390904]
Output-layer weights:
[-0.7867490956842902]
[-0.09361218339057675]
Training on input data will produce different weights. The update method below performs the forward pass for one input pattern.
def update(self, inputs):
    if len(inputs) != self.ni - 1:
        raise ValueError('wrong number of inputs!')
    # activate the input layer (the extra bias node stays at 1.0)
    for i in range(self.ni - 1):
        self.ai[i] = inputs[i]
    # activate the hidden layer
    for j in range(self.nh):
        total = 0.0
        for i in range(self.ni):
            total = total + self.ai[i] * self.wi[i][j]
        self.ah[j] = sigmoid(total)
    # activate the output layer
    for k in range(self.no):
        total = 0.0
        for j in range(self.nh):
            total = total + self.ah[j] * self.wo[j][k]
        self.ao[k] = sigmoid(total)
    return self.ao[:]
neuralNetwork.update = update
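To see the forward pass in action, one way (purely illustrative) is to inspect the node activations before and after a call:
n.nodes()          # all activations still sit at their initial value of 1.0
n.update([1, 0])   # forward pass on one input pattern
n.nodes()          # input, hidden, and output activations are now filled in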
data = [
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
]
for x in data:
    print(x, '->', n.update(x))