def setup(self, ni, nh, no):
    """Initialize the network topology, cell activations, weights and momentum matrices.

    :param ni: number of input nodes (one extra bias node is added internally)
    :param nh: number of hidden nodes
    :param no: number of output nodes
    """
    self.input_n = ni + 1  # +1 for the always-on bias cell
    self.hidden_n = nh
    self.output_n = no
    # init cells (all activations start at 1.0; the bias cell stays 1.0)
    self.input_cells = [1.0] * self.input_n
    self.hidden_cells = [1.0] * self.hidden_n
    self.output_cells = [1.0] * self.output_n
    # init weights
    self.input_weights = make_matrix(self.input_n, self.hidden_n)
    self.output_weights = make_matrix(self.hidden_n, self.output_n)
    # random activate: small range into the hidden layer, wider into the output layer
    for i in range(self.input_n):
        for h in range(self.hidden_n):
            self.input_weights[i][h] = rand(-0.2, 0.2)
    for h in range(self.hidden_n):
        for o in range(self.output_n):
            self.output_weights[h][o] = rand(-2.0, 2.0)
    # init correction matrices (previous weight changes, used as momentum terms)
    self.input_correction = make_matrix(self.input_n, self.hidden_n)
    self.output_correction = make_matrix(self.hidden_n, self.output_n)
定义predict方法进行一次前馈, 并返回输出:
def predict(self, inputs):
    """Run one feed-forward pass and return a copy of the output activations.

    :param inputs: input vector of length ``self.input_n - 1`` (bias excluded)
    :return: new list holding the output-layer activations
    """
    # activate input layer; the last cell is the bias and keeps its value 1.0
    for i in range(self.input_n - 1):
        self.input_cells[i] = inputs[i]
    # activate hidden layer
    for j in range(self.hidden_n):
        total = 0.0
        for i in range(self.input_n):
            total += self.input_cells[i] * self.input_weights[i][j]
        self.hidden_cells[j] = sigmoid(total)
    # activate output layer
    for k in range(self.output_n):
        total = 0.0
        for j in range(self.hidden_n):
            total += self.hidden_cells[j] * self.output_weights[j][k]
        self.output_cells[k] = sigmoid(total)
    # return a copy so callers cannot mutate internal state
    return self.output_cells[:]
定义back_propagate方法定义一次反向传播和更新权值的过程, 并返回最终预测误差:
def back_propagate(self, case, label, learn, correct):
    """Run one feed-forward + back-propagation step and update the weights in place.

    :param case: input vector for this sample
    :param label: target output vector for this sample
    :param learn: learning rate applied to the gradient term
    :param correct: momentum factor applied to the previous correction
    :return: total squared error, sum of 0.5 * (target - output)**2 over outputs
    """
    # feed forward to refresh all cell activations
    self.predict(case)
    # get output layer error (delta = f'(out) * (target - out))
    # NOTE(review): helper name `sigmod_derivate` (sic) is defined elsewhere in the file
    output_deltas = [0.0] * self.output_n
    for o in range(self.output_n):
        error = label[o] - self.output_cells[o]
        output_deltas[o] = sigmod_derivate(self.output_cells[o]) * error
    # get hidden layer error by propagating output deltas back through the weights
    hidden_deltas = [0.0] * self.hidden_n
    for h in range(self.hidden_n):
        error = 0.0
        for o in range(self.output_n):
            error += output_deltas[o] * self.output_weights[h][o]
        hidden_deltas[h] = sigmod_derivate(self.hidden_cells[h]) * error
    # update output weights: gradient step plus momentum from the last correction
    for h in range(self.hidden_n):
        for o in range(self.output_n):
            change = output_deltas[o] * self.hidden_cells[h]
            self.output_weights[h][o] += learn * change + correct * self.output_correction[h][o]
            self.output_correction[h][o] = change
    # update input weights the same way
    for i in range(self.input_n):
        for h in range(self.hidden_n):
            change = hidden_deltas[h] * self.input_cells[i]
            self.input_weights[i][h] += learn * change + correct * self.input_correction[i][h]
            self.input_correction[i][h] = change
    # get global error for this sample
    error = 0.0
    for o in range(len(label)):
        error += 0.5 * (label[o] - self.output_cells[o]) ** 2
    return error
定义train方法控制迭代, 该方法可以修改最大迭代次数、学习率、矫正率三个参数.
def train(self, cases, labels, limit=10000, learn=0.05, correct=0.1):
    """Train the network by repeated back-propagation over the whole data set.

    :param cases: list of input vectors
    :param labels: list of target vectors, parallel to ``cases``
    :param limit: maximum number of epochs
    :param learn: learning rate passed to back_propagate
    :param correct: momentum factor passed to back_propagate
    """
    # fix: the original reused loop variable `i` for both the epoch loop and
    # the sample loop; use distinct names so the two loops cannot collide
    for _ in range(limit):
        error = 0.0  # per-epoch error; accumulated but currently unused
        for case, label in zip(cases, labels):
            error += self.back_propagate(case, label, learn, correct)
编写test方法,演示如何使用神经网络学习异或逻辑:
def test(self):
    """Demonstrate the network by learning the XOR function and printing predictions."""
    cases = [
        [0, 0],
        [0, 1],
        [1, 0],
        [1, 1],
    ]
    labels = [[0], [1], [1], [0]]  # XOR truth table targets
    self.setup(2, 5, 1)  # 2 inputs, 5 hidden cells, 1 output
    self.train(cases, labels, 10000, 0.05, 0.1)
    for case in cases:
        print(self.predict(case))