# Final neural network implementation
# neuralNetwork.py
import math
import numpy
import funcSet
def getLoss(errOut, corOut):
    """Return the squared-error loss: 0.5 * sum((errOut - corOut)**2).

    errOut -- network output vector (numpy array)
    corOut -- expected/correct output vector (numpy array)
    """
    diff = errOut - corOut
    return numpy.sum(diff * diff) / 2
class neuralLine:
    """One fully-connected layer, kept as a node in a doubly linked list.

    Holds the weight matrix (inputLen x hiddenLen), a bias vector
    (`inherent`), an activation function bundle (`activeFunc` exposes
    .func and .derivativeFunc, where the derivative is expressed in terms
    of the ACTIVATED output), and a per-layer learning rate.
    """

    def __init__(self, inputLen, hiddenLen, activeFunc, studyRate):
        # Weights drawn uniform [0, 1); bias drawn from standard normal.
        self.matrix = numpy.random.rand(inputLen, hiddenLen)
        self.output = None          # activated output of the last forward pass
        self.inherent = numpy.random.randn(hiddenLen)  # bias vector
        self.activeFunc = activeFunc
        self.studyRate = studyRate
        self.next = None            # next layer in the chain
        self.pre = None             # previous layer in the chain
        self.d_loss_ho = None       # dLoss/dOutput cached by backpropagation()

    def input(self, inp):
        """Forward pass: output = f(inp @ W + b), stored in self.output."""
        self.output = self.activeFunc.func(numpy.dot(inp, self.matrix) + self.inherent)

    def update(self, inp):
        """Gradient-descent step on weights and bias.

        inp -- the input vector this layer saw on the forward pass.
        Uses the dLoss/dOutput cached by backpropagation().
        """
        d_ho_h = self.activeFunc.derivativeFunc(self.output)
        d_loss_h = self.d_loss_ho * d_ho_h          # dLoss/d(pre-activation)
        self.inherent -= self.studyRate * d_loss_h
        # dLoss/dW is the outer product of the layer input and d_loss_h.
        d_loss_inToH = numpy.outer(inp, d_loss_h)
        self.matrix -= self.studyRate * d_loss_inToH

    def backpropagation(self, d_loss_ho):
        """Cache dLoss/dOutput and return dLoss/dInput for the previous layer.

        BUG FIX: the gradient sent backwards must include THIS layer's
        activation derivative: dLoss/dIn = W @ (dLoss/dOut * f'(out)).
        The original returned W @ dLoss/dOut, which is only correct for the
        identity ("straight") activation and silently corrupts training with
        sigmoid/tanh layers.
        """
        self.d_loss_ho = d_loss_ho
        d_loss_h = d_loss_ho * self.activeFunc.derivativeFunc(self.output)
        return numpy.dot(self.matrix, d_loss_h)
class neuralNet:
    """A chain of neuralLine layers linked head-to-tail.

    Layer sizes, activation bundles and learning rates are given as
    parallel arrays; `netHead` is the first layer, `netEnd` the last.
    """

    def __init__(self, inputLen, neuralLenArray, funcArray, studyRateArray):
        head = neuralLine(inputLen, neuralLenArray[0], funcArray[0], studyRateArray[0])
        self.netHead = head
        self.netEnd = head
        for idx in range(1, len(neuralLenArray)):
            layer = neuralLine(neuralLenArray[idx - 1], neuralLenArray[idx],
                               funcArray[idx], studyRateArray[idx])
            layer.pre = self.netEnd
            self.netEnd.next = layer
            self.netEnd = layer

    def input(self, inp):
        """Forward pass: feed inp through every layer in order."""
        layer = self.netHead
        signal = inp
        while layer is not None:
            layer.input(signal)
            signal = layer.output
            layer = layer.next

    def train(self, inputs, outputs):
        """Run up to 10000 epochs of per-sample training.

        Stops early once the summed loss over one epoch drops below 1e-6;
        prints the running epoch count and cost each epoch.
        """
        epoch = 0
        while epoch < 10000:
            cost = 0
            for sample, target in zip(inputs, outputs):
                # start single input train
                self.singleTrain(sample, target)
                cost += getLoss(self.netEnd.output, target)
            epoch += 1
            if cost < 0.000001:
                break
            print('circuCount = %d, cost = ' % epoch + str(cost))

    def singleTrain(self, inp, output):
        """One forward pass, one full backward pass, one weight update."""
        self.input(inp)
        # Backward: propagate dLoss/dOutput from the tail to the head.
        grad = self.netEnd.output - output
        layer = self.netEnd
        while layer is not None:
            grad = layer.backpropagation(grad)
            layer = layer.pre
        # Update front-to-back, feeding each layer the input it saw
        # on the forward pass (update() does not touch .output).
        layer = self.netHead
        feed = inp
        while layer is not None:
            layer.update(feed)
            feed = layer.output
            layer = layer.next
if __name__ == '__main__':
    # Toy identity-mapping problem: the network is trained to reproduce
    # its 2-component input at the output.
    inputs = numpy.array([[0, 1], [-1, 0], [0, 0], [1, -1]])
    outputs = numpy.array([[0, 1], [-1, 0], [0, 0], [1, -1]])
    neuralLenArray = [4, 2]  # one hidden layer of 4 units, output layer of 2
    # Linear ("straight") activations in both layers, so the task is learnable
    # exactly; learning rate 0.1 per layer.
    funcArray = [funcSet.straightSet, funcSet.straightSet]
    studyRateArray = [0.1, 0.1]
    nnet = neuralNet(len(inputs[0]), neuralLenArray, funcArray, studyRateArray)
    nnet.train(inputs, outputs)
    # After training, netEnd.output holds the prediction for the LAST sample.
    print(nnet.netEnd.output)
# funcSet.py
import math
import numpy
class FuncSet:
    """Bundle pairing an activation function with its derivative.

    The derivative is conventionally expressed in terms of the activation's
    OUTPUT value (see d_sigmoid / d_tanh below).
    """

    def __init__(self, func, derivativeFunc):
        self.func = func
        self.derivativeFunc = derivativeFunc
        self.name = str(func)  # human-readable tag, e.g. "<function sigmoid ...>"
def d_sigmoid(x):
    """Sigmoid derivative, expressed in terms of the sigmoid OUTPUT x."""
    return (1 - x) * x
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e**-x); elementwise on numpy arrays."""
    return 1 / (math.e ** -x + 1)
def tanh(x):
    """Hyperbolic tangent; elementwise on scalars and numpy arrays.

    BUG FIX: the original computed (e**x - e**-x) / (e**x + e**-x)
    directly, which overflows for |x| greater than ~355 (OverflowError on
    scalars, inf/inf = nan on arrays). numpy.tanh is numerically stable
    over the full float range and returns the same values elsewhere.
    """
    return numpy.tanh(x)
def d_tanh(x):
    """Tanh derivative, expressed in terms of the tanh OUTPUT x."""
    return 1 - x * x
def straight(x):
    """Identity (linear) activation: returns x unchanged."""
    return x
def d_straight(x):
    """Derivative of the identity: constant 1 (broadcasts over arrays)."""
    return 1
def reLu(x):
    """Rectified linear unit: elementwise max(x, 0)."""
    return numpy.maximum(x, 0)
def d_reLu(x):
    """ReLU derivative: elementwise 1 where x > 0, else 0, preserving dtype.

    Improvement over the original: the Python element loop only handled
    1-D arrays and ran at interpreter speed; the boolean-mask cast is
    vectorized and works on arrays of any rank. (Non-array input is
    coerced to a numpy array.)
    """
    arr = numpy.asarray(x)
    return (arr > 0).astype(arr.dtype)
# Ready-made activation/derivative bundles for building networks.
sigmoidSet = FuncSet(sigmoid, d_sigmoid)
straightSet = FuncSet(straight, d_straight)
tanhSet = FuncSet(tanh, d_tanh)
reLuSet = FuncSet(reLu, d_reLu)
# Bundles considered usable for backpropagation.
# NOTE(review): reLuSet is excluded from this list — presumably intentional
# (its derivative only handled 1-D arrays); confirm whether ReLU training
# is meant to be supported.
allBackAbleSets = [sigmoidSet, straightSet, tanhSet]