import numpy
import funcSet
def get_d_loss_o(errOut, corOut):
    """Gradient of the quadratic loss with respect to the network output.

    NOTE(review): this divides only by the output width (number of columns),
    while getCost also divides by the batch size, so the returned gradient is
    a scaled version of the exact derivative of getCost — confirm intended.
    """
    output_width = len(errOut[0])
    return (errOut - corOut) / output_width
def getCost(errOut, corOut):
    """Mean of 0.5 * (errOut - corOut)**2 over all batch rows and output columns."""
    half_squared_error = 1 / 2 * (errOut - corOut) ** 2
    total = numpy.sum(half_squared_error)
    # Normalize by batch size, then by output width (sequential division,
    # matching the original evaluation order).
    return total / len(errOut) / len(errOut[0])
class NeuralLine:
    """One fully-connected layer, stored as a node of a doubly linked list.

    Holds the weight matrix, the bias row vector, the activation object
    (must expose ``func`` and ``derivativeFunc``), and the moment buffers
    used by the Adam-style optimizer.
    """

    def __init__(self, inputLen, hiddenLen, activeFunc, studyRate):
        # Weights are uniform random in [0, 1); biases start at zero.
        self.matrix = numpy.random.rand(inputLen, hiddenLen)
        self.bias = numpy.zeros((1, hiddenLen))
        self.activeFunc = activeFunc
        self.studyRate = studyRate
        # Linked-list wiring; filled in by the owning network.
        self.next = None
        self.pre = None
        # Cached forward activation and loss gradient w.r.t. this layer's
        # pre-activation (set by input()/backpropagation()).
        self.outputs = None
        self.d_loss_h = None
        # First (v*) and second (s*) moment estimates for weights and bias.
        self.vDw = numpy.zeros((inputLen, hiddenLen))
        self.sDw = numpy.zeros((inputLen, hiddenLen))
        self.vDb = numpy.zeros((1, hiddenLen))
        self.sDb = numpy.zeros((1, hiddenLen))

    def input(self, inputs):
        """Forward pass: outputs = activeFunc(inputs @ matrix + bias)."""
        pre_activation = numpy.dot(inputs, self.matrix)
        pre_activation += self.bias
        self.outputs = self.activeFunc.func(pre_activation)

    def adamUpdate(self, inputs, step):
        """Adam-style step (shared 0.9 decay for both moments, eps = 1e-10).

        NOTE(review): a single bias-correction factor (based on 0.9) is
        applied to the whole update, unlike textbook Adam which corrects the
        two moments separately — confirm intended.
        """
        grad_b = numpy.sum(self.d_loss_h, axis=0, keepdims=True)
        self.vDb = 0.9 * self.vDb + 0.1 * grad_b
        self.sDb = 0.9 * self.sDb + 0.1 * grad_b ** 2
        correction = 1 / numpy.sqrt(1 - 0.9 ** step)
        self.bias -= self.studyRate * correction * self.vDb / numpy.sqrt(self.sDb + 1e-10)
        grad_w = numpy.dot(inputs.T, self.d_loss_h)
        self.vDw = 0.9 * self.vDw + 0.1 * grad_w
        self.sDw = 0.9 * self.sDw + 0.1 * grad_w ** 2
        self.matrix -= self.studyRate * correction * self.vDw / numpy.sqrt(self.sDw + 1e-10)

    def update(self, inputs, step, optimize='adam'):
        """Apply one gradient step; plain SGD unless optimize == 'adam'."""
        if optimize == 'adam':
            return self.adamUpdate(inputs, step)
        grad_b = numpy.sum(self.d_loss_h, axis=0, keepdims=True)
        self.bias -= self.studyRate * grad_b
        grad_w = numpy.dot(inputs.T, self.d_loss_h)
        self.matrix -= self.studyRate * grad_w

    def backpropagation(self, d_loss_ho):
        """Turn dLoss/dOutput into dLoss/dPreactivation; return dLoss/dInput.

        The derivative is evaluated on the post-activation outputs (tanh-style
        derivative expressed in terms of the activation value).
        """
        self.d_loss_h = d_loss_ho * self.activeFunc.derivativeFunc(self.outputs)
        return numpy.dot(self.d_loss_h, self.matrix.T)
class NeuralNet:
    """A chain of NeuralLine layers linked head-to-end, with a training loop."""

    def __init__(self, inputLen, neuralLenArray, funcArray, studyRateArray):
        self.step = 0
        self.netHead = NeuralLine(inputLen, neuralLenArray[0], funcArray[0], studyRateArray[0])
        tail = self.netHead
        for idx in range(1, len(neuralLenArray)):
            layer = NeuralLine(neuralLenArray[idx - 1], neuralLenArray[idx],
                               funcArray[idx], studyRateArray[idx])
            layer.pre = tail
            tail.next = layer
            tail = layer
        self.netEnd = tail

    def input(self, inputs):
        """Forward-propagate inputs through every layer in order."""
        carried = inputs
        layer = self.netHead
        while layer is not None:
            layer.input(carried)
            carried = layer.outputs
            layer = layer.next

    def updateStudyRate(self, rate):
        """Scale every layer's learning rate by the given factor."""
        layer = self.netHead
        while layer is not None:
            layer.studyRate *= rate
            layer = layer.next

    def train(self, inputs, outputs):
        """Run up to 10000 training steps; decay the learning rate whenever
        the cost increases, and stop early once the cost is tiny."""
        oldCost = 0
        while self.step < 10000:
            self.step += 1
            self.setTrain(inputs, outputs)
            cost = getCost(self.netEnd.outputs, outputs)
            if cost < 1e-10:
                break
            if 0 < oldCost < cost:
                # Cost went up: shrink every layer's learning rate.
                self.updateStudyRate(0.8)
                print('oldCost < cost!!!', oldCost, ': ', cost, ' step: ', self.step, ' studyRate: ', self.netHead.studyRate)
            oldCost = cost
        print('self.step = %d, cost = ' % self.step + str(cost))

    def setTrain(self, inputs, outputs):
        """One training step: forward pass, full backpropagation, then an
        in-order weight update for every layer."""
        self.input(inputs)
        grad = get_d_loss_o(self.netEnd.outputs, outputs)
        # Backward sweep from the last layer to the first.
        layer = self.netEnd
        while layer is not None:
            grad = layer.backpropagation(grad)
            layer = layer.pre
        # Forward sweep applying updates; each layer sees its own input batch.
        feed = inputs
        layer = self.netHead
        while layer is not None:
            layer.update(feed, self.step)
            feed = layer.outputs
            layer = layer.next

    def test(self, inputs):
        """Forward-propagate inputs and print the final layer's outputs."""
        self.input(inputs)
        print(self.netEnd.outputs)
def test():
    """Demo: train a 2-4-1 network on an XOR-like toy set and print results."""
    samples = numpy.array([[0, 1], [1, 0], [0, 0], [1, 1], [0, 2], [2, 0], [-2, -2], [2, 2]])
    labels = numpy.array([[1], [1], [0], [0], [1], [1], [0], [0]])
    layer_sizes = [4, len(labels[0])]
    activations = [funcSet.tanhSet, funcSet.straightSet]
    learning_rates = [0.2, 0.1]
    nnet = NeuralNet(len(samples[0]), layer_sizes, activations, learning_rates)
    # Show the head layer's weights before and after training.
    print(nnet.netHead.matrix)
    nnet.train(samples, labels)
    print(nnet.netHead.matrix)
    print(nnet.netEnd.outputs)
# Run the demo training session only when executed as a script.
if __name__ == '__main__':
    test()