2022.8.27

最终版神经网络实现

neuralNetwork.py

import math

import numpy
import funcSet


def getLoss(errOut, corOut):
    """Half squared-error loss summed over all output components."""
    diff = errOut - corOut
    return numpy.sum(diff ** 2) / 2


class neuralLine:
    """One fully-connected layer in a doubly linked chain of layers.

    Holds a weight matrix of shape (inputLen, hiddenLen), a bias vector
    (``inherent``), and an activation set providing ``func`` and
    ``derivativeFunc``.  NOTE: the derivative is expressed in terms of the
    layer OUTPUT (e.g. d_sigmoid(o) = o * (1 - o)), matching funcSet.py.
    """

    def __init__(self, inputLen, hiddenLen, activeFunc, studyRate):
        # Weights uniform in [0, 1); biases from a standard normal.
        self.matrix = numpy.random.rand(inputLen, hiddenLen)
        self.output = None
        self.inherent = numpy.random.randn(hiddenLen)
        self.activeFunc = activeFunc
        self.studyRate = studyRate
        # Doubly linked list pointers maintained by neuralNet.
        self.next = None
        self.pre = None
        # d(loss)/d(activated output), cached by backpropagation() for update().
        self.d_loss_ho = None

    def input(self, inp):
        """Forward pass: output = activeFunc(inp @ matrix + inherent)."""
        self.output = numpy.dot(inp, self.matrix)
        self.output += self.inherent
        self.output = self.activeFunc.func(self.output)

    def update(self, inp):
        """Gradient-descent step on bias and weights using the cached gradient.

        ``inp`` must be the same vector this layer saw in the forward pass.
        """
        # d(output)/d(pre-activation), written in terms of the output itself.
        d_ho_h = self.activeFunc.derivativeFunc(self.output)

        d_loss_h = self.d_loss_ho * d_ho_h

        self.inherent -= self.studyRate * d_loss_h

        # Outer product: d(loss)/d(matrix[j, k]) = inp[j] * d_loss_h[k].
        d_loss_inToH = numpy.dot(inp.reshape(len(inp), 1), d_loss_h.reshape(1, len(d_loss_h)))

        self.matrix -= self.studyRate * d_loss_inToH

    def backpropagation(self, d_loss_ho):
        """Cache d(loss)/d(output) and return d(loss)/d(previous layer output).

        BUG FIX: the gradient must be chained through this layer's activation
        derivative before being pushed back through the weights.  The original
        returned ``numpy.sum(self.matrix * d_loss_ho, axis=1)``, which skips
        the derivative and is only correct for the identity activation.
        """
        self.d_loss_ho = d_loss_ho
        d_loss_h = d_loss_ho * self.activeFunc.derivativeFunc(self.output)
        return numpy.dot(self.matrix, d_loss_h)


class neuralNet:
    """A feed-forward network built as a doubly linked chain of neuralLine layers.

    ``netHead`` receives the raw input; after a forward pass ``netEnd.output``
    holds the network's prediction.
    """

    def __init__(self, inputLen, neuralLenArray, funcArray, studyRateArray):
        # The first layer consumes the raw input vector.
        first = neuralLine(inputLen, neuralLenArray[0], funcArray[0], studyRateArray[0])
        self.netHead = first
        self.netEnd = first
        # Each following layer's input width is the previous layer's width.
        specs = zip(neuralLenArray, neuralLenArray[1:], funcArray[1:], studyRateArray[1:])
        for prevLen, curLen, act, rate in specs:
            layer = neuralLine(prevLen, curLen, act, rate)
            layer.pre = self.netEnd
            self.netEnd.next = layer
            self.netEnd = layer

    def input(self, inp):
        """Forward pass: thread the signal through every layer, head to end."""
        signal = inp
        layer = self.netHead
        while layer is not None:
            layer.input(signal)
            signal = layer.output
            layer = layer.next

    def train(self, inputs, outputs):
        """Run SGD over the whole dataset for up to 10000 epochs.

        Stops early once the summed per-epoch loss drops below 1e-6, then
        prints the epoch count and final cost.
        """
        epochCount = 0
        cost = 0
        for _ in range(10000):
            cost = 0
            for sample, target in zip(inputs, outputs):
                # start single input train
                self.singleTrain(sample, target)
                cost += getLoss(self.netEnd.output, target)
            epochCount += 1
            if cost < 0.000001:
                break
        print('circuCount = %d, cost = ' % epochCount + str(cost))

    def singleTrain(self, inp, output):
        """One SGD step: forward pass, backward sweep, then weight updates."""
        self.input(inp)
        # Backward sweep, end to head, threading the gradient through each layer.
        grad = self.netEnd.output - output
        layer = self.netEnd
        while layer is not None:
            grad = layer.backpropagation(grad)
            layer = layer.pre
        # Update sweep, head to end; each layer is fed its forward-pass input.
        feed = inp
        layer = self.netHead
        while layer is not None:
            layer.update(feed)
            feed = layer.output
            layer = layer.next


if __name__ == '__main__':
    # Identity-mapping demo: the net learns to reproduce its 2-D input
    # through a 4-unit hidden layer with linear activations.
    samples = numpy.array([[0, 1], [-1, 0], [0, 0], [1, -1]])
    targets = numpy.array([[0, 1], [-1, 0], [0, 0], [1, -1]])
    net = neuralNet(
        len(samples[0]),
        [4, 2],
        [funcSet.straightSet, funcSet.straightSet],
        [0.1, 0.1],
    )
    net.train(samples, targets)
    print(net.netEnd.output)

funcSet.py

import math

import numpy


class FuncSet:
    """Pairs an activation function with its derivative.

    ``derivativeFunc`` is expected to take the activation's OUTPUT, not its
    input (e.g. d_sigmoid(o) = o * (1 - o)).
    """

    def __init__(self, func, derivativeFunc):
        self.func = func
        self.derivativeFunc = derivativeFunc
        # Human-readable identifier derived from the function object.
        self.name = str(func)


def d_sigmoid(x):
    """Sigmoid derivative, expressed in terms of the sigmoid OUTPUT x."""
    return (1 - x) * x


def sigmoid(x):
    """Logistic sigmoid; ``math.e ** -x`` lets numpy arrays broadcast too."""
    return 1.0 / (math.e ** -x + 1.0)


def tanh(x):
    """Hyperbolic tangent computed from exponentials, numpy-array friendly."""
    pos = math.e ** x
    neg = math.e ** -x
    return (pos - neg) / (pos + neg)


def d_tanh(x):
    """Tanh derivative, expressed in terms of the tanh OUTPUT x: 1 - x^2."""
    return 1 - x * x


def straight(x):
    """Identity (linear) activation: returns its input unchanged."""
    return x


def d_straight(x):
    """Derivative of the identity activation: constant 1 for any input."""
    return 1


def reLu(x):
    """Rectified linear unit: elementwise max(x, 0)."""
    return numpy.maximum(x, 0)


def d_reLu(x):
    """Derivative of ReLU: 1 where x > 0, else 0 (0 at x == 0, as before).

    Vectorized replacement for the original element-by-element loop: works
    on arrays of any shape (the loop only handled 1-D) and preserves the
    input's dtype, matching the old copy-and-assign behavior.
    """
    return (x > 0).astype(x.dtype)


# Ready-made activation/derivative pairs for building networks.
sigmoidSet = FuncSet(sigmoid, d_sigmoid)
tanhSet = FuncSet(tanh, d_tanh)
straightSet = FuncSet(straight, d_straight)
reLuSet = FuncSet(reLu, d_reLu)

# Activation sets usable for backpropagation here (reLu excluded).
allBackAbleSets = [sigmoidSet, straightSet, tanhSet]

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值