# neuralNet: supports feeding the whole training set in at once. Previously
# samples were trained one by one, which failed to exploit numpy's
# vectorized (SIMD) optimizations.

import math

import numpy
import funcSet


def getLoss(errOut, corOut):
    """Return the summed squared-error loss: sum(0.5 * (errOut - corOut)**2)."""
    diff = errOut - corOut
    return 0.5 * numpy.sum(diff * diff)


class NeuralLine:
    """One fully-connected layer in a doubly-linked chain of layers.

    Holds the weight matrix, the bias row, the activation object
    (``activeFunc.func`` / ``activeFunc.derivativeFunc``) and a per-layer
    learning rate.  ``derivativeFunc`` is evaluated at the layer's own
    post-activation output (see ``update``), so the activation objects are
    expected to express their derivative in terms of the output value.
    """

    def __init__(self, inputLen, hiddenLen, activeFunc, studyRate):
        # Weight matrix (inputLen x hiddenLen), uniform [0, 1) initialization.
        self.matrix = numpy.random.rand(inputLen, hiddenLen)
        # Post-activation outputs cached by the most recent forward pass.
        self.outputs = None
        # Bias row vector, broadcast over the batch dimension.
        self.inherent = numpy.zeros((1, hiddenLen))
        self.activeFunc = activeFunc
        self.studyRate = studyRate
        # Doubly-linked-list pointers to the neighboring layers.
        self.next = None
        self.pre = None
        # d(loss)/d(outputs), cached by backpropagation() for update().
        self.d_loss_ho = None

    def input(self, inputs):
        """Forward pass: outputs = activeFunc(inputs @ matrix + bias)."""
        self.outputs = numpy.dot(inputs, self.matrix)
        self.outputs += self.inherent
        self.outputs = self.activeFunc.func(self.outputs)

    def update(self, inputs):
        """Apply one gradient-descent step using the cached d_loss_ho."""
        # Activation derivative evaluated at the layer's own output.
        d_ho_h = self.activeFunc.derivativeFunc(self.outputs)

        # Gradient w.r.t. the pre-activation values.
        d_loss_h = self.d_loss_ho * d_ho_h

        # Bias gradient: sum the per-sample gradients over the batch axis.
        self.inherent -= self.studyRate * numpy.sum(d_loss_h, axis=0, keepdims=True)

        # Weight gradient: inputs^T @ d_loss_h has shape (inputLen, hiddenLen),
        # matching self.matrix, and sums over the batch automatically.
        d_loss_matrix = numpy.dot(inputs.T, d_loss_h)

        self.matrix -= self.studyRate * d_loss_matrix

    def backpropagation(self, d_loss_ho):
        """Cache d_loss_ho and return the gradient w.r.t. this layer's inputs.

        BUG FIX: the gradient flowing to the previous layer must include this
        layer's activation derivative, i.e.
        ``(d_loss_ho * act'(outputs)) @ matrix.T``.  The old code returned
        ``d_loss_ho @ matrix.T``, which is only correct for an identity
        activation and silently produced wrong gradients in nets with three
        or more layers.  (``update`` is unaffected: it multiplies the cached
        ``d_loss_ho`` by the derivative itself.)
        """
        self.d_loss_ho = d_loss_ho
        d_loss_h = d_loss_ho * self.activeFunc.derivativeFunc(self.outputs)
        return numpy.dot(d_loss_h, self.matrix.T)


class NeuralNet:
    """A chain of NeuralLine layers trained with full-batch gradient descent."""

    def __init__(self, inputLen, neuralLenArray, funcArray, studyRateArray):
        # Build the layer chain head -> end; each layer's input width is the
        # previous layer's output width.
        self.netHead = NeuralLine(inputLen, neuralLenArray[0], funcArray[0], studyRateArray[0])
        self.netEnd = self.netHead
        for i in range(1, len(neuralLenArray)):
            self.netEnd.next = NeuralLine(neuralLenArray[i - 1], neuralLenArray[i], funcArray[i], studyRateArray[i])
            self.netEnd.next.pre = self.netEnd
            self.netEnd = self.netEnd.next

    def input(self, inputs):
        """Forward-propagate a batch through every layer in order."""
        temp = self.netHead
        temp.input(inputs)
        temp = temp.next
        while temp is not None:
            temp.input(temp.pre.outputs)
            temp = temp.next

    def train(self, inputs, outputs, maxIter=10000, tol=1e-10):
        """Run up to maxIter full-batch epochs, stopping early when loss < tol.

        maxIter and tol generalize the previously hard-coded limits (10000
        iterations, 1e-10 loss) while keeping the same defaults, so existing
        callers are unaffected.
        """
        circuCount = 0
        # Seed cost so a maxIter=0 call still prints something sensible
        # instead of raising UnboundLocalError.
        cost = float('inf')
        while circuCount < maxIter:
            self.setTrain(inputs, outputs)
            cost = getLoss(self.netEnd.outputs, outputs)
            circuCount += 1
            if cost < tol:
                break
        print('circuCount = %d, cost = ' % circuCount + str(cost))

    def setTrain(self, inputs, outputs):
        """One training step: forward pass, backward pass, then weight updates."""
        self.input(inputs)
        # Gradient of the squared-error loss w.r.t. the final outputs.
        d_loss_o = self.netEnd.outputs - outputs
        # Backward pass: each layer caches its own gradient and returns the
        # gradient w.r.t. its inputs for the previous layer.
        temp = self.netEnd
        d_loss_o = temp.backpropagation(d_loss_o)
        temp = temp.pre
        while temp is not None:
            d_loss_o = temp.backpropagation(d_loss_o)
            temp = temp.pre
        # Update pass: apply the cached gradients front to back, feeding each
        # layer the inputs it saw during the forward pass.
        temp = self.netHead
        temp.update(inputs)
        temp = temp.next
        while temp is not None:
            temp.update(temp.pre.outputs)
            temp = temp.next


def test():
    """Train a tiny 2-4-1 network on the XOR truth table and print its predictions."""
    xorInputs = numpy.array([[0, 1], [1, 0], [0, 0], [1, 1]])
    xorTargets = numpy.array([[1], [1], [0], [0]])
    layerSizes = [4, len(xorTargets[0])]
    activations = [funcSet.reLuSet, funcSet.straightSet]
    learnRates = [0.1, 0.1]
    net = NeuralNet(len(xorInputs[0]), layerSizes, activations, learnRates)
    net.train(xorInputs, xorTargets)
    print(net.netEnd.outputs)


if __name__ == '__main__':
    # Run the XOR demo when this file is executed as a script.
    test()

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值