# A simple forward-propagation model (four-layer neural network), shared by a beginner for discussion.

#------------------------------------------------------------
#  ## forward-propagation algorithm, with the mathematics
#  ## CopyRight:Jinlun
#-----------------------------------------------------------#
# -*- coding: utf-8 -*-

import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import pandas as pd
import cv2

# A simple nerve layer and you can use session in tensorflow
# instead of it , and it is more effiencient


# A single fully-connected layer: a weight matrix plus the cached state of
# the most recent forward pass.  (A session-based framework such as
# TensorFlow could replace this and would be more efficient.)
class nerve_layer(object):

    def __init__(self, InvariableNum, OutvariableNum):
        """Create a layer of OutvariableNum neurons over InvariableNum inputs.

        Weights are drawn uniformly from [0, 1).
        """
        # nerves[j, i] is the weight from input i to neuron j.
        self.nerves = np.random.rand(OutvariableNum, InvariableNum)
        self.errors = []  # per-neuron deltas, filled by backError()
        self.Out = []     # last activations, an (OutvariableNum, 1) column
        self.Var = []     # last input vector, cached by forwardRes()

    def sigmoid(self, vec):
        """Element-wise logistic activation 1 / (1 + e**-x); returns a list."""
        return [1 / (1 + math.exp(-x)) for x in vec]

    def forwardRes(self, InVarible):
        """Forward pass: return sigmoid(nerves @ InVarible) as a column vector.

        Caches the input in self.Var and the output in self.Out for the
        subsequent backward pass.
        """
        self.Var = InVarible
        res = np.dot(self.nerves, InVarible)
        Res = self.sigmoid(res)
        size = np.size(Res)
        Res = np.reshape(Res, [size, 1])
        self.Out = Res
        return Res

    def backError(self, varBefore, errorForward, n, flag):
        """Compute this layer's error terms (deltas) for backpropagation.

        varBefore    -- weight matrix of the *following* layer
                        (ignored when flag != 0)
        errorForward -- deltas of the following layer (flag == 0), or the
                        raw network residual Y - Out (flag != 0)
        n            -- number of neurons to process
        flag         -- 0 for a hidden layer, non-zero for the output layer
        Returns (errors, nerves) so the caller can chain backwards.
        """
        self.errors = []
        if flag == 0:
            # Hidden layer: delta_i = (sum_k w_ki * delta_k) * out_i * (1 - out_i)
            # Hoisted out of the loop: length is invariant across iterations.
            length = len(varBefore[:, 0])
            for i in range(n):
                err = sum(np.dot(errorForward, varBefore[:, i].reshape(length, 1)))
                error = err * self.Out[i, 0] * (1 - self.Out[i, 0])
                self.errors.append(error)
        else:
            # Output layer: delta_i = residual_i * out_i * (1 - out_i)
            for i in range(n):
                err = (errorForward[i] * self.Out[i, 0] * (1 - self.Out[i, 0]))[0]
                self.errors.append(err)
        return self.errors, self.nerves

# Multi-layer forward pass: feed Input through the first nLayer layers
# and return the residual Y - (network output).
def forwardSpread(Input, NerverLayers, nLayer, Y):
    # The first layer always runs; the remaining layers chain its output.
    signal = NerverLayers[0].forwardRes(Input)
    for layer in NerverLayers[1:nLayer]:
        signal = layer.forwardRes(signal)
    return Y - signal

# Multi-layer backward pass: propagate the output residual E back through
# all nLayer layers, filling each layer's `errors` list.  The output layer
# (index nLayer-1) is seeded with the raw residual (flag=1); each earlier
# layer combines the following layer's deltas with its weights (flag=0).
def backSpread(nerverLayers, nLayer, E):
    # BUG FIX: the original loop was `range(nLayer, -1, 1)`, which is empty
    # (positive step with start > stop), so no error was ever propagated and
    # the `x == nLayer` branch was unreachable.  Walk last layer to first.
    for x in range(nLayer - 1, -1, -1):
        if x == nLayer - 1:
            # Output layer: seed with the network residual E.
            err, ner = nerverLayers[x].backError(0, E, len(nerverLayers[x].Var), 1)
        else:
            # Hidden layer: use the following layer's deltas and weights.
            err, ner = nerverLayers[x].backError(ner, err, len(nerverLayers[x].Var), 0)
    return nerverLayers

# Update the weights of every layer after the first by shrinking them:
# W <- W - lamda * W.
#
# NOTE(review): this is plain weight decay — the per-layer `errors`
# computed by backSpread() are never used here, so no true gradient step
# is taken; confirm whether that is the intended training rule.
def updateNerve(nerveLayers, lamda):
    n = len(nerveLayers)
    # Layer 0 is deliberately left untouched, matching the original loop.
    for i in range(1, n):
        # BUG FIX: the row loop was hard-coded to range(4), so only the
        # first 4 rows of a layer were updated.  Operate on the whole
        # matrix (element-wise, so results match the row-wise update).
        W = nerveLayers[i].nerves
        nerveLayers[i].nerves = W - lamda * W
    return nerveLayers

# Animation callback: reveal the first `num` points of the error curve.
def update_point(num):
    # `fig_points` and `data` are module-level globals assigned in __main__;
    # presumably data is a (2, 100) array of (iteration, squared error) —
    # TODO confirm against the __main__ block.
    fig_points.set_data(data[:, 0:num])
    return fig_points,

if __name__ == '__main__':

    # Figure plus an initially empty line of 'x' markers that the
    # animation callback update_point() fills in frame by frame.
    fig1 = plt.figure()
    fig_points, = plt.plot([], [], 'x')
    Nerves = []
    # Target output, shaped as a (4, 1) column vector.
    Y = np.array([0,1,0,1])
    Y = Y.reshape((4,1))
    # First layer maps 3 inputs to 4 neurons; three 4->4 layers follow.
    Nerve1 = nerve_layer(3,4)
    varible = np.random.rand(3,1)
    Nerves.append(Nerve1)
    for i in range(3):
        nNerver = nerve_layer(4,4)
        Nerves.append(nNerver)
    # Initial residual E = Y - network(varible).
    E = forwardSpread(varible,Nerves,len(Nerves),Y)

    # Squared-error history, one entry per training iteration.
    summ = []

    for i in range(100):
        backSpread(Nerves,4,E)
        updateNerve(Nerves, 0.04)
        E = forwardSpread(varible, Nerves, len(Nerves), Y)
        summer = 0.0
        for t in E:
            # inner product of the residual with itself (sum of squares)
            summer = summer + t[0] * t[0]
        summ.append(summer)
    plt.xlim(1,100)
    plt.ylim(0,2)

    # data row 0 = iteration index, row 1 = squared error; update_point
    # reads this module-level array to animate the error curve.
    x = list(np.arange(1,101,1,int))
    data = [x,summ]
    data = np.array(data)
    anim = animation.FuncAnimation(fig1, update_point, 100)
    # Axis labels are intentionally left in Chinese (runtime strings):
    # title "neural network", xlabel "training iterations", ylabel "error".
    plt.title(u'神经网络', fontproperties='simhei')
    plt.xlabel(u'训练次数', fontproperties='simhei')
    plt.ylabel(u'误差', fontproperties='simhei')

    plt.show()


评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值