BP Algorithm (Backpropagation)

A minimal fully-connected network trained with backpropagation and mini-batch gradient descent, written with NumPy: sigmoid activations, quadratic cost, and a small demo script at the end.

# the code below relies on numpy's star import (random, dot, exp, zeros, ...)
from numpy import *
class net:
    def __init__(self, layer, lrate, batchsize):
        # layer: list of layer sizes, e.g. [2, 8, 1]
        self.layernum = len(layer)
        self.lrate = lrate
        self.batchsize = batchsize
        # all state lives in instance attributes so separate nets never
        # share their lists; weight[0] is an unused placeholder so that
        # weight[i] maps layer i-1 to layer i
        self.weight = [random.randn(1, 1)]
        self.deltaweight = [random.randn(1, 1)]
        self.bias, self.deltabias = [], []
        self.z, self.a, self.delta = [], [], []
        for i, x in enumerate(layer):
            self.a.append(random.randn(x, 1))
            self.bias.append(random.randn(x, 1))
            self.deltabias.append(zeros((x, 1)))
            self.delta.append(random.randn(x, 1))
            self.z.append(random.randn(x, 1))
            if i > 0:
                self.weight.append(random.randn(layer[i], layer[i - 1]))
                self.deltaweight.append(zeros((layer[i], layer[i - 1])))
    def activate(self, z):
        # sigmoid activation
        return 1.0 / (1.0 + exp(-z))
        # ReLU alternative:
        # a = zeros(z.shape)
        # a[z > 0] = z[z > 0]
        # return a
    def dactivate(self, z):
        # derivative of the sigmoid
        return self.activate(z) * (1 - self.activate(z))
        # ReLU derivative:
        # a = zeros(z.shape)
        # a[z > 0] = 1
        # return a
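    # For reference, the identities used above:
    #   sigma(z)  = 1 / (1 + exp(-z))
    #   sigma'(z) = sigma(z) * (1 - sigma(z))
    # The commented-out branch is a ReLU alternative, max(0, z), whose
    # derivative is 1 for z > 0 and 0 otherwise.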
    def cost(self, yi, yo):
        # yi: target, yo: network output
        yi = yi.reshape(size(yi), 1)
        return yo - yi
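    # Note that cost() returns the gradient of the quadratic cost with
    # respect to the output, not the cost itself:
    #   C = (1/2) * ||a_L - y||^2,   dC/da_L = a_L - y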
    def feedforward(self, inputdata):
        inputdata = inputdata.reshape(size(inputdata), 1)
        self.a[0] = inputdata
        for i in range(1, self.layernum):
            self.z[i] = dot(self.weight[i], self.a[i - 1]) + self.bias[i]
            self.a[i] = self.activate(self.z[i])
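    # The forward pass computes, for layers i = 1 .. L:
    #   z_i = W_i a_(i-1) + b_i,   a_i = sigma(z_i)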
    def backforward(self, cost):
        # output-layer error, then propagate it backwards through the net
        self.delta[self.layernum - 1] = cost * self.dactivate(self.z[self.layernum - 1])
        for i in range(self.layernum - 2, 0, -1):
            self.delta[i] = dot(self.weight[i + 1].T, self.delta[i + 1]) * self.dactivate(self.z[i])
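    # These two assignments are the standard backprop recurrences
    # (* denotes the elementwise product):
    #   delta_L = (a_L - y) * sigma'(z_L)
    #   delta_i = (W_(i+1)^T delta_(i+1)) * sigma'(z_i)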
    def train(self, idata, odata, epoch, dsize):
        print('training start')
        for i in range(0, epoch):
            print('epoch:%d' % i)
            for m in range(200):
                # draw a random mini-batch [xt2, xt1) of batchsize samples,
                # clipped so it never runs past the end of the data
                xt1 = random.randint(0, dsize) + self.batchsize
                xt1 = int(clip(xt1, self.batchsize, dsize))
                xt2 = xt1 - self.batchsize
                self.update_minibatch(idata[xt2:xt1], odata[xt2:xt1])
        print('training over')
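    # An alternative sampler, sketched here for comparison (not part of the
    # original class): shuffle the indices once per epoch and visit every
    # sample exactly once, in batches of batchsize.
    def train_shuffled(self, idata, odata, epoch):
        for e in range(epoch):
            order = random.permutation(len(idata))
            for start in range(0, len(idata), self.batchsize):
                batch = order[start:start + self.batchsize]
                self.update_minibatch([idata[j] for j in batch],
                                      [odata[j] for j in batch])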
    def update_minibatch(self, idata, odata):
        # zero the gradient accumulators
        for i in range(1, self.layernum):
            self.deltabias[i] = zeros(self.bias[i].shape)
            self.deltaweight[i] = zeros(self.weight[i].shape)
        # accumulate the gradient of every sample in the batch
        for i in range(0, len(idata)):
            self.feedforward(idata[i])
            costs = self.cost(odata[i], self.a[self.layernum - 1])
            self.backforward(costs)
            for ii in range(self.layernum - 1, 0, -1):
                self.deltabias[ii] += self.delta[ii]
                self.deltaweight[ii] += dot(self.delta[ii], self.a[ii - 1].T)
        # one gradient-descent step, averaged over the actual batch size
        m = len(idata)
        for i in range(1, self.layernum):
            self.bias[i] -= self.deltabias[i] * self.lrate / m
            self.weight[i] -= self.deltaweight[i] * self.lrate / m
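    # The final loop is one averaged gradient-descent step over the batch
    # of m samples (eta is the learning rate lrate):
    #   W_i <- W_i - (eta/m) * sum_x delta_i(x) a_(i-1)(x)^T
    #   b_i <- b_i - (eta/m) * sum_x delta_i(x)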
    def predicate(self, data):
        # forward pass; returns the output-layer activations
        self.feedforward(data)
        return self.a[-1]
    def test(self, idata, odata):
        # classification accuracy: fraction of samples whose argmax output
        # matches the argmax of the (one-hot) target
        correct = 0.
        m = len(idata)
        for i in range(0, m):
            result = self.predicate(idata[i])
            otemp = odata[i]
            if argmax(result) == argmax(otemp.reshape(size(otemp), 1)):
                correct += 1.
        return correct / m



# demo: fit four 2-dimensional inputs to four scalar targets
# (assumes the class above is saved as py5.py)
import py5
from numpy import *
idata = [array([[0], [0]]), array([[0], [1]]), array([[1], [0]]), array([[1], [1]])]
odata = [array([0.79]), array([0.15]), array([0.53]), array([0.84])]
net1 = py5.net([2, 8, 1], 3, 2)   # layers [2, 8, 1], lrate=3, batchsize=2
net1.train(idata, odata, 200, 4)  # 200 epochs over the 4 samples
print(net1.predicate(idata[0]))
print(net1.predicate(idata[1]))
print(net1.predicate(idata[2]))
print(net1.predicate(idata[3]))
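# A quick sanity check (a sketch reusing the objects above, not part of the
# original script): the mean absolute error should be small after training.
errors = [abs(net1.predicate(x).item() - y.item()) for x, y in zip(idata, odata)]
print('mean absolute error: %.4f' % (sum(errors) / len(errors)))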

