Backpropagation in Spiking Neural Networks (SpikeProp)

The code below is the top-level view of the whole training process.

#method to train the neural network, given the training data: inputS, the input time
#sequence of spikes for the input neurons, and outputS, the expected spiking times
@classmethod
def train(self, network, inputS, outputS, learningR, epochs):
   global learningRate    #learning rate
   learningRate = learningR
   lenTimeSeq, inNeurons = inputS.shape      #length of the time sequence, number of input neurons


   #this should be done for a number of epochs, and at the end of each epoch the resetSpikeTimes
   #method should be called      #reset the spike times at the end of every epoch


   print ('%%%%%%%%%%%%%%%%%%%%Start of simulation%%%%%%%%%%%%%%%%%%%%%%%%')


   for e in range(epochs):
      error = 0
      inputS, outputS = DataProc.shuffleInUnison(inputS, outputS)          #shuffle inputS and outputS with the same permutation
      # network.displaySNN()
      #inIndex represents the index of the spikes in the training data time sequence
      for inIndex in range(lenTimeSeq):
         inLayer = inputS[inIndex,:]
         print ('The input layer is ', inLayer)
         expSpikes = outputS[inIndex,:]
         print ('The expected spikes are ', expSpikes)
         predSpikes = np.zeros((lenTimeSeq))   #placeholder; overwritten by forwardProp below


         print ('The forward propagation phase started *********')
         predSpikes = self.forwardProp(network, inLayer, expSpikes) #forward pass
         print ('The predicted spike times are ++++++++ ', predSpikes)


         scale = 10 if e < 200 else 1   #note: scale is assigned here but never used in this excerpt


         network = self.backProp(network, expSpikes, inLayer)   #backward pass


         network.resetSpikeTimeNet()       #reset the stored spike times


         predSpikes = self.forwardProp(network, inLayer, expSpikes) #forward pass again, after the weight update
         print ('The predicted spike times are ++++++++ ', predSpikes)
         # network.displaySNN()
         # network.resetSpikeTimeLayer(network.layers[-1])
         error += self.errorFMSE(expSpikes, predSpikes)    #accumulate the error


         #the spikes should be reset after each example
         network.resetSpikeTimeNet()


      print ('The error is ', error)
      if error == 0:
         break
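Two helpers used above, DataProc.shuffleInUnison and errorFMSE, are not shown in this excerpt. A minimal sketch of what they presumably do (the names and signatures come from the calls above; the bodies are my assumptions):

import numpy as np

def shuffleInUnison(a, b):
    #shuffle two arrays with one shared permutation, so each input row
    #stays aligned with its target row
    perm = np.random.permutation(a.shape[0])
    return a[perm], b[perm]

def errorFMSE(expSpikes, predSpikes):
    #squared error between desired and actual firing times, as in SpikeProp
    expSpikes = np.asarray(expSpikes, dtype=float)
    predSpikes = np.asarray(predSpikes, dtype=float)
    return 0.5 * np.sum((expSpikes - predSpikes) ** 2)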

Now let's step through the individual pieces. forwardProp is the network-level forward pass: it first updates the first layer from the input spikes, then uses each newly updated layer to update every following layer in turn.

#function to simulate a forward pass through the network
@classmethod
def forwardProp(self, network, inLayer, expSpikes):       #forward pass for the whole network, driven layer by layer
   global codingInterval  #length of the encoding time window
   global timeStep          #simulation time step
   noLayers = len(network.layers)    #number of layers


   # print 'The simulation time is ', time
   time = 0      #simulation time


   while time <= codingInterval:
      #compute the update for the first layer, using the input spikes
      updatedLayer = self.forwardPropL(inLayer, network.layers[0], 0, time)  #returns the updated layer, an array of AsyncSN neuron objects
      network.layers[0] = updatedLayer   #replace the layer with its updated version


      for layer in range(1,noLayers):       #propagate through to the last layer
         # print 'layer is ', layer, time
         #check for updates for the layers of the network
         updatedLayer = self.forwardPropL(network.layers[layer-1], network.layers[layer], layer, time)
         network.layers[layer] = updatedLayer
      time += timeStep


   predSpikes = SNNetwork.getFireTimesLayer(network.layers[-1])
   return predSpikes

This part is the single-layer forward pass, i.e. the concrete effect of one layer's spikes on the next layer; the function that ultimately changes the membrane potential is actionPot.

#returns the currentLayer whose spiking times have been updated after the signal has gone through
#the layer, i.e. the neurons whose spike times were updated by the incoming signal
@classmethod
def forwardPropL(self, prevLayer, currLayer, lNo, time):      #forward pass through a single layer
   #when there is only one neuron on the layer, an object is returned instead of an array, so
   #recast it
   if type(currLayer) is not np.ndarray:  #recast the current layer as a numpy array
      tmp = [currLayer]
      currLayer = np.asarray(tmp)


   # print 'the current layer is ', currLayer
   noNeurons = currLayer.shape       #shape (number of neurons) of the current layer


   #get the number of terminals in the network
   noTerm = currLayer[0].getNTerminals()  #number of synaptic terminals per connection


   #check on which iteration we are, if we are computing the forward pass of the first layer then use
   #the input data which will just be int or floats, otherwise check for AsyncSN objects
   if isinstance(prevLayer[0], AsyncSN):     #the previous layer also consists of spiking neurons
      preSNFTime = SNNetwork.getFireTimesLayer(prevLayer)       #last firing time of each presynaptic neuron
      preSNTypes = SNNetwork.getTypesLayer(prevLayer)          #type (excitatory/inhibitory) of each presynaptic neuron
   else:     #first layer: the inputs are raw spike times
      preSNFTime = prevLayer
      preSNTypes = np.ones(prevLayer.size)


   # print 'The presynaptic types and firing times are ', preSNTypes, preSNFTime


   #simulate the passes through the network
   for n in range(noNeurons[0]):
      currLayer[n].actionPot(preSNFTime, time, preSNTypes)      #check for an action potential and record the spike time
         
   # print 'The updated firing times of the current layer are ', SNNetwork.getFireTimesLayer(currLayer)


   return currLayer

The functions below implement the equations used in this forward pass.

So how does backpropagation in an ordinary neural network differ from backpropagation in a spiking neural network? In ordinary BP every intermediate quantity can be written in closed form, so its derivative can be written down directly. In a spiking network the spike-firing mechanism sits in between: the firing time is only implicitly defined by the membrane potential crossing the threshold, so no closed-form derivative exists. SpikeProp therefore relies on a linearization. Assumption 1: in a small neighborhood of the firing time, the firing time and the membrane potential are linearly related; if the membrane potential increases, the firing time decreases, because the potential reaches the threshold earlier.

Assumption 2: in that small neighborhood, the slope is the negative reciprocal of the potential's time derivative, $\partial t_j / \partial x_j(t) \approx -1 \big/ \left(\partial x_j(t)/\partial t\right)\big|_{t=t_j^a}$. A numeric illustration follows below.
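A minimal numeric sketch of these two assumptions (the toy trace, threshold, and constants are mine, not from the code): nudge a rising membrane potential upward and the threshold-crossing time moves earlier, by approximately the nudge divided by the local slope.

import numpy as np

tau, threshold = 7.0, 0.8
t = np.arange(0.0, 20.0, 0.001)
x = (t / tau) * np.exp(1 - t / tau)     #toy membrane potential: the SRM kernel itself

def crossingTime(trace):
    #first time at which the trace reaches the threshold
    return t[np.argmax(trace >= threshold)]

t0 = crossingTime(x)            #firing time of the unperturbed trace
t1 = crossingTime(x + 0.01)     #slightly raised potential -> earlier spike
slope = np.gradient(x, t)[np.searchsorted(t, t0)]    #dx/dt at the firing time
print(t1 - t0, -0.01 / slope)   #both ~ the same small negative number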

The lifFunction below implements the spike response function $\varepsilon(t) = \frac{t}{\tau} e^{1 - t/\tau}$ (with $\tau$ = memPotDecayT); note the leading $t/\tau$ factor and the $1 - t/\tau$ exponent, which together make the kernel peak at $t = \tau$.

#based on the internal state variable of the neuron, check if it is generating an action potential
#(spike) or not
def actionPot(self, preSNFTime, currTime, preSNTypes):    #action potential check
   global threshold
   # self.displaySN()
   if len(self.fireTime) == 0:   #each neuron fires at most once per coding interval
      stateVariable = self.intStateVar(preSNFTime, currTime, preSNTypes)
      # print 'The state variable for neuron with presynaptic firing time ', preSNFTime, ' is ', stateVariable
      if stateVariable >= threshold:
         # print '^^^^^^^^^^^^A new spike time was appended ', currTime
         self.fireTime.append(currTime)

#method to compute the internal state variable (the membrane potential) of the current neuron,
#in order to determine if the neuron is spiking or not
def intStateVar(self, preSNFTime, currTime, preSNTypes):
   connections = self.getNConnections()   #number of incoming connections
   stateVariable = 0.0


   for s in range(connections):
      # print 'The presynaptic fr received is ', preSNFTime[s], currTime
      if currTime >= preSNFTime[s] and preSNFTime[s] >= 0:
         #number of synaptic terminals per connection
         terminals = self.getNTerminals()
         # print terminals
         for t in range(terminals):
            # print 'the term contribution is ********** ', self.termContr(preSNFTime[s], currTime, self.synapses[s].delays[t],\
                         # preSNTypes[s])
            stateVariable += self.synapses[s].weights[t] \
                        * self.termContr(preSNFTime[s], currTime, self.synapses[s].delays[t],\
                         preSNTypes[s])
   return stateVariable


# preSNFTime is the firing time of the presynaptic neuron
def termContr(self, preSNFTime, currTime, termDelay, nType):
   #note: standard SpikeProp would subtract the terminal delay here, but it is commented out in this code
   time = float(currTime) - preSNFTime #- termDelay
   return self.lifFunction(time, nType)

#a standard spike response function describing a postsynaptic potential
#creates a leaky-integrate-and-fire neuron
def lifFunction(self, time, nType):       #note the extra nType argument (the presynaptic neuron type, used as a sign/scale factor)
   global memPotDecayT
   if time > 0:
      div = float(time) / memPotDecayT
      return div * nType * math.exp(1 - div)
   else:
      return 0
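A quick standalone check of this kernel (a sketch; the value 7.0 for memPotDecayT is an assumption): $\varepsilon(t)$ is zero for $t \le 0$, rises to a maximum of exactly 1 at $t = \tau$, then decays.

import math

memPotDecayT = 7.0   #assumed decay constant tau

def lifFunction(time, nType=1):
    #spike response function eps(t) = (t/tau) * exp(1 - t/tau), signed by the neuron type
    if time > 0:
        div = float(time) / memPotDecayT
        return div * nType * math.exp(1 - div)
    return 0

print(lifFunction(memPotDecayT))         # 1.0  -> the kernel peaks at t = tau
print(lifFunction(0), lifFunction(-3))   # 0 0  -> no contribution before the presynaptic spike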

That is the complete forward pass.

The backward pass is essentially the inverse of the forward pass.
The block below updates the output layer. In SpikeProp notation the weight update is $\Delta w_{ij}^k = -\eta\, y_i^k(t_j^a)\, \delta_j$, where $y_i^k(t_j^a)$ is the unweighted contribution of terminal $k$ of connection $i$ (computed by termContr); the termContrib variable in the code is exactly this $y_i^k$.

#method that modifies the weights of each neuron using the SpikeProp algorithm and gradient descent
@classmethod
def backProp(self, network, expSpikeT, inLayer):
   global learningRate


   net = deepcopy(network)  #deep-copy the network; the updated weights are written into the copy


   layersNo = len(network.layers)    #number of layers
   neuronsNo = network.layers[layersNo-1].shape   #number of neurons in the output layer


   deltaNeuron = np.zeros((neuronsNo))


   #compute the update for the output layer and store any variables that might be needed for
   #the other layers as well
   for n in range(neuronsNo[0]):
      if network.layers[-1][n].getLastFireTime() != -1:  #-1 means the neuron never fired; only fired neurons are updated
         connNo = network.layers[-1][n].getNConnections()   #number of incoming connections
         termNo = network.layers[-1][n].getNTerminals()    #number of synaptic terminals per connection


         #array to store the updates for each connection and terminal for the current neuron
         deltaWO = np.zeros((connNo, termNo))


         preSNFTimes = SNNetwork.getFireTimesLayer(network.layers[layersNo-2])  #firing times of the previous (hidden) layer
         preSNTypes = SNNetwork.getTypesLayer(network.layers[layersNo-2])   #neuron types of the previous layer


         for c in range(connNo):
            if preSNFTimes[c] != -1:
               for t in range(termNo):
                  termContrib =  network.layers[-1][n].termContr(preSNFTimes[c], \
                           network.layers[-1][n].getLastFireTime(), \
                           network.layers[-1][n].synapses[c].delays[t], \
                           preSNTypes[c])

                  deltaWO[c,t] = termContrib * (-1) * learningRate
            else:
               deltaWO[c] = np.zeros((termNo))
               # print '------------------', termContrib


         #independent of connection and terminals, store for reuse for hidden layers
         deltaNeuron[n] = self.deltaOutputN(network.layers[-1][n], preSNFTimes, preSNTypes, \
                                    expSpikeT[n])


         # print 'the delta is ', deltaWO
         # print 'the error is ', deltaNeuron[n]
         updates = np.multiply(deltaWO, deltaNeuron[n])


         net.layers[-1][n].updateWeights(updates)   #apply the update to the output neuron (in the copy)
      else:
         deltaNeuron[n] = 0


      #enable line to do gradient checking
      # self.checkGradient(deltaWO * deltaNeuron[n], \
                  # self.approxGradient(network, inLayer, expSpikeT, n))
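checkGradient and approxGradient are referenced but not shown in this excerpt. A minimal sketch of the finite-difference check they presumably perform; the helper names, signatures, and the forward_error callback (which should reset spike times, rerun forwardProp, and return the error) are my assumptions:

def approxGradientW(forward_error, network, layerIdx, n, c, t, eps=1e-4):
    #central finite difference of the error with respect to a single weight:
    #perturb the weight, rerun the forward pass, and compare errors
    w = network.layers[layerIdx][n].synapses[c].weights
    w[t] += eps
    ePlus = forward_error(network)
    w[t] -= 2 * eps
    eMinus = forward_error(network)
    w[t] += eps                        #restore the original weight
    return (ePlus - eMinus) / (2 * eps)

def checkGradient(analytic, approx, tol=1e-4):
    #relative difference between the analytic SpikeProp gradient and the numeric one
    return abs(analytic - approx) / max(abs(analytic) + abs(approx), 1e-12) < tol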

The function below, deltaOutputN, computes everything in that update except $y_i^k$, i.e. the output-layer delta $$\delta_j = \frac{t_j^d - t_j^a}{\sum_i \sum_k w_{ij}^k \,\partial y_i^k(t_j^a)/\partial t_j^a},$$ where $t_j^d$ is the desired and $t_j^a$ the actual firing time.

#method to compute the delta term of the update for the output layer nodes
@classmethod
def deltaOutputN(self, neuron, preSNFTimes, preSNTypes, expSpikeT):       #compute the output-layer delta
   actualSpike = neuron.getLastFireTime()    #actual firing time
   error = expSpikeT - actualSpike


   connNo = neuron.getNConnections()
   termNo = neuron.getNTerminals()


   delta = 0.0
   # neuron.displaySN()


   for c in range(connNo):
      if preSNFTimes[c] != -1:
         for t in range(termNo):
            delta += neuron.synapses[c].weights[t] * neuron.termContrDer(preSNFTimes[c], actualSpike, \
                  neuron.synapses[c].delays[t], preSNTypes[c])


   return float(error)/delta
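termContrDer, used here and again in deltaHiddenN below, is not part of this excerpt. It should be the time derivative of lifFunction: differentiating $\varepsilon(t) = (t/\tau)e^{1-t/\tau}$ gives $\varepsilon'(t) = \frac{1}{\tau}\left(1 - \frac{t}{\tau}\right)e^{1-t/\tau}$. A sketch consistent with the code above (written as a standalone function here; it is a method in the original):

import math

memPotDecayT = 7.0   #assumed decay constant tau, as above

def termContrDer(preSNFTime, currTime, termDelay, nType):
    #derivative of the terminal contribution w.r.t. the postsynaptic firing time
    time = float(currTime) - preSNFTime   #delay disabled, mirroring termContr above
    if time > 0:
        div = float(time) / memPotDecayT
        return nType * (1.0 - div) * math.exp(1 - div) / memPotDecayT
    return 0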

The difference between the hidden-layer and the output-layer derivative: the error reaches a hidden neuron $i$ only through the firing times of the next layer, so by the chain rule the hidden delta is $$\delta_i = \frac{\sum_j \delta_j \sum_k w_{ij}^k \,\partial y_i^k(t_j^a)/\partial t_i^a}{\sum_h \sum_k w_{hi}^k \,\partial y_h^k(t_i^a)/\partial t_i^a},$$ with $j$ ranging over the next layer and $h$ over the previous one.

This step differentiates only the part of the loss that depends on $t_i^a$, so all terms independent of $t_i^a$ are factored out.

The block below is the hidden-layer update; it is a continuation of the backProp method above. Its structure mirrors the output-layer case: termContrib is again the $y_h^k$ factor, exactly as in the output layer, and deltaHiddenN computes the remaining factor $\delta_i$.

#compute the update for the other (hidden) layers, walking backwards
for l in range(layersNo-2, -1, -1):
   # print 'backpropagation through layer ', l
   neuronsNo = network.layers[l].shape       #number of neurons in this layer
   deltaNeuronH = np.zeros((neuronsNo[0]))
   for n in range(neuronsNo[0]):
      if network.layers[l][n].getLastFireTime() != -1:      #the neuron fired
         connNo = network.layers[l][n].getNConnections()       #number of incoming connections
         termNo = network.layers[l][n].getNTerminals()     #number of synaptic terminals per connection
         deltaWH = np.zeros((connNo, termNo))


         if l > 0:
            preSNFTimes = network.getFireTimesLayer(network.layers[l-1])
            preSNTypes = SNNetwork.getTypesLayer(network.layers[l-1])
         else:
            preSNFTimes = inLayer
            preSNTypes = np.ones(inLayer.size)


         for c in range(connNo):
            if preSNFTimes[c] != -1:
               for t in range(termNo):
                  termContrib = network.layers[l][n].termContr(preSNFTimes[c], \
                                 network.layers[l][n].getLastFireTime(),\
                                 network.layers[l][n].synapses[c].delays[t], preSNTypes[c])


                  deltaWH[c,t] = termContrib * (-1) * learningRate
            else:
               deltaWH[c] = np.zeros((termNo))


         #compute the hidden layer delta
         deltaNeuronH[n] = self.deltaHiddenN(network.layers[l][n], preSNFTimes, preSNTypes,\
                                    network.layers[l+1], deltaNeuron, n)
         # print 'The delta of neuron ', n,' is ', deltaNeuronH[n]
         # print 'Delta weight hidden are ', deltaWH
         updates = np.multiply(deltaWH, deltaNeuronH[n])
         net.layers[l][n].updateWeights(updates)
      else:
         deltaNeuronH[n] = 0


   deltaNeuron = deltaNeuronH


return net
#method to compute the delta term in the update for the hidden layer nodes
@classmethod
def deltaHiddenN(self, neuron, preSNFTimes, preSNTypes, nextLayer, deltaNextLayer, currNInd):     #compute the hidden-layer delta
   prevError = 0.0
   currError = 0.0
   termSum = 0.0


   actualSpike = neuron.getLastFireTime()    #last firing time of this hidden neuron


   connNo = neuron.getNConnections()
   termNo = neuron.getNTerminals()


   neuronsNo = nextLayer.shape


   for c in range(connNo):
      if preSNFTimes[c] != -1:
         for t in range(termNo):
            currError += neuron.synapses[c].weights[t] * neuron.termContrDer(preSNFTimes[c], \
                        actualSpike, neuron.synapses[c].delays[t], preSNTypes[c])


   for n in range(neuronsNo[0]):
      termNo = nextLayer[n].getNTerminals()
      termSum = 0.0
      for t in range(termNo):
         termSum += nextLayer[n].synapses[currNInd].weights[t] \
                  * neuron.termContrDer(actualSpike, nextLayer[n].getLastFireTime(),\
                     nextLayer[n].synapses[currNInd].delays[t],\
                     neuron.type)


      # print 'delta next layer ', deltaNextLayer[n]
      prevError += termSum * deltaNextLayer[n]


   return prevError / currError
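To close, a hedged sketch of how train might be invoked on the classic SpikeProp XOR benchmark; the SNNetwork constructor, the SNNTrainer class name, and the exact spike-time encoding are assumptions, not part of this excerpt:

import numpy as np

#XOR encoded as spike times: an early input spike (0) vs a late one (6),
#plus a bias input that always fires at t = 0; the desired output spike
#comes early (10) for XOR-true patterns and late (16) for XOR-false ones
inputS = np.array([[0., 0., 0.],
                   [0., 6., 0.],
                   [6., 0., 0.],
                   [6., 6., 0.]])
outputS = np.array([[16.], [10.], [10.], [16.]])

network = SNNetwork(layout=[3, 5, 1])   #hypothetical constructor: 3-5-1 architecture
SNNTrainer.train(network, inputS, outputS, learningR=0.01, epochs=500)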