Python写一个BP神经网络

先写一个Layer类

class Neuron:
    """A single neuron holding one scalar activation value.

    Generalized: the starting value may be supplied; it defaults to 0,
    matching the original behavior for existing callers.
    """

    def __init__(self, value=0):
        # Current activation of this neuron; overwritten by feed_forward.
        self.value = value

class Layer:
    """A network layer: a fixed-size collection of neurons."""

    def __init__(self, n):
        # Build n independent Neuron objects, one per node in the layer.
        neurons = []
        for _ in range(n):
            neurons.append(Neuron())
        self.neurons = neurons

再来个Weight类和Bias类

import random
import numpy

class Weight:
    """Weight matrix connecting a source layer of M neurons to a target
    layer of N neurons.

    The matrix has shape (N, M): ``matrix[i][j]`` is the weight from
    source neuron j to target neuron i.  Entries are initialized
    uniformly at random in [0, 1), as in the original.
    """

    def __init__(self, M, N):
        # One vectorized fill replaces the original element-by-element
        # double loop (which also used inconsistent 2-space indentation).
        self.matrix = numpy.random.random((N, M))

class Bias:
    """Bias vector for a layer of N neurons.

    Entries are initialized uniformly at random in [0, 1), as in the
    original.
    """

    def __init__(self, N):
        # Vectorized fill replaces the original per-element loop.
        self.bias = numpy.random.random(N)

然后NN类

import numpy
import math

import paras
import layer


class NeuralNetwork:
    """Three-layer (input / hidden / output) fully connected BP network.

    Layer sizes are M (input), N (hidden) and K (output).  Weight and
    bias containers come from the ``paras`` module, neuron storage from
    the ``layer`` module.
    """

    def __init__(self, _M, _N, _K):
        # Node counts of the three layers.
        self.M = _M
        self.N = _N
        self.K = _K
        # Layer initialization.
        self.layer_input = layer.Layer(_M)
        self.layer_hidden = layer.Layer(_N)
        self.layer_output = layer.Layer(_K)
        # Weight matrices: w1 has shape (N, M), w2 has shape (K, N).
        self.w1 = paras.Weight(_M, _N)
        self.w2 = paras.Weight(_N, _K)
        # Bias vectors for the hidden and output layers.
        self.bias1 = paras.Bias(_N)
        self.bias2 = paras.Bias(_K)

    # Forward-propagation pass.
    def feed_forward(self):
        """Propagate the input-layer values through hidden to output.

        Fixes vs. the original:
        - The original accumulated with ``+=`` onto whatever value each
          neuron already held, so a second feed_forward call produced
          wrong results.  Each activation is now computed into a local
          sum and assigned.
        - The original's output-layer ``for`` was mis-indented
          (7 spaces), which is an IndentationError.
        """
        for n in range(self.N):
            total = 0.0
            for m in range(self.M):
                total += self.w1.matrix[n, m] * self.layer_input.neurons[m].value
            self.layer_hidden.neurons[n].value = self.sigmoid(total + self.bias1.bias[n])
        for k in range(self.K):
            total = 0.0
            for n in range(self.N):
                total += self.w2.matrix[k, n] * self.layer_hidden.neurons[n].value
            self.layer_output.neurons[k].value = self.sigmoid(total + self.bias2.bias[k])

    # Back-propagation pass.
    def back_propagation(self, lin, Y):
        """One gradient-descent step on squared error with learning rate ``lin``.

        ``Y`` is the target output vector (length K).  As in the
        original, the hidden-layer gradients are computed against the
        *old* w2, which is only replaced at the very end.
        """
        # Output-layer deltas; w2's update is deferred so the hidden
        # layer still sees the pre-update weights.
        w2_update = numpy.zeros((self.K, self.N))
        deltas = []
        for k in range(self.K):
            # delta_k = dE/dnet_k for output neuron k.
            delta_k = (self.layer_output.neurons[k].value - Y[k]) * self.df(self.layer_output.neurons[k].value)
            deltas.append(delta_k)
            for n in range(self.N):
                w2_update[k, n] = self.w2.matrix[k, n] - lin * delta_k * self.layer_hidden.neurons[n].value
            self.bias2.bias[k] -= lin * delta_k

        # Hidden-layer updates.  The back-propagated error term does not
        # depend on m, so it is hoisted out of the inner loop — the
        # original recomputed it M times and, worse, read an undefined
        # ``tmp`` at the bias1 update when M == 0.
        for n in range(self.N):
            tmp = 0.0
            for k in range(self.K):
                tmp += deltas[k] * self.w2.matrix[k, n]
            delta_n = tmp * self.df(self.layer_hidden.neurons[n].value)
            for m in range(self.M):
                self.w1.matrix[n, m] -= lin * delta_n * self.layer_input.neurons[m].value
            self.bias1.bias[n] -= lin * delta_n

        self.w2.matrix = w2_update

    # Activation function.
    def sigmoid(self, x):
        """Logistic activation: 1 / (1 + e^-x)."""
        return 1 / (1 + math.exp(-x))

    def df(self, y):
        """Sigmoid derivative expressed in terms of its output y: y(1-y)."""
        return y * (1 - y)

搞定!

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值