BP Neural Network

import random
import numpy as np
import math

# Initialize the neural network, e.g. sizes = [3, 4, 1]
class network(object):
    # Network initialization
    def __init__(self, sizes):
        self.sizes = sizes
        self.biase01 = np.random.randn(sizes[1], 1)  # biases of the hidden layer
        self.biase02 = np.random.randn(sizes[2], 1)  # biases of the output layer
        self.weight01 = np.random.randn(sizes[1], sizes[0])  # weights between input and hidden layer
        self.weight02 = np.random.randn(sizes[2], sizes[1])  # weights between hidden and output layer
        self.num_layers = len(sizes)


    # Forward pass
    def feedforward(self, x):
        # Store all activations (a) and weighted inputs (z)
        a = []
        zs = []
        z1 = np.dot(x, self.weight01.transpose()) + self.biase01.transpose()
        a1 = self.sigmoid(z1)
        z2 = np.dot(a1, self.weight02.transpose()) + self.biase02.transpose()
        a2 = self.sigmoid(z2)
        zs.append(z1)
        zs.append(z2)
        a.append(np.array([x]))  # input as a (1, sizes[0]) row vector
        a.append(a1)
        a.append(a2)
        # Return the stored activations and z values
        return (a, zs)
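    # Row-vector convention used by feedforward: x is treated as a
    # (1, sizes[0]) row, so
    #   z1 = x . W1^T + b1^T  -> shape (1, sizes[1])
    #   z2 = a1 . W2^T + b2^T -> shape (1, sizes[2])
    # which is why every weight matrix and bias vector is transposed.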

    # Stochastic gradient descent
    def SGD(self, training_data, epochs, eta, test_data):
        training_data = list(training_data)
        n = len(training_data)

        if test_data:
            test_data = list(test_data)
            n_test = len(test_data)

        for j in range(epochs):
            random.shuffle(training_data)
            for mini_batch in training_data:  # one (x, y) sample at a time
                self.update_mini_batch(mini_batch, eta)

        # After training, print predictions and collect errors on the test set
        errors = []
        for x, y in test_data:
            a, zs = self.feedforward(x)
            ay = a[-1]  # network output: the last activation
            s = ay - y
            print("************")
            print("Predicted y:\n", ay)
            print("************")
            errors.append(s)

        # Root of the summed squared prediction error
        total = 0.0
        for s in errors:
            total += s.item() ** 2
        esm = math.sqrt(total)
        print("Prediction squared error:")
        print("esm =", esm)




    # Parameter update for one sample (one gradient-descent step)
    def update_mini_batch(self, mini_batch, eta):
        nabla_b1 = np.zeros(self.biase01.shape)
        nabla_b2 = np.zeros(self.biase02.shape)
        nabla_w1 = np.zeros(self.weight01.shape)
        nabla_w2 = np.zeros(self.weight02.shape)

        x = mini_batch[0]
        y = mini_batch[1]

        delta_nabla_b, delta_nabla_w = self.backprop(x, y)
        nabla_b1 = nabla_b1 + delta_nabla_b[0]
        nabla_b2 = nabla_b2 + delta_nabla_b[1]
        nabla_w1 = nabla_w1 + delta_nabla_w[0]
        nabla_w2 = nabla_w2 + delta_nabla_w[1]
        # Gradient-descent update: w <- w - eta * dC/dw, b <- b - eta * dC/db
        self.weight01 = self.weight01 - (eta * nabla_w1)
        self.weight02 = self.weight02 - (eta * nabla_w2)
        self.biase01 = self.biase01 - (eta * nabla_b1)
        self.biase02 = self.biase02 - (eta * nabla_b2)



    # Backpropagation
    def backprop(self, x, y):
        # Get the activations and z values from the forward pass
        a, z = self.feedforward(x)

        # Output-layer error
        delta = self.cost_derivate(a[-1], y) * self.dsigmoid(z[-1])

        nabla_b = [np.zeros(self.biase01.shape), np.zeros(self.biase02.shape)]
        nabla_w = [np.zeros(self.weight01.shape), np.zeros(self.weight02.shape)]

        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, a[-2])

        # Hidden-layer error: this needs sigmoid'(z1), not sigmoid'(z2),
        # transposed to a column vector so it matches delta's shape
        sp = self.dsigmoid(z[-2]).transpose()
        delta = np.dot(self.weight02.transpose(), delta) * sp

        nabla_b[-2] = delta
        nabla_w[-2] = np.dot(delta, a[0])
        return (nabla_b, nabla_w)
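    # The equations implemented above (quadratic cost C = 0.5 * (a - y)^2):
    #   output layer:  delta2 = (a2 - y) * sigmoid'(z2)
    #   hidden layer:  delta1 = (W2^T . delta2) * sigmoid'(z1)
    #   gradients:     dC/db = delta,   dC/dW = delta . a_prev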
    # Evaluation function (unused)
    # def evaluate(self, test_data):
    #     test_results = [(np.argmax(self.feedforward(x)), y) for x, y in test_data]
    #     return sum(int(x == y) for (x, y) in test_results)





    # Error: derivative of the cost with respect to the output activation
    def cost_derivate(self, output_activation, y):
        return (output_activation - y)

    # Sigmoid function
    def sigmoid(self, z):
        return 1.0 / (1.0 + np.exp(-z))

    # Derivative of the sigmoid
    def dsigmoid(self, z):
        return self.sigmoid(z) * (1 - self.sigmoid(z))
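
# A quick sanity check (a standalone snippet, not in the original post):
# the identity sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)) used by
# dsigmoid can be verified against a central finite difference.
def _check_dsigmoid(z=0.5, eps=1e-6):
    sig = lambda t: 1.0 / (1.0 + np.exp(-t))
    numeric = (sig(z + eps) - sig(z - eps)) / (2 * eps)
    analytic = sig(z) * (1 - sig(z))
    print(numeric, analytic)  # both ~0.2350 for z = 0.5

_check_dsigmoid()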

n = network([2, 4, 1])
training_data = [
    [[1, 1], [2]],
    [[1, 2], [6]],
    [[2, 1], [12]],
    [[5, 2], [50]],
    [[8, 3], [96]],
    [[7, 4], [196]],
    [[9, 5], [343]],
    [[13, 8], [512]],
    [[15, 10], [660]],
    [[18, 12], [990]],
    [[24, 16], [1176]],
]
test_data = [
    [[3, 3], [16]],
    [[9, 6], [330]],
    [[13, 8], [530]],
    [[14, 9], [996]],
]
n.SGD(training_data, 2000, 0.01, test_data)
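
# Verifying the gradients (a minimal sketch against the code above;
# grad_check is a hypothetical helper, not part of the original post):
# perturb one output-layer weight and compare the finite-difference
# slope of the quadratic cost C = 0.5 * (a - y)^2 with backprop's value.
def grad_check(net, x, y, i=0, j=0, eps=1e-5):
    def cost():
        a, _ = net.feedforward(x)
        return 0.5 * float(np.sum((a[-1] - y) ** 2))
    _, nabla_w = net.backprop(x, y)      # analytic gradient
    old = net.weight02[i, j]
    net.weight02[i, j] = old + eps
    up = cost()
    net.weight02[i, j] = old - eps
    down = cost()
    net.weight02[i, j] = old             # restore the weight
    return (up - down) / (2 * eps), float(nabla_w[-1][i, j])

print(grad_check(n, [1, 1], [2]))  # the two values should nearly agree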
