Gradient check (verify that backpropagation computes gradients correctly by checking whether the difference between the gradients from numerical differentiation and from backpropagation is small enough)

Backpropagation derives the gradient analytically, but because a neural network is complex, the derivation (or its implementation) is easy to get wrong. Numerical differentiation is much slower, but its result is essentially guaranteed to be correct. So we take the difference between the two gradients and check how large it is, which verifies that the backpropagation computation is correct.
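
As a warm-up, here is the same idea on a toy function (this little sketch is mine for illustration and is not one of the project files): for f(x) = sum(x**2) the analytic gradient is 2*x, and a centered-difference estimate should agree with it almost exactly.

# toy_gradient_check.py (illustrative sketch, not part of the project)
import numpy as np

def f(x):
    return np.sum(x ** 2)  # analytic gradient is 2 * x

def numerical_grad(f, x, h=1e-4):
    grad = np.zeros_like(x)
    for i in range(x.size):
        tmp = x[i]
        x[i] = tmp + h
        fxh1 = f(x)  # f(x + h)
        x[i] = tmp - h
        fxh2 = f(x)  # f(x - h)
        grad[i] = (fxh1 - fxh2) / (2 * h)
        x[i] = tmp   # restore the original value
    return grad

x = np.random.randn(5)
diff = np.average(np.abs(2 * x - numerical_grad(f, x)))
print(diff)  # tiny, on the order of 1e-10 or smaller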

# GradientCheck.py
# 2-layer network with one hidden layer, structure: 784-50-10
# Gradient check: compare the gradients computed by numerical differentiation
# with those computed by backpropagation, to verify that backprop is correct

from dataset.mnist import load_mnist
import numpy as np
from TwoLayerNet import TwoLayerNet
import time

start = time.perf_counter()

# Load the MNIST data
(x_train, t_train), (x_test, t_test) = \
    load_mnist(normalize=True, one_hot_label=True)

net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

x_batch = x_train[:3]
t_batch = t_train[:3]  # the slice [:3] takes the first 3 samples; a few are enough for the check

grad_numerical = net.numerical_gradient(x_batch, t_batch)
grad_backpro = net.gradient(x_batch, t_batch)

# print the mean absolute difference between the two gradients for each of w1, b1, w2, b2
for key in grad_numerical.keys():
    diff = np.average(np.abs(grad_backpro[key] - grad_numerical[key]))
    print(key + ':' + str(diff))

end = time.perf_counter()

print('Running Time: %s Seconds' %(end - start))
# TwoLayerNet.py
# 2-layer network with one hidden layer
# gradients are computed by backpropagation


import numpy as np
from collections import OrderedDict
# OrderedDict: the layers must be stored in order so that the forward and backward passes can traverse them in sequence
from BackPropagation import *
# imports the classes defining the Affine, Relu and SoftmaxWithLoss layers

# Numerical gradient implementation; the method of the same name inside the class calls this function
def numerical_gradient(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
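    # np.nditer walks over every element of a multi-dimensional array;
    # 'multi_index' exposes each element's index tuple and 'readwrite' lets us
    # temporarily overwrite the element with its perturbed value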
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)

        x[idx] = tmp_val  # restore the original value
        it.iternext()

    return grad

class TwoLayerNet:

    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        self.params = {}
        self.params['w1'] = weight_init_std * \
                            np.random.randn(input_size, hidden_size)
        # weights are initialized from a Gaussian distribution scaled by weight_init_std
        self.params['b1'] = np.zeros(hidden_size)  # biases are initialized to 0
        self.params['w2'] = weight_init_std * \
                            np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

        # Build the layers. Implementing the NN as modular layers is very convenient:
        # layers can be stacked like Lego bricks to build a network of any depth.
        self.layers = OrderedDict()  # ordered dictionary
        self.layers['Affine1'] = \
            Affine(self.params['w1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = \
            Affine(self.params['w2'], self.params['b2'])
        self.lastlayer = SoftmaxWithLoss()

    def predict(self, x):
        for layer in self.layers.values():
            # the ordered dict holds three layers: Affine1, Relu1, Affine2
            x = layer.forward(x)

        # x is now the output of Affine2; softmax and the loss have not been applied
        return x

    def loss(self, x, t):
        y = self.predict(x)
        # y is the raw output of Affine2; SoftmaxWithLoss applies softmax and computes the loss
        return self.lastlayer.forward(y, t)

    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        if t.ndim != 1:
            t = np.argmax(t, axis=1)

        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    def gradient(self, x, t):
        # analytic gradient computed by backpropagation
        # forward
        self.loss(x, t)

        # backward
        dout = 1
        dout = self.lastlayer.backward(dout)  # backward through the SoftmaxWithLoss layer first

        layers = list(self.layers.values())
        layers.reverse()  # reverse the layer list
        for layer in layers:
            # backward through Affine2, Relu1 and Affine1, in that order
            dout = layer.backward(dout)

        grads = {}
        grads['w1'] = self.layers['Affine1'].dw
        grads['b1'] = self.layers['Affine1'].db
        grads['w2'] = self.layers['Affine2'].dw
        grads['b2'] = self.layers['Affine2'].db

        return grads

    def numerical_gradient(self, x, t):
        loss_w = lambda w: self.loss(x, t)

        grads = {}

        grads['w1'] = numerical_gradient(loss_w, self.params['w1'])
        grads['b1'] = numerical_gradient(loss_w, self.params['b1'])
        grads['w2'] = numerical_gradient(loss_w, self.params['w2'])
        grads['b2'] = numerical_gradient(loss_w, self.params['b2'])

        return grads

The self.loss(x, t) line in the gradient() method of TwoLayerNet.py is essential. I used to think that since gradients are computed backwards there was no need to bother with the forward pass, and the resulting error took me a long time to track down. During training, once hyperparameters such as batch_size and the learning rate are set, we instantiate TwoLayerNet, which initializes all the parameters; all that remains before the backward pass is to compute the loss, after which the gradient method can be called directly. The forward pass is what caches the intermediate values (each layer's input, the softmax output, and so on) that the backward() calls rely on, so if this line is missing the loss is never computed first and the code that follows cannot compute the gradients. See my other post on training with backpropagation for the complete code.
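
To make that concrete, here is a minimal sketch of how gradient() is typically called inside the training loop (the hyperparameter values below are placeholders for illustration; the complete training script is in that other post):

# sketch of a training loop using gradient() (illustrative; hyperparameters are placeholders)
import numpy as np
from dataset.mnist import load_mnist
from TwoLayerNet import TwoLayerNet

(x_train, t_train), _ = load_mnist(normalize=True, one_hot_label=True)
net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

batch_size, lr = 100, 0.1
for i in range(1000):
    batch_mask = np.random.choice(x_train.shape[0], batch_size)
    x_batch, t_batch = x_train[batch_mask], t_train[batch_mask]

    # gradient() runs the forward pass internally via self.loss(x, t),
    # so every layer caches what its backward() needs
    grads = net.gradient(x_batch, t_batch)
    for key in ('w1', 'b1', 'w2', 'b2'):
        net.params[key] -= lr * grads[key]  # plain SGD update, in place so the layers see it too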

# BackPropagation.py
# Relu layer class (the other layer classes used by TwoLayerNet follow below)
import numpy as np
class Relu:
    def __init__(self):
        self.mask = None

    # forward pass
    def forward(self, x):
        self.mask = (x <= 0)  # boolean mask of the positions where x <= 0
        out = x.copy()        # out starts as a copy of x
        out[self.mask] = 0    # zero out the masked positions

        return out

    # backward pass
    def backward(self, dout):
        dout[self.mask] = 0
        dx = dout

        return dx


class Sigmoid:
    def __init__(self):
        self.out = None

    def forward(self, x):
        out = 1 / (1 + np.exp(-x))
        self.out = out

        return out

    def backward(self, dout):
        # dσ/dx = σ(x) * (1 - σ(x)), expressed through the cached forward output
        dx = dout * self.out * (1 - self.out)

        return dx


class Affine:
    def __init__(self, w, b):
        self.w = w
        self.b = b
        self.x = None
        self.dw = None
        self.db = None


    def forward(self, x):
        self.x = x
        out = np.dot(x, self.w) + self.b

        return out


    def backward(self, dout):
        dx = np.dot(dout, self.w.T)
        # The weights pass through a multiplication node: the gradient w.r.t. the
        # input x is dout times the (transposed) weights, and the gradient w.r.t.
        # the weights is the (transposed) input x times dout.
        # The bias passes through an addition node, so its gradient is simply
        # dout summed over the batch axis.
        self.dw = np.dot(self.x.T, dout)
        self.db = np.sum(dout, axis=0)

        return dx


def softmax(a):
    # handles both a single sample (1-D) and a mini-batch (2-D, one row per sample)
    if a.ndim == 2:
        a = a - np.max(a, axis=1, keepdims=True)  # subtract the row max to prevent overflow
        return np.exp(a) / np.sum(np.exp(a), axis=1, keepdims=True)

    c = np.max(a)
    exp_a = np.exp(a - c)  # subtract the max to prevent overflow
    sum_exp_a = np.sum(exp_a)
    y = exp_a / sum_exp_a

    return y


def cross_entropy_error(y, t):
    if y.ndim == 1:
        # if y is 1-D, i.e. a single sample rather than a mini-batch,
        # reshape t and y into row vectors
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)

    # if the labels are one-hot vectors, convert them to class-index form
    if t.size == y.size:
        t = t.argmax(axis=1)  # index of the largest value, i.e. the correct class label

    batch_size = y.shape[0]
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
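
# Note on the fancy indexing above (toy numbers for illustration): with
# batch_size = 3 and t = [2, 0, 1], y[np.arange(3), t] picks y[0, 2], y[1, 0]
# and y[2, 1] -- each sample's predicted probability for its correct class --
# so the sum adds up -log(p_correct) over the mini-batch.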


class SoftmaxWithLoss:
    def __init__(self):
        self.loss = None
        self.y = None
        self.t = None  # one-hot vector


    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.t)

        return self.loss


    def backward(self, dout=1):
        batch_size = self.t.shape[0]
        # the combined gradient of softmax + cross-entropy loss w.r.t. the input
        # is (y - t); dividing by batch_size averages it over the mini-batch
        dy = (self.y - self.t) / batch_size

        return dy

# Every layer's backward() takes only the gradient flowing in from the layer
# behind it, and these gradients are passed backwards through the layers in turn.
# The gradient at the very end of the chain is 1, which is why the
# SoftmaxWithLoss layer is called with dout=1.
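
The same checking idea also works at the level of a single layer. Below is a small sketch of my own (not part of the original files) that compares SoftmaxWithLoss.backward(), i.e. (y - t) / batch_size, with a centered-difference estimate of dL/dx on a tiny random input:

# softmax_loss_check.py (illustrative layer-level gradient check)
import numpy as np
from BackPropagation import SoftmaxWithLoss

np.random.seed(0)
x = np.random.randn(3, 10)           # 3 samples, 10 classes
t = np.eye(10)[np.array([2, 5, 7])]  # one-hot labels

layer = SoftmaxWithLoss()
layer.forward(x, t)
dx_backprop = layer.backward()       # (y - t) / batch_size

# centered-difference estimate of dL/dx, element by element
h = 1e-4
dx_num = np.zeros_like(x)
for i in range(x.shape[0]):
    for j in range(x.shape[1]):
        tmp = x[i, j]
        x[i, j] = tmp + h
        l1 = SoftmaxWithLoss().forward(x, t)
        x[i, j] = tmp - h
        l2 = SoftmaxWithLoss().forward(x, t)
        dx_num[i, j] = (l1 - l2) / (2 * h)
        x[i, j] = tmp

print(np.average(np.abs(dx_backprop - dx_num)))  # should be tiny (around 1e-7 or smaller)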



Output of GradientCheck.py:

w1:0.00011225812393514702
b1:0.0008511727449861644
w2:0.00266798953422139
b2:0.0666666667863487
Running Time: 21.596621570882924 Seconds

The differences are very small, so backpropagation is computing the gradients correctly.

Project layout (note the imports):
[screenshot of the project file layout]
