PyTorch and Dropout

Overview

Deep learning models often use dropout [1] to cope with overfitting. Dropout comes in several variants; the dropout described here refers specifically to inverted dropout.

For the hidden units of a multilayer perceptron with activation function $\phi$ we have

$$h_i = \phi(x_1 w_{1i} + x_2 w_{2i} + x_3 w_{3i} + x_4 w_{4i} + b_i)$$

where $x_1, \ldots, x_4$ are the inputs, $w_{1i}, \ldots, w_{4i}$ are the weights of hidden unit $i$, and $b_i$ is its bias. When dropout is applied to this hidden layer, each of its hidden units has some probability of being dropped. Let the dropout probability be $p$: with probability $p$ the value $h_i$ is zeroed out, and with probability $1-p$ it is stretched by a factor of $\frac{1}{1-p}$. The dropout probability is a hyperparameter of the method. Concretely, let the random variable $\xi_i$ be $0$ with probability $p$ and $1$ with probability $1-p$. With dropout, we compute the new hidden unit

$$h'_i = \frac{\xi_i}{1-p} h_i$$

Since $E(\xi_i) = 1-p$, it follows that

$$E(h'_i) = \frac{E(\xi_i)}{1-p} h_i = h_i$$

That is, dropout does not change the expected value of its input.

Because the dropping of hidden units during training is random (for a hidden layer with five units, any of $h_1, \ldots, h_5$ may be zeroed out), the output layer cannot rely too heavily on any one of $h_1, \ldots, h_5$. This acts as regularization during training and can be used to cope with overfitting. At test time, to obtain more deterministic results, we generally do not use dropout.
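To see the expectation-preserving property concretely, here is a minimal sketch (an illustrative addition, not part of the original text) that samples the keep mask $\xi_i$ many times and checks that the empirical mean of $h'_i$ recovers $h_i$:

import torch

# xi is 0 with probability p and 1 with probability 1 - p (the keep mask)
h, p, n = 2.0, 0.5, 1000000
xi = (torch.rand(n) >= p).float()
h_prime = xi / (1 - p) * h    # inverted-dropout rescaling
print(h_prime.mean().item())  # close to 2.0: the empirical mean recovers h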

Implementation from Scratch

Overview

  • The model defined below chains fully connected layers and ReLU activations, and applies dropout to the output of each activation. We can set the dropout probability of each layer separately. A common recommendation is to set a smaller dropout probability for layers closer to the input. In this experiment we set the dropout probability of the first hidden layer to 0.2 and that of the second hidden layer to 0.5. The is_training parameter tells the model whether it is running in training or evaluation mode, so that dropout is used only during training.

d2lzh_pytorch.py

import random
from IPython import display
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
import time
import sys
import torch.nn as nn


def use_svg_display():
    # display plots as vector graphics (SVG)
    display.set_matplotlib_formats('svg')

def set_figsize(figsize=(3.5, 2.5)):
    use_svg_display()
    # set the figure size
    plt.rcParams['figure.figsize'] = figsize

'''Given batch_size, features and labels, shuffle the data and yield minibatches of the given size'''
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):  # (start, stop, step)
        j = torch.LongTensor(indices[i: min(i + batch_size, num_examples)])  # the last batch may be smaller than batch_size
        yield features.index_select(0, j), labels.index_select(0, j)

'''Linear regression model'''
def linreg(X, w, b):
    return torch.mm(X, w) + b

'''Squared loss for linear regression'''
def squared_loss(y_hat, y):
    return (y_hat - y.view(y_hat.size())) ** 2 / 2

'''Optimization algorithm for linear regression: minibatch stochastic gradient descent'''
def sgd(params, lr, batch_size):
    for param in params:
        param.data -= lr * param.grad / batch_size  # note that we update param.data here

'''Fashion-MNIST: convert numeric labels into the corresponding text labels'''
def get_fashion_mnist_labels(labels):
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]

'''Plot multiple images and their corresponding labels in a single row'''
def show_fashion_mnist(images, labels):
    use_svg_display()
    # the underscore denotes a variable we ignore (do not use)
    _, figs = plt.subplots(1, len(images), figsize=(12, 12))
    for f, img, lbl in zip(figs, images, labels):
        f.imshow(img.view((28, 28)).numpy())
        f.set_title(lbl)
        f.axes.get_xaxis().set_visible(False)
        f.axes.get_yaxis().set_visible(False)
    plt.show()

'''Download and load the Fashion-MNIST dataset; returns the two variables train_iter and test_iter'''
def load_data_fashion_mnist(batch_size):
    mnist_train = torchvision.datasets.FashionMNIST(root='Datasets/FashionMNIST', train=True, download=True,
                                                    transform=transforms.ToTensor())
    mnist_test = torchvision.datasets.FashionMNIST(root='Datasets/FashionMNIST', train=False, download=True,
                                                   transform=transforms.ToTensor())
    if sys.platform.startswith('win'):
        num_workers = 0  # 0 means no extra worker processes to speed up data loading
    else:
        num_workers = 4
    train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    return train_iter, test_iter

'''Evaluate the accuracy of model net on data_iter, taking dropout into account'''
def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        if isinstance(net, torch.nn.Module):
            net.eval()  # evaluation mode: this disables dropout
            acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
            net.train()  # switch back to training mode
        else:  # a custom model
            if 'is_training' in net.__code__.co_varnames:  # the function has an is_training parameter
                # call it with is_training set to False
                acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
            else:
                acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n

'''Train the model (the softmax-regression training loop)'''
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()

            # zero the gradients
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()

            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)
            else:
                optimizer.step()

            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))

'''Reshape the input x to (batch_size, -1)'''
class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()
    def forward(self, x):
        return x.view(x.shape[0], -1)

'''Plotting helper with a logarithmic y-axis; plots the training and testing loss curves'''
def semilogy(x_vals, y_vals, x_label, y_label, x2_vals = None, y2_vals = None,
             legend = None, figsize=(3.5, 2.5)):
    set_figsize(figsize)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.semilogy(x_vals, y_vals)
    if x2_vals and y2_vals:
        plt.semilogy(x2_vals, y2_vals, linestyle=':')
        plt.legend(legend)
    plt.show()

main.py

import torch
import torch.nn as nn
import numpy as np
import sys
sys.path.append("..") 
import d2lzh_pytorch as d2l

def dropout(X, drop_prob):
    X = X.float()
    assert 0 <= drop_prob <= 1
    keep_prob = 1 - drop_prob
    # in this case all elements are dropped
    if keep_prob == 0:
        return torch.zeros_like(X)
    mask = (torch.rand(X.shape) < keep_prob).float()  # keep mask: 1 with probability keep_prob, 0 otherwise

    return mask * X / keep_prob
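
# Quick sanity check (an illustrative addition, not part of the original script):
# drop_prob=0 keeps X unchanged, drop_prob=0.5 zeroes roughly half the entries
# and doubles the survivors, and drop_prob=1 drops everything.
X_check = torch.arange(16).view(2, 8)
print(dropout(X_check, 0))
print(dropout(X_check, 0.5))
print(dropout(X_check, 1.0))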

# Define the model parameters
num_inputs, num_hiddens1, num_hiddens2, num_outputs = 784, 256, 256, 10
w1 = torch.tensor(np.random.normal(0, 0.01, (num_inputs, num_hiddens1)), dtype = torch.float, requires_grad=True)
b1 = torch.zeros(num_hiddens1, requires_grad=True)
w2 = torch.tensor(np.random.normal(0, 0.01, (num_hiddens1, num_hiddens2)), dtype = torch.float, requires_grad=True)
b2 = torch.zeros(num_hiddens2, requires_grad=True)
w3 = torch.tensor(np.random.normal(0, 0.01, (num_hiddens2, num_outputs)), dtype = torch.float, requires_grad=True)
b3 = torch.zeros(num_outputs, requires_grad=True)
params = [w1, b1, w2, b2, w3, b3]

# Define the model
drop_prob1, drop_prob2 = 0.2, 0.5

def net(X, is_training = True):
    X = X.view(-1, num_inputs)
    H1 = (torch.mm(X, w1) + b1).relu()
    if is_training:
        H1 = dropout(H1, drop_prob1)
    H2 = (torch.matmul(H1, w2) + b2).relu()
    if is_training:
        H2 = dropout(H2, drop_prob2)

    return torch.matmul(H2, w3) + b3

# Train and evaluate the model
num_epochs, lr, batch_size = 5, 100.0, 256
loss = torch.nn.CrossEntropyLoss()
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr)


Concise Implementation

import torch
import torch.nn as nn
import numpy as np
import sys
sys.path.append("..") 
import d2lzh_pytorch as d2l



# Define the parameters and the loss function, and load the dataset
num_inputs, num_hiddens1, num_hiddens2, num_outputs = 784, 256, 256, 10
num_epochs, lr, batch_size = 5, 100.0, 256
loss = torch.nn.CrossEntropyLoss()
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
drop_prob1, drop_prob2 = 0.2, 0.5

# Define the model
net = nn.Sequential(
    d2l.FlattenLayer(),
    nn.Linear(num_inputs, num_hiddens1),
    nn.ReLU(),
    nn.Dropout(drop_prob1),
    nn.Linear(num_hiddens1, num_hiddens2),
    nn.ReLU(),
    nn.Dropout(drop_prob2),
    nn.Linear(num_hiddens2, num_outputs)
)
for param in net.parameters():
    nn.init.normal_(param, mean = 0, std = 0.01)

optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
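
Since nn.Dropout respects the module's train/eval flag, we can check that dropout is active only in training mode. A minimal sketch, assuming the net trained above:

# training mode: two forward passes draw different dropout masks
net.train()
x = torch.ones(2, 1, 28, 28)
print(torch.equal(net(x), net(x)))  # almost surely False
# evaluation mode: dropout is disabled, so the output is deterministic
net.eval()
print(torch.equal(net(x), net(x)))  # True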

Summary

  • We can use dropout to cope with overfitting.
  • Dropout is used only while training the model.