Disclaimer
This article was written for my own study; please excuse any rough formatting. The content reflects my personal understanding, and corrections are very welcome if you spot any mistakes.
14. Dropout
Dropout is a regularization technique for preventing overfitting in neural networks. During training, dropout temporarily removes a random subset of the network's neurons (and their connections) on every forward pass, which reduces the effective complexity of the model and discourages units from co-adapting. At test time dropout is disabled: all neurons and connections are kept, so the output is deterministic. In the standard "inverted dropout" formulation used below, each activation is zeroed with probability p and the survivors are scaled by 1/(1 - p), so the expected value of each activation is unchanged and no extra rescaling is needed at test time. In this way dropout pushes the model toward more robust feature representations and improves generalization.
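Formally, with dropout probability p an activation h is replaced by a random variable h':

h' = 0 with probability p,  h' = h / (1 - p) otherwise,

so its expectation is unchanged: E[h'] = p · 0 + (1 - p) · h / (1 - p) = h. This mask-and-rescale computation is exactly what the dropout_layer function below implements.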
import torch
import torchvision
from torch.utils import data
from torch import nn
from d2l import torch as d2l
from torchvision import transforms
from IPython import display
def load_data_fashion_mnist(batch_size, resize=None):
    """Download the Fashion-MNIST dataset and load it into memory."""
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))
    trans = transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(
        root="../data", train=True, transform=trans, download=False)  # choose the storage path, and whether to download, to suit your setup
    mnist_test = torchvision.datasets.FashionMNIST(
        root="../data", train=False, transform=trans, download=False)
    return (data.DataLoader(mnist_train, batch_size, shuffle=True,
                            num_workers=4),
            data.DataLoader(mnist_test, batch_size, shuffle=False,
                            num_workers=4))
def accuracy(y_hat, y):  # count how many predictions are correct
    """Compute the number of correct predictions."""
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(axis=1)
    cmp = y_hat.type(y.dtype) == y  # bool tensor: True where the prediction matches the label
    return float(cmp.type(y.dtype).sum())
def evaluate_accuracy(net, data_iter):  # compute the model's accuracy on a dataset
    """Compute the accuracy of the model on the given dataset."""
    if isinstance(net, torch.nn.Module):
        net.eval().cuda()  # set the model to evaluation mode (dropout disabled)
    metric = Accumulator(2)  # number of correct predictions, total number of predictions
    with torch.no_grad():
        for X, y in data_iter:
            X, y = X.cuda(), y.cuda()
            metric.add(accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]
class Accumulator:  # a utility class for accumulating sums over several variables
    """Accumulate sums over n variables."""
def __init__(self, n):
self.data = [0.0] * n
def add(self, *args):
self.data = [a + float(b) for a, b in zip(self.data, args)]
def reset(self):
self.data = [0.0] * len(self.data)
def __getitem__(self, idx):
return self.data[idx]
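# A quick aside showing how Accumulator is used (the numbers are made up,
# purely to illustrate the API):
acc = Accumulator(2)
acc.add(3, 10)  # e.g. 3 correct predictions in a batch of 10
acc.add(2, 10)  # 2 correct in another batch of 10
print(acc[0] / acc[1])  # 0.25 -- the running accuracy over both batches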
def train_epoch(net, train_iter, loss, updater):  # train the model for one epoch
    """Train the model for one epoch."""
    if isinstance(net, torch.nn.Module):
        net.train().cuda()  # set the model to training mode (dropout active)
    metric = Accumulator(3)  # sum of training loss, sum of correct predictions, number of examples
    for X, y in train_iter:  # compute gradients and update the parameters
        X, y = X.cuda(), y.cuda()
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):  # using PyTorch's built-in optimizer and loss
            updater.zero_grad()
            l.mean().backward()
            updater.step()
        else:  # using a custom optimizer and loss
            l.sum().backward()
            updater(X.shape[0])
        metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    return metric[0] / metric[2], metric[1] / metric[2]  # return training loss and training accuracy
class Animator:  # a utility class for plotting data in animation
    """Plot data in animation."""
def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
ylim=None, xscale='linear', yscale='linear',
fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
figsize=(3.5, 2.5)):
        # plot multiple lines incrementally
if legend is None:
legend = []
d2l.use_svg_display()
self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
if nrows * ncols == 1:
self.axes = [self.axes, ]
        # use a lambda to capture the axis-configuration arguments
self.config_axes = lambda: d2l.set_axes(
self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
self.X, self.Y, self.fmts = None, None, fmts
def add(self, x, y):
        # add several data points to the chart
if not hasattr(y, "__len__"):
y = [y]
n = len(y)
if not hasattr(x, "__len__"):
x = [x] * n
if not self.X:
self.X = [[] for _ in range(n)]
if not self.Y:
self.Y = [[] for _ in range(n)]
for i, (a, b) in enumerate(zip(x, y)):
if a is not None and b is not None:
self.X[i].append(a)
self.Y[i].append(b)
self.axes[0].cla()
for x, y, fmt in zip(self.X, self.Y, self.fmts):
self.axes[0].plot(x, y, fmt)
self.config_axes()
display.display(self.fig)
display.clear_output(wait=True)
def train(net, train_iter, test_iter, loss, num_epochs, updater, dropout1, dropout2):  # the full training loop
    """Train the model."""
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                        legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):  # run for num_epochs epochs
        train_metrics = train_epoch(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
    train_loss, train_acc = train_metrics
    # sanity checks on the final metrics; these can fail for aggressive dropout rates
    assert train_loss < 0.5, train_loss
    assert train_acc <= 1 and train_acc > 0.7, train_acc
    assert test_acc <= 1 and test_acc > 0.7, test_acc
    d2l.plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
    d2l.plt.title(f"dropout1 = {dropout1}, dropout2 = {dropout2}")
    d2l.plt.show()
################ Implementing dropout from scratch ################
def dropout_layer(X, dropout):
    assert 0 <= dropout <= 1
    # in this case, every element is dropped
    if dropout == 1:
        return torch.zeros_like(X).cuda()
    # in this case, every element is kept
    if dropout == 0:
        return X.cuda()
    # draw a random keep-mask and rescale the survivors by 1/(1 - p)
    mask = (torch.rand(X.shape).cuda() > dropout).float()
    return (mask * X / (1.0 - dropout)).cuda()
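# Sanity check (an illustrative aside, not part of the original code): since
# survivors are scaled by 1/(1 - p), E[dropout_layer(X, p)] = X, so averaging
# many independent draws should approximately recover X:
_X = torch.ones(2, 8).cuda()
_avg = sum(dropout_layer(_X, 0.5) for _ in range(1000)) / 1000
print(_avg)  # every entry should be close to 1.0, up to Monte Carlo noise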
class Net(nn.Module):
    def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,
                 is_training=True):
        super(Net, self).__init__()
        self.num_inputs = num_inputs
        self.training = is_training
        self.lin1 = nn.Linear(num_inputs, num_hiddens1).cuda()
        self.lin2 = nn.Linear(num_hiddens1, num_hiddens2).cuda()
        self.lin3 = nn.Linear(num_hiddens2, num_outputs).cuda()
        self.relu = nn.ReLU().cuda()
    def forward(self, X):
        H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))
        # apply dropout only while training; dropout1 and dropout2 are
        # module-level globals defined further below
        if self.training:
            # add a dropout layer after the first fully connected layer
            H1 = dropout_layer(H1, dropout1)
        H2 = self.relu(self.lin2(H1))
        if self.training:
            # add a dropout layer after the second fully connected layer
            H2 = dropout_layer(H2, dropout2)
        out = self.lin3(H2)
        return out
X = torch.arange(16, dtype = torch.float32).reshape((2, 8)).cuda()
print("X = ", X, "\n")
print("dropout:0.0, X =", dropout_layer(X, 0.), "\n")
print("dropout:0.3, X =", dropout_layer(X, 0.3), "\n")
print("dropout:1.0, X =", dropout_layer(X, 1.))
num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256
dropout1, dropout2 = 0.2, 0.5
net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)
num_epochs, lr, batch_size = 10, 0.5, 256
loss = nn.CrossEntropyLoss(reduction='none')
train_iter, test_iter = load_data_fashion_mnist(batch_size)
trainer = torch.optim.SGD(net.parameters(), lr=lr)
train(net, train_iter, test_iter, loss, num_epochs, trainer, dropout1, dropout2)  # dropout1 and dropout2 are passed only to label the plot title
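# A minimal inference sketch (illustrative, not from the original post):
# net.eval() sets self.training to False, so forward() skips both dropout layers.
net.eval()
with torch.no_grad():
    X_sample, y_sample = next(iter(test_iter))
    preds = net(X_sample.cuda()).argmax(axis=1)
print(preds[:10].cpu(), y_sample[:10])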
################ Concise implementation of dropout ################
# The imports and helper functions from the first part (load_data_fashion_mnist,
# accuracy, evaluate_accuracy, Accumulator, train_epoch, Animator, train) are
# reused here unchanged.
dropout1, dropout2 = 0.6, 0.8
num_epochs, lr, batch_size = 10, 0.5, 256
net = nn.Sequential(nn.Flatten(),
                    nn.Linear(784, 256),
                    nn.ReLU(),
                    # add a dropout layer after the first fully connected layer
                    nn.Dropout(dropout1),
                    nn.Linear(256, 256),
                    nn.ReLU(),
                    # add a dropout layer after the second fully connected layer
                    nn.Dropout(dropout2),
                    nn.Linear(256, 10)).cuda()
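# An illustrative aside: nn.Dropout is active only in training mode and uses
# the same inverted-dropout rescaling as the dropout_layer defined earlier;
# in eval mode it is the identity.
_layer = nn.Dropout(0.5)
_x = torch.ones(8)
_layer.train()
print(_layer(_x))  # about half the entries zeroed, survivors scaled to 2.0
_layer.eval()
print(_layer(_x))  # identity: all ones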
def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)  # in-place init; no .cuda() needed on the returned tensor
net.apply(init_weights)  # net is already on the GPU
loss = nn.CrossEntropyLoss(reduction='none')
train_iter, test_iter = load_data_fashion_mnist(batch_size)
trainer = torch.optim.SGD(net.parameters(), lr=lr)
train(net, train_iter, test_iter, loss, num_epochs, trainer, dropout1, dropout2)
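One caveat: with the much higher rates used here (dropout1 = 0.6, dropout2 = 0.8) the model is regularized far more aggressively, so the sanity-check assertions inside train (train_loss < 0.5 and both accuracies above 0.7) may fail; if they do, lower the dropout rates, train for more epochs, or relax the assertions.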
Parts of this article draw on: Bilibili — 跟李沐学AI (Learn AI with Mu Li); Baidu Baike.