Softmax Regression
Softmax regression is a classification model: it maps the outputs into [0, 1] so they can be interpreted as probabilities. Compared with linear regression, it merely adds an activation (the softmax) and widens the output layer to one unit per class, so the number of weight parameters grows by a factor equal to the number of classes (e.g., 3x for a 3-class problem).
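A minimal sketch of the softmax map itself (not part of the original notes): it turns arbitrary real-valued scores into a probability distribution, and subtracting the row-wise max first keeps the exponentials numerically stable.
```python
import torch

def softmax(logits):
    # Subtract the row-wise max for numerical stability (exp of large values overflows)
    z = logits - logits.max(dim=1, keepdim=True).values
    e = torch.exp(z)
    return e / e.sum(dim=1, keepdim=True)

scores = torch.tensor([[2.0, 1.0, 0.1]])
print(softmax(scores))        # ~[[0.659, 0.242, 0.099]]
print(softmax(scores).sum())  # 1.0 -- rows sum to one
```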
Simple implementation steps
- Build the Fashion-MNIST dataset
  Fashion-MNIST is an image dataset; the task is classification with Class = 10.
- Build the model and initialize parameters
  Since the inputs are images, each (grayscale, 28x28) image is read and flattened into a 1-D tensor (no convolutions are used); see the Flatten sketch after this list. Compared with linear regression, Softmax regression simply applies the softmax function at the end to map the outputs to probabilities.
- Define the loss function
  Since this is a classification problem, cross-entropy loss is the usual choice.
- Define the optimizer
  SGD is used here; Adam often converges poorly on image-classification problems.
- Train
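As referenced in the model step above, flattening just reshapes each image into a 784-dimensional vector; a quick shape check (sketch):
```python
import torch
from torch import nn

X = torch.zeros(256, 1, 28, 28)  # a batch of 256 grayscale 28x28 images
print(nn.Flatten()(X).shape)     # torch.Size([256, 784])
```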
Input data
```python
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
```
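Continuing from the loader above, it is worth confirming what the iterators yield (a sketch; the shapes assume the default d2l Fashion-MNIST pipeline):
```python
X, y = next(iter(train_iter))
print(X.shape, X.dtype)  # expected: torch.Size([256, 1, 28, 28]) torch.float32
print(y.shape, y.dtype)  # expected: torch.Size([256]) torch.int64
```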
Building the network
```python
# Initialize parameters
def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)

net = nn.Sequential(nn.Flatten(), nn.Linear(28*28, 10))
net.apply(init_weights)
```
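A quick sanity check that the initialization was applied (sketch; in this Sequential, index 1 is the Linear layer):
```python
print(net[1].weight.std())      # should be close to 0.01
print(net[1].bias.abs().max())  # bias keeps nn.Linear's default init; init_weights only touches weights
```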
Version subclassing nn.Module:
```python
class Net(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(Net, self).__init__()
        self.flat = nn.Flatten()
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.linear(self.flat(x))
```
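A quick forward-pass shape check for the class-based version (sketch):
```python
import torch

net = Net(28 * 28, 10)
X = torch.zeros(256, 1, 28, 28)  # a dummy batch of 256 images
print(net(X).shape)  # torch.Size([256, 10]) -- one raw score (logit) per class
```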
Loss function
```python
# Loss function
loss = nn.CrossEntropyLoss()
```
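Note that nn.CrossEntropyLoss expects raw logits and applies log-softmax internally, which is why no explicit softmax layer appears in the network above. A quick equivalence check (sketch):
```python
import torch
import torch.nn.functional as F
from torch import nn

logits = torch.randn(4, 10)
y = torch.tensor([3, 0, 7, 1])
a = nn.CrossEntropyLoss()(logits, y)
b = F.nll_loss(F.log_softmax(logits, dim=1), y)
print(torch.allclose(a, b))  # True: cross-entropy = log-softmax + negative log-likelihood
```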
Optimizer
```python
# Optimizer
trainer = torch.optim.SGD(net.parameters(), lr=0.1)
```
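For reference, with momentum and weight decay at their defaults (both off), each trainer.step() amounts to the plain update w = w - lr * grad; a sketch of the equivalent manual step:
```python
with torch.no_grad():
    for param in net.parameters():
        if param.grad is not None:
            param -= 0.1 * param.grad  # lr = 0.1, matching the trainer above
```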
Train
Here we simply use the train_ch3 training routine from the d2l library:
```python
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
```
Possible bug -> RuntimeError: DataLoader worker (pid(s) 9528, 8320) exited unexpectedly
This happens when the machine cannot spawn the requested number of DataLoader worker processes (a common issue on Windows).
Fix: modify the d2l source so the Fashion-MNIST loader can run without worker subprocesses (e.g., let load_data_fashion_mnist accept a num_workers argument and pass it to its DataLoaders), then call:
```python
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, num_workers=0)
```
Bug fixed!
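Alternatively, if you prefer not to patch the d2l source, you can build equivalent loaders directly with torchvision and control num_workers yourself (a minimal sketch; the root path './data' is an arbitrary choice):
```python
import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms

trans = transforms.ToTensor()
train_ds = torchvision.datasets.FashionMNIST(
    root='./data', train=True, transform=trans, download=True)
test_ds = torchvision.datasets.FashionMNIST(
    root='./data', train=False, transform=trans, download=True)
train_iter = DataLoader(train_ds, batch_size=256, shuffle=True, num_workers=0)
test_iter = DataLoader(test_ds, batch_size=256, shuffle=False, num_workers=0)
```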
Complete code:
```python
import torch
from torch import nn
from d2l import torch as d2l
import matplotlib.pyplot as plt

# Initialize parameters
def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)

batch_size = 32
# num_workers is only accepted here after the d2l source modification above
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, num_workers=0)

# net = nn.Sequential(nn.Flatten(), nn.Linear(28*28, 10))
class Net(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(Net, self).__init__()
        self.flat = nn.Flatten()
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.linear(self.flat(x))

net = Net(28*28, 10)
net.apply(init_weights)

# Loss function
loss = nn.CrossEntropyLoss()
# Optimizer
trainer = torch.optim.SGD(net.parameters(), lr=0.1)

num_epochs = 10
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
plt.show()
```
The model is usually well trained after about 6 or 7 epochs; training for more than 10 epochs may lead to overfitting.
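After training, a quick qualitative check on a test batch can be done like this (sketch; d2l.get_fashion_mnist_labels maps class indices to text names):
```python
X, y = next(iter(test_iter))
with torch.no_grad():
    preds = net(X).argmax(axis=1)
print(d2l.get_fashion_mnist_labels(y[:5]))      # ground-truth names
print(d2l.get_fashion_mnist_labels(preds[:5]))  # predicted names
```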
Appendix: the actual d2l implementation of train_ch3:
```python
# Context from the d2l module (imports assumed so the appendix runs standalone):
import torch
from IPython import display
from d2l import torch as d2l

def evaluate_accuracy(net, data_iter):  #@save
    """Compute the model's accuracy on the given dataset."""
    if isinstance(net, torch.nn.Module):
        net.eval()  # set the model to evaluation mode
    metric = Accumulator(2)  # number of correct predictions, total number of predictions
    for X, y in data_iter:
        metric.add(accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]
```
```python
def accuracy(y_hat, y):  #@save
    """Compute the number of correct predictions."""
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(axis=1)
    cmp = y_hat.type(y.dtype) == y
    return float(cmp.type(y.dtype).sum())
```
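A small usage check for accuracy, continuing from the definition above (sketch):
```python
# Two predictions, both correct
y_hat = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
y = torch.tensor([1, 0])
print(accuracy(y_hat, y))           # 2.0 correct predictions
print(accuracy(y_hat, y) / len(y))  # 1.0, the fraction correct
```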
```python
class Accumulator:  #@save
    """Accumulate sums over `n` variables."""
    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
```
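How Accumulator is used in practice (sketch with made-up batch statistics):
```python
# Accumulate (correct, total) across two fake batches
metric = Accumulator(2)
metric.add(45, 64)  # batch 1: 45 correct out of 64
metric.add(50, 64)  # batch 2: 50 correct out of 64
print(metric[0] / metric[1])  # 95 / 128 ≈ 0.742, accuracy so far
```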
```python
class Animator:  #@save
    """Plot data in animation."""
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        # Incrementally plot multiple lines
        if legend is None:
            legend = []
        d2l.use_svg_display()
        self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # Use a lambda to capture the axis-configuration arguments
        self.config_axes = lambda: d2l.set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):
        # Add multiple data points to the figure
        if not hasattr(y, "__len__"):
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)
```
```python
def train_epoch_ch3(net, train_iter, loss, updater):  #@save
    """Train the model for one epoch (defined in Chapter 3)."""
    # Set the model to training mode
    if isinstance(net, torch.nn.Module):
        net.train()
    # Sum of training loss, sum of training accuracy, number of examples
    metric = Accumulator(3)
    for X, y in train_iter:
        # Compute gradients and update parameters
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):
            # Use PyTorch's built-in optimizer and loss function
            updater.zero_grad()
            l.backward()
            updater.step()
            metric.add(float(l) * len(y), accuracy(y_hat, y),
                       y.size().numel())
        else:
            # Use a custom optimizer and loss function
            l.sum().backward()
            updater(X.shape[0])
            metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    # Return training loss and training accuracy
    return metric[0] / metric[2], metric[1] / metric[2]
```
```python
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):  #@save
    """Train the model (defined in Chapter 3)."""
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                        legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
    train_loss, train_acc = train_metrics
    assert train_loss < 0.5, train_loss
    assert train_acc <= 1 and train_acc > 0.7, train_acc
    assert test_acc <= 1 and test_acc > 0.7, test_acc
```