PyTorch Study Notes 03: Softmax Regression and the Image Classification Dataset

Reference:

09 Softmax 回归 + 损失函数 + 图片分类数据集【动手学深度学习v2】_哔哩哔哩_bilibili

1. Importing and Preprocessing the Dataset (consolidated in 1.5)

1.1 Required Libraries and Packages

%matplotlib inline
import numpy as np
import torch
import torchvision
from torch.utils import data
from torchvision import transforms
from d2l import torch as d2l
d2l.use_svg_display()

1.2 Reading the Fashion-MNIST Dataset into Memory with Built-in Framework Functions

trans = transforms.ToTensor()
# The ToTensor instance converts image data from PIL format into 32-bit
# floating-point tensors and divides by 255, so every pixel value lies in [0, 1]
mnist_train = torchvision.datasets.FashionMNIST(root = './data',
                                                 train = True,
                                                 transform = trans,
                                                 download = True)
mnist_test = torchvision.datasets.FashionMNIST(root = './data',
                                               train = False,
                                               transform = trans,
                                               download = True)
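A quick sanity check on what was loaded: Fashion-MNIST ships 60,000 training and 10,000 test examples, each a 1 × 28 × 28 grayscale image.

print(len(mnist_train), len(mnist_test))    # 60000 10000
print(mnist_train[0][0].shape)              # torch.Size([1, 28, 28])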

1.3 Returning the Text Labels of the Fashion-MNIST Dataset

def get_fashion_mnist_labels(labels):
    """Convert numeric class indices into text labels"""
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]

[Appendix] How to turn the preprocessed tensors back into images and show the corresponding text labels?

X, y = next(iter(data.DataLoader(mnist_train, batch_size = 10)))
d2l.show_images(X.reshape(10, 28, 28), 2, 5,
                titles = get_fashion_mnist_labels(y))
# a 2 x 5 grid holds exactly the 10 images in this batch
d2l.plt.show()

1.4 The Batch Iterator

This covers the number of worker processes used when reading data, and timing one full pass over the data.

batch_size = 256

def get_dataloader_workers():
    """Number of worker processes for reading data; 0 means the main process
       (on Windows, values > 0 require the if __name__ == '__main__' guard)"""
    return 0

# Iterator over the training data
train_iter = data.DataLoader(mnist_train, batch_size, shuffle = True,
                             num_workers = get_dataloader_workers())

timer = d2l.Timer()
for X, y in train_iter:
    continue
# Total time to read one epoch, i.e. all batches
print(f'{timer.stop():.2f} sec')
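On machines with several CPU cores, raising the worker count usually speeds up reading. A quick comparison sketch (train_iter4 is a throwaway name for illustration):

train_iter4 = data.DataLoader(mnist_train, batch_size, shuffle = True,
                              num_workers = 4)    # 4 worker processes
timer = d2l.Timer()
for X, y in train_iter4:
    continue
print(f'{timer.stop():.2f} sec')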

1.5 Putting It All Together (dataset download plus iterator creation)

def load_data_fashion_mnist(batch_size, resize = None):
    """Download the Fashion-MNIST dataset and return the batch iterators"""

    # Build the preprocessing pipeline: convert the input images into the
    # Tensor format we need, optionally resizing them first
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))
    # If a target size was given, dynamically insert a resize step at the
    # front of the pipeline, so images are resized before being converted
    trans = transforms.Compose(trans)

    # Download and define the datasets
    mnist_train = torchvision.datasets.FashionMNIST(root = './data',
                                                    train = True,
                                                    transform = trans,
                                                    download = True)
    mnist_test = torchvision.datasets.FashionMNIST(root = './data',
                                                   train = False,
                                                   transform = trans,
                                                   download = True)

    # Create the data loaders
    return (data.DataLoader(mnist_train, batch_size, shuffle = True,
                            num_workers = get_dataloader_workers()),
            data.DataLoader(mnist_test, batch_size, shuffle = False,
                            num_workers = get_dataloader_workers()))
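A quick usage check, resizing the images to 64 × 64 as the d2l book does, to confirm the iterators yield the expected shapes:

train_iter, test_iter = load_data_fashion_mnist(32, resize = 64)
for X, y in train_iter:
    print(X.shape, X.dtype, y.shape, y.dtype)
    # torch.Size([32, 1, 64, 64]) torch.float32 torch.Size([32]) torch.int64
    break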

2. Implementing Softmax Regression from Scratch

2.1 Importing Packages and Defining the Data Iterators

import torch
from IPython import display
from d2l import torch as d2l

# Set the batch size, then call load_data_fashion_mnist() to load the
# dataset and create the training/test iterators
batch_size = 256
train_iter, test_iter = load_data_fashion_mnist(batch_size)

2.2 Determining the Feature Dimension and Initializing the Weights and Bias

num_input = 28 * 28
# Note: num_input is not the number of samples,
# but the feature dimension of a single sample
num_output = 10    # number of classes

W = torch.normal(0, 0.01, size = (num_input, num_output),
                 requires_grad = True)
# W maps the input features to the output classes,
# hence its shape is (num_input, num_output)
b = torch.zeros(num_output, requires_grad = True)

2.3 Building the Softmax Function and the Network Model

def softmax(X):
    """Exponentiate, then normalize each row into probabilities"""
    X_exp = torch.exp(X)
    partition = X_exp.sum(1, keepdim = True)
    # Sum across columns (one sum per row), keeping the dimension
    # so broadcasting works in the division below
    return X_exp / partition
    # note: no overflow protection; very large entries can make exp() overflow

def net(X):
    """A linear transformation followed by softmax"""
    return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)
    # Note: the input images are reshaped here to (batch_size, num_input),
    # where the feature dimension must match W.shape[0]
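As a quick check, softmax(X)_ij = exp(X_ij) / Σ_k exp(X_ik), so every output entry lies in (0, 1) and each row sums to 1:

X = torch.normal(0, 1, (2, 5))
X_prob = softmax(X)
print(X_prob)            # all entries in (0, 1)
print(X_prob.sum(1))     # tensor([1., 1.])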

2.4 The Training Loop for Softmax Regression

def train_epoch_ch3(net, train_iter, loss, updater):
    """Train for a single epoch, without evaluating on the test/validation set"""
    if isinstance(net, torch.nn.Module):
        net.train()    # put the model in training mode

    # d2l.Accumulator keeps running sums: loss, correct predictions, sample count
    metric = d2l.Accumulator(3)
    for X, y in train_iter:
        y_hat = net(X)
        l = loss(y_hat, y)
        # The branches below handle both built-in torch optimizers
        # and a hand-written updater
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.backward()
            updater.step()
            metric.add(float(l) * len(y), d2l.accuracy(y_hat, y),
                       y.numel())
        else:
            l.sum().backward()
            updater(X.shape[0])
            metric.add(float(l.sum()), d2l.accuracy(y_hat, y),
                       y.numel())
    # Return the average loss and accuracy over the epoch
    return metric[0] / metric[2], metric[1] / metric[2]


def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
    """Train the model and animate the loss/accuracy curves"""
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                            legend=['train loss', 'train acc', 'test acc'])

    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = d2l.evaluate_accuracy(net, test_iter)
        animator.add(epoch + 1, train_metrics + (test_acc,))
    # Check the loss and accuracies after the final epoch
    train_loss, train_acc = train_metrics

    assert train_loss < 0.5, train_loss
    assert train_acc <= 1 and train_acc > 0.7, train_acc
    assert test_acc <= 1 and test_acc > 0.7, test_acc
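These notes never define the loss function or updater that train_ch3 expects, nor actually call it for the from-scratch model. A minimal sketch following the d2l book's from-scratch version (cross-entropy is the negative log of the probability predicted for the true class; d2l.sgd is the book's minibatch SGD helper):

def cross_entropy(y_hat, y):
    """Pick out the predicted probability of each true class and take -log"""
    return -torch.log(y_hat[range(len(y_hat)), y])

lr = 0.1

def updater(batch_size):
    """Hand-written updater: minibatch SGD on W and b"""
    return d2l.sgd([W, b], lr, batch_size)

num_epochs = 10
train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, updater)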

2.5 Making Classification Predictions on Images

def predict_ch3(net, test_iter, n = 6):
    """Evaluate on a single test batch and display the first n images"""
    for X, y in test_iter:
        break
    trues = d2l.get_fashion_mnist_labels(y)
    preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis = 1))
    titles = [true + '\n' + pred for true, pred in zip(trues, preds)]
    d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles = titles[0:n])
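With a trained model, running the prediction helper takes one line:

predict_ch3(net, test_iter)
d2l.plt.show()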

3. Concise Implementation of Softmax Regression

3.1 Importing Packages and Creating the Train/Test Iterators

import torch
from torch import nn
from d2l import torch as d2l

# Create the training and test/validation data iterators
batch_size = 256    # the iterators need the batch size defined up front
train_iter, test_iter = load_data_fashion_mnist(batch_size)

3.2 Initializing the Model and Parameters

net = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
# nn.Flatten is a layer that flattens multi-dimensional input into 1-D
# It is common in image models, though it discards some 2-D spatial structure

def init_weights(m):
    """Applied to every layer inside nn.Sequential"""
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std = 0.01)

net.apply(init_weights)
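As a quick illustration of what nn.Flatten does, it collapses every dimension except the batch dimension:

X = torch.zeros(2, 1, 28, 28)        # a dummy batch of 2 images
print(nn.Flatten()(X).shape)         # torch.Size([2, 784])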

3.3 Defining the Loss Function and the Optimizer

loss = nn.CrossEntropyLoss()
# nn.CrossEntropyLoss applies softmax internally, which is why
# the network above ends with a plain Linear layer and no softmax
trainer = torch.optim.SGD(net.parameters(), lr = 0.1)

3.4 Training the Model with the Previously Defined Training Function

# Set the number of training epochs before starting
num_epochs = 10
train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
