最适合机器学习入门者的项目:FashionMNIST衣服种类识别与可视化之pytorch简单实现(超详细!!!附完整代码)

前言

机器学习模型的目的之一是学习数据的有效嵌入表示。在此次任务中,为了让读者看懂且上手机器学习,笔者没有在上游的嵌入学习使用复杂的模型以挖掘数据潜在的价值,而是专注于下游的机器学习分类任务。

为了直观展示原始数据的分布并方便读者理解,我们在第二章数据可视化中进行了T-SNE可视化(附代码),如果读者想看机器学习代码可查看第三章分类任务中的完整代码。在第三章中,我们给出了此分类任务的处理(附代码)。

数据可视化

加载的库如下:

from __future__ import print_function, division
import torch
from sklearn.manifold import TSNE
import torchvision
from torchvision import datasets
from torch.utils.data import DataLoader
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import matplotlib.patheffects as pe
import torch.nn as nn
from tqdm import tqdm

首先提取下载数据:

def download():
    """Download FashionMNIST and return loaders plus the raw datasets.

    Returns (train_dataloader, test_dataloader, training_data, test_data);
    the test loader yields the whole 10000-sample split as one batch.
    """
    # Convert PIL images to tensors and normalize pixel values to [-1, 1].
    to_normalized_tensor = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=[0.5], std=[0.5]),
    ])

    # Fetch the FashionMNIST train and test splits (cached under ./data).
    training_data = datasets.FashionMNIST(
        root="data", train=True, download=True, transform=to_normalized_tensor)
    test_data = datasets.FashionMNIST(
        root="data", train=False, download=True, transform=to_normalized_tensor)

    # Mini-batches of 64 for training; a single full-split batch for testing.
    train_dataloader = DataLoader(training_data, batch_size=64)
    test_dataloader = DataLoader(test_data, batch_size=10000)

    return train_dataloader, test_dataloader, training_data, test_data

download函数返回了dataloader和data(训练集和测试集都有)

在得到数据后需要对数据进行处理,目的是为了适应T-SNE降维的需要。sklearn中很多库的实现都是numpy类型,T-SNE也不例外。数据处理代码:

if __name__ == "__main__":
    _, _, train_data, test_data = download()

    # Fix: the original grew `images`/`labels` with torch.cat inside the loop,
    # copying the whole accumulator on every iteration (O(n^2) overall).
    # Collect rows in Python lists and stack once at the end instead.
    image_rows = []
    label_values = []
    for image, label in tqdm(test_data):
        image_rows.append(image.reshape(-1))  # flatten each 1x28x28 image to 784
        label_values.append(label)

    images = torch.stack(image_rows, dim=0)              # (10000, 784) float32
    labels = torch.tensor(label_values, dtype=torch.long)

    print("images:", type(images))
    print("images.shape:", images.shape)

    # sklearn's TSNE operates on numpy arrays, so convert before fitting.
    # (The original also built unused X/Y stacks here; dead code removed.)
    images = images.numpy()
    labels = labels.numpy()

    # Project the 784-d images down to 2-D and plot, colored by class label.
    digits_final = TSNE(perplexity=50).fit_transform(images)
    plot(digits_final, labels)

这里采用的是拼接torch.cat()/torch.concat()方法,时间消耗大一点。其实完全不用这么复杂,可以直接转换test_data进行代码优化。

作图代码:

def plot(image, colors):
    """Scatter-plot 2-D embeddings colored by class, labeling each class median.

    `image` is an (n, 2) array of t-SNE coordinates; `colors` the matching
    integer class labels. Saves the figure to ./digits_tsne-pastel.png and
    returns (figure, axes, text handles).
    """
    palette = np.array(sns.color_palette("pastel", 10))

    fig = plt.figure(figsize=(8, 8))
    ax = plt.subplot(aspect='equal')
    sc = ax.scatter(image[:, 0], image[:, 1], lw=0, s=40,
                    c=palette[colors.astype(np.int8)])

    # Add the labels for each digit.
    text_handles = []
    for class_id in range(10):
        # Place each label at the median position of that class's points.
        x_pos, y_pos = np.median(image[colors == class_id, :], axis=0)
        handle = ax.text(x_pos, y_pos, str(class_id), fontsize=24)
        # White stroke keeps the label readable on top of the scatter points.
        handle.set_path_effects([pe.Stroke(linewidth=5, foreground="w"), pe.Normal()])
        text_handles.append(handle)

    plt.savefig('./digits_tsne-pastel.png', dpi=120)
    plt.show()
    return fig, ax, text_handles

FashionMNIST中标签种类有10种(包含夹克,棉袄这种),作图时对每个种类的索引进行了标记。

可视化完整代码(直接运行即可):

from __future__ import print_function, division

import torch
from sklearn.manifold import TSNE
import torchvision
from torchvision import datasets
from torch.utils.data import DataLoader
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import matplotlib.patheffects as pe
import torch.nn as nn
from tqdm import tqdm
def download():
    """Fetch FashionMNIST and build DataLoaders for both splits.

    Returns (train_dataloader, test_dataloader, training_data, test_data).
    The test loader's batch size of 10000 covers the entire test split.
    """
    # Pipeline: PIL image -> tensor -> normalized to the [-1, 1] range.
    pipeline = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=[0.5], std=[0.5]),
    ])

    # Download (or reuse cached) FashionMNIST train/test datasets.
    training_data = datasets.FashionMNIST(
        root="data",
        train=True,
        download=True,
        transform=pipeline,
    )
    test_data = datasets.FashionMNIST(
        root="data",
        train=False,
        download=True,
        transform=pipeline,
    )

    # Training uses mini-batches of 64; testing uses one full-split batch.
    train_dataloader = DataLoader(training_data, batch_size=64)
    test_dataloader = DataLoader(test_data, batch_size=10000)

    return train_dataloader, test_dataloader, training_data, test_data

def plot(image, colors):
    """Draw the 2-D t-SNE embedding as a class-colored scatter plot.

    Each of the 10 classes gets a pastel color and a numeric label drawn at
    the median of its points. The figure is saved to disk and shown; returns
    (figure, axes, list of text artists).
    """
    palette = np.array(sns.color_palette("pastel", 10))

    fig = plt.figure(figsize=(8, 8))
    ax = plt.subplot(aspect='equal')
    sc = ax.scatter(image[:, 0], image[:, 1], lw=0, s=40,
                    c=palette[colors.astype(np.int8)])

    # Annotate every class with its index at the median of its cluster.
    annotations = []
    for idx in range(10):
        cx, cy = np.median(image[colors == idx, :], axis=0)
        label_artist = ax.text(cx, cy, str(idx), fontsize=24)
        # Outline the digit in white so it stays legible over dense points.
        label_artist.set_path_effects(
            [pe.Stroke(linewidth=5, foreground="w"), pe.Normal()])
        annotations.append(label_artist)

    plt.savefig('./digits_tsne-pastel.png', dpi=120)
    plt.show()
    return fig, ax, annotations

if __name__ == "__main__":
    _, _, train_data, test_data = download()

    # Fix: the original concatenated into `images`/`labels` with torch.cat on
    # every iteration, copying the accumulator each time (O(n^2)). Collect
    # into lists and stack once instead.
    image_rows = []
    label_values = []
    for image, label in tqdm(test_data):
        image_rows.append(image.reshape(-1))  # flatten 1x28x28 -> 784
        label_values.append(label)

    images = torch.stack(image_rows, dim=0)              # (10000, 784) float32
    labels = torch.tensor(label_values, dtype=torch.long)

    print("images:", type(images))
    print("images.shape:", images.shape)

    # t-SNE in sklearn expects numpy arrays, so convert before fitting.
    # (The unused X/Y vstack/hstack dead code from the original is removed.)
    images = images.numpy()
    labels = labels.numpy()

    # Reduce 784-d images to 2-D and visualize, colored by class label.
    digits_final = TSNE(perplexity=50).fit_transform(images)
    plot(digits_final, labels)

可视化结果:

可以看到数据解耦不是很好(不同标签数据混在一起),而学习嵌入表示是为了让这些不同标签的样本能够分开,以提高后续分类任务的性能。

分类任务

这是一个十分类问题,为了便于读者理解,在此采用了最简单的NN实现,相信能对深入理解神经网络的实现流程有帮助。

下载数据集代码同数据可视化:

def download():
    """Download FashionMNIST and wrap both splits in DataLoaders.

    Returns (train_Dataloader, test_Dataloader, train_data, test_data); the
    oversized test batch size means the whole test split comes as one batch.
    """
    # Convert images to tensors and normalize them to [-1, 1].
    to_normalized_tensor = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=[0.5], std=[0.5]),
    ])

    # Fetch the FashionMNIST train/test splits (cached under ./data).
    train_data = datasets.FashionMNIST(
        root="data", train=True, download=True, transform=to_normalized_tensor)
    test_data = datasets.FashionMNIST(
        root="data", train=False, download=True, transform=to_normalized_tensor)

    # Mini-batches of 64 for training; a single all-in-one batch for testing.
    train_Dataloader = DataLoader(train_data, batch_size=64)
    test_Dataloader = DataLoader(test_data, batch_size=999999)

    return train_Dataloader, test_Dataloader, train_data, test_data

神经网络的定义:

class MLP(nn.Module):
    """Simple 3-layer MLP classifier for 28x28 FashionMNIST images (10 classes)."""

    def __init__(self):
        super(MLP, self).__init__()
        self.flatten = nn.Flatten()
        # Bug fix: the original ended the stack with nn.ReLU() after the last
        # Linear layer. Applying ReLU to the logits clamps every negative
        # score to 0 before CrossEntropyLoss, which destroys gradient signal
        # and caps achievable accuracy. The network must emit raw logits.
        self.layer = nn.Sequential(
            nn.Linear(784, 256), nn.ReLU(),
            nn.Linear(256, 32), nn.ReLU(),
            nn.Linear(32, 10),
        )

    def forward(self, x):
        # x: (batch, 1, 28, 28) -> flatten -> (batch, 784) -> logits (batch, 10)
        x = self.flatten(x)
        x = self.layer(x)
        return x

训练和测试函数:

def train(net, train_Dataloader, loss_function, optimizer, device='cuda'):
    """Run one training epoch over the loader.

    Parameters:
        net: the model to train (must already be on `device`).
        train_Dataloader: yields (X, y) mini-batches.
        loss_function: e.g. nn.CrossEntropyLoss, applied to (logits, labels).
        optimizer: optimizer over net.parameters().
        device: where to place each batch; defaults to 'cuda' to preserve the
            original behavior, pass 'cpu' on machines without a GPU
            (backward-compatible generalization of the hard-coded 'cuda').
    """
    for X, y in tqdm(train_Dataloader):
        X = X.to(device)
        y = y.to(device)
        pred = net(X)
        loss = loss_function(pred, y)
        # Standard step: clear stale grads, backprop, update weights.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def test(net, test_Dataloader):
    for X,y in test_Dataloader:
        X = X.to('cuda')
        with torch.no_grad():
            pred = net(X)
            pred = pred.argmax(dim=1).cpu().numpy()
            y = y.cpu()
            acc = accuracy_score(y,pred)
            print("acc:{}".format(acc))

这里可以在训练和测试时打印损失loss方便调试,因为项目简单调试也并不复杂,此处就不再输入loss等信息。

训练和测试代码:

if __name__ == "__main__":
    # Build dataloaders and datasets (downloads FashionMNIST on first run).
    train_Dataloader,test_Dataloader,train_data,test_data = download()

    # Parse hyper-parameters from the command line so tuning does not require
    # editing the source.
    parser = argparse.ArgumentParser(
        description='train',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--epoch', type=int, default=10)

    args = parser.parse_args()

    # Network configuration: MLP on the GPU, Adam optimizer, cross-entropy loss.
    net = MLP().to('cuda')
    optimizer = Adam(net.parameters(),lr=args.lr)
    loss_function = nn.CrossEntropyLoss()

    # One training pass and one evaluation pass per epoch.
    for epoch in range(args.epoch):
        print("training epoch {}:".format(epoch))
        train(net, train_Dataloader, loss_function, optimizer)
        test(net, test_Dataloader)

这里使用argparser是为了记录超参数(可以理解为一个类,定义了不同的超参数)以方便调参。当项目复杂超参数较多时,为每一个超参数都设置不同的变量名较麻烦且容易混乱,而argpaser很好地解决了这一问题。

之后保存模型并重新加载模型,并通过单个样本测试模型的分类效果:

# Persist the trained weights to disk.
torch.save(net.state_dict(),'./net.pth')
print("模型保存完毕")

# Reload the weights into a fresh model. Fix: map_location='cpu' prevents a
# crash on CPU-only machines, since the state_dict was saved from a CUDA model.
new_net = MLP()
new_net.load_state_dict(torch.load('./net.pth', map_location='cpu'))
new_net.eval()  # fix: inference must run in eval mode, not training mode
print("模型重载成功")

# Single-sample sanity check: look at the 5th test image (index 4).
image_index = 4

# Fetch the image and its ground-truth label.
image, label = test_data[image_index]

# Display the image; it is single-channel, so use the gray colormap.
plt.imshow(image.squeeze(), cmap='gray')
plt.title(f"Label: {label}")
plt.show()

# Fix: disable autograd for inference, and unwrap the scalar with .item() so
# the printed prediction is a plain int matching the label's format (the
# original printed a tensor, e.g. "tensor([9])").
with torch.no_grad():
    result = new_net(image).argmax(dim=1).item()
print("image的标签为:{}  image的预测结果为:{}".format(label,result))

第5个样本的图像为:

实验运行结果为:

完整代码(可直接运行):

import torch
import torch.nn as nn
import torchvision
from torchvision import datasets,transforms
from torch.utils.data import DataLoader
import argparse
from torch.optim import Adam
import tqdm
from tqdm import tqdm
import numpy as np
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt

def download():
    """Fetch FashionMNIST and return DataLoaders plus the raw datasets.

    Returns (train_Dataloader, test_Dataloader, train_data, test_data). The
    huge test batch size (999999) makes the test loader yield the entire
    split as one batch.
    """
    # Transform pipeline: image -> tensor -> normalized to [-1, 1].
    pipeline = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=[0.5], std=[0.5]),
    ])

    # Download (or reuse cached) FashionMNIST train and test splits.
    train_data = datasets.FashionMNIST(
        root="data",
        train=True,
        download=True,
        transform=pipeline,
    )
    test_data = datasets.FashionMNIST(
        root="data",
        train=False,
        download=True,
        transform=pipeline,
    )

    # Mini-batches for training; one all-encompassing batch for evaluation.
    train_Dataloader = DataLoader(train_data, batch_size=64)
    test_Dataloader = DataLoader(test_data, batch_size=999999)

    return train_Dataloader, test_Dataloader, train_data, test_data


class MLP(nn.Module):
    """Three-layer fully-connected classifier for FashionMNIST (10 classes)."""

    def __init__(self):
        super(MLP, self).__init__()
        self.flatten = nn.Flatten()
        # Bug fix: the original placed nn.ReLU() after the final Linear layer.
        # ReLU on the logits zeroes all negative class scores before
        # CrossEntropyLoss, crippling training. The last layer must output
        # unconstrained logits.
        self.layer = nn.Sequential(
            nn.Linear(784, 256), nn.ReLU(),
            nn.Linear(256, 32), nn.ReLU(),
            nn.Linear(32, 10),
        )

    def forward(self, x):
        # x: (batch, 1, 28, 28) -> (batch, 784) -> (batch, 10) logits
        x = self.flatten(x)
        x = self.layer(x)
        return x


def train(net, train_Dataloader, loss_function, optimizer, device='cuda'):
    """Run one training epoch over the loader.

    Parameters:
        net: model to train (already placed on `device`).
        train_Dataloader: yields (X, y) mini-batches.
        loss_function: loss over (logits, labels), e.g. nn.CrossEntropyLoss.
        optimizer: optimizer over net.parameters().
        device: batch placement; defaults to 'cuda' to preserve the original
            hard-coded behavior, pass 'cpu' on GPU-less machines
            (backward-compatible generalization).
    """
    for X, y in tqdm(train_Dataloader):
        X = X.to(device)
        y = y.to(device)
        pred = net(X)
        loss = loss_function(pred, y)
        # Clear stale gradients, backpropagate, apply the update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def test(net, test_Dataloader):
    for X,y in test_Dataloader:
        X = X.to('cuda')
        with torch.no_grad():
            pred = net(X)
            pred = pred.argmax(dim=1).cpu().numpy()
            y = y.cpu()
            acc = accuracy_score(y,pred)
            print("acc:{}".format(acc))



if __name__ == "__main__":
    # Build dataloaders and datasets (downloads FashionMNIST on first run).
    train_Dataloader, test_Dataloader, train_data, test_data = download()

    # argparse keeps every hyper-parameter in one place for easy tuning.
    parser = argparse.ArgumentParser(
        description='train',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--epoch', type=int, default=10)

    args = parser.parse_args()

    # Network configuration: MLP on the GPU, Adam optimizer, cross-entropy loss.
    net = MLP().to('cuda')
    optimizer = Adam(net.parameters(), lr=args.lr)
    loss_function = nn.CrossEntropyLoss()

    # One training pass followed by one evaluation pass per epoch.
    for epoch in range(args.epoch):
        print("training epoch {}:".format(epoch))
        train(net, train_Dataloader, loss_function, optimizer)
        test(net, test_Dataloader)

    # Persist the trained weights.
    torch.save(net.state_dict(),'./net.pth')
    print("模型保存完毕")

    # Reload into a fresh CPU-side model. Fix: map_location='cpu' prevents a
    # crash on CPU-only machines, since the weights were saved from a CUDA
    # model; on GPU machines the loaded values are identical.
    new_net = MLP()
    new_net.load_state_dict(torch.load('./net.pth', map_location='cpu'))
    new_net.eval()  # fix: run inference in eval mode, not training mode
    print("模型重载成功")

    # Single-sample sanity check: the 5th test image (index 4).
    image_index = 4
    image, label = test_data[image_index]

    # Display the single-channel image with the gray colormap.
    plt.imshow(image.squeeze(), cmap='gray')
    plt.title(f"Label: {label}")
    plt.show()

    # Fix: disable autograd for inference; .item() unwraps the scalar so the
    # printed prediction is a plain int matching the label's format (the
    # original printed a tensor, e.g. "tensor([9])").
    with torch.no_grad():
        result = new_net(image).argmax(dim=1).item()
    print("image的标签为:{}  image的预测结果为:{}".format(label,result))

评论 13
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值