Classifying the FashionMNIST Dataset with AlexNet in PyTorch (based on the official example)

1. The AlexNet model

Reference: pytorch图像分类篇:3.搭建AlexNet并训练花分类数据集 (PyTorch image classification part 3: building AlexNet and training on a flower classification dataset)
Note: some parameters in the model code below differ from the original paper. FashionMNIST images are single-channel with a resolution of 28*28, unlike the inputs used in the paper, so several layer parameters have to be adjusted (a quick shape check follows the model code below).

model.py

import torch.nn as nn
import torch

class AlexNet(nn.Module):

    def __init__(self, num_classes=1000, init_weights=False):
        super(AlexNet, self).__init__()
        # use nn.Sequential() to group the layers into one module and keep the code concise
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2,stride=2),
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2)
        )

        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(256*3*3, 1024),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(1024, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, num_classes)
        )

        if init_weights:
            self._initialize_weights()

    # define the forward pass
    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, start_dim=1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)  # initialize weights from a normal distribution
                nn.init.constant_(m.bias, 0)        # initialize biases to 0
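
To confirm the adjusted parameters mentioned above, you can run a quick shape check on the feature extractor: a single-channel 28*28 input should come out as a 256*3*3 feature map, which is exactly why the first fully connected layer takes 256*3*3 inputs. A minimal sketch (the dummy input and the import path are illustrative and may need adjusting to your project layout):

import torch
from model import AlexNet  # adjust to your project layout, e.g. from AlexNet.model import AlexNet

net = AlexNet(num_classes=10)
dummy = torch.zeros(1, 1, 28, 28)               # one single-channel 28x28 image
feat = net.features(dummy)
print(feat.shape)                               # expected: torch.Size([1, 256, 3, 3])
print(torch.flatten(feat, start_dim=1).shape)   # expected: torch.Size([1, 2304]) = 256*3*3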

train.py (based on the official PyTorch example)

import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torchvision.transforms import ToTensor
# import the AlexNet model defined above
from AlexNet.model import AlexNet

train_data = datasets.FashionMNIST(
    root='./data',
    train=True,
    download=True,
    transform=ToTensor()
)

test_data = datasets.FashionMNIST(
    root='./data',
    train=False,
    download=True,
    transform=ToTensor()
)

batch_size = 64
train_dataloader = DataLoader(train_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)

device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))

model = AlexNet(num_classes=10, init_weights=True).to(device)
print(model)

loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    # switch back to training mode so Dropout is active (_test() below puts the model in eval mode)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # forward pass to compute predictions
        pred = model(X)
        # compute the loss
        loss = loss_fn(pred, y)

        # zero the gradients
        optimizer.zero_grad()
        # backpropagate to compute gradients
        loss.backward()
        # update all parameters
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")

def _test(dataloader, model):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    # loss_fn returns the per-batch mean, so average over batches; accuracy averages over samples
    test_loss /= num_batches
    correct /= size

    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")

epochs = 5
for t in range(epochs):
    print(f"Epoch {t+1}\n-----------------")
    train(train_dataloader, model, loss_fn, optimizer)
    _test(test_dataloader, model)
print("Done!")

# save the trained model weights
torch.save(model.state_dict(), "AlexNet-FashionMnist.pth")
print("Saved PyTorch Model State to AlexNet-FashionMnist.pth")

predict.py

import torch
from AlexNet.model import AlexNet
from torchvision import datasets
from torchvision.transforms import ToTensor

test_data = datasets.FashionMNIST(
    root='./data',
    train=False,
    download=False,
    transform=ToTensor()
)

# use a sample from the test set instead of an arbitrary image, which would need extra preprocessing (see the sketch after this script)
x, y = test_data[0][0], test_data[0][1]
print(x.shape)
# note: add a batch dimension to the image tensor x
x = x.unsqueeze(0)
print(x.shape)

model = AlexNet(num_classes=10)
# map_location ensures weights saved from a GPU run also load on CPU-only machines
model.load_state_dict(torch.load("AlexNet-FashionMnist.pth", map_location="cpu"))

classes = [
    "T-shirt/top",
    "Trouser",
    "Pullover",
    "Dress",
    "Coat",
    "Sandal",
    "Shirt",
    "Sneaker",
    "Bag",
    "Ankle boot",
]

model.eval()
print(model)
with torch.no_grad():
    pred = model(x)
    predicted, actual = classes[pred[0].argmax(0)], classes[y]
    print(f'Predicted: "{predicted}", Actual: "{actual}"')
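
As noted in the comment above, predicting on an arbitrary image requires preprocessing it to match the training data: single channel, 28*28, pixel values scaled to [0, 1]. A minimal sketch using torchvision transforms (the file name my_image.png is hypothetical; model and classes are the objects defined in predict.py). Also keep in mind that FashionMNIST items are light objects on a dark background, so an ordinary photo may additionally need to be inverted:

from PIL import Image
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Grayscale(num_output_channels=1),  # convert to a single channel
    transforms.Resize((28, 28)),                  # match the training resolution
    transforms.ToTensor(),                        # scale pixel values to [0, 1]
])

img = Image.open("my_image.png")    # hypothetical file name
x = preprocess(img).unsqueeze(0)    # add the batch dimension: [1, 1, 28, 28]
with torch.no_grad():
    pred = model(x)
    print(f'Predicted: "{classes[pred[0].argmax(0)]}"')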