AlexNet on MNIST: A PyTorch Implementation

1. The MNIST Dataset

MNIST contains 60,000 training images and 10,000 test images across 10 classes; each image is 1×28×28 and shows a handwritten digit from 0 to 9.
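To verify these numbers, the following minimal sketch (assuming only that torchvision is installed) loads the dataset and prints its sizes:

import torchvision
import torchvision.transforms as transforms

train = torchvision.datasets.MNIST(root='./data/', train=True, download=True,
                                   transform=transforms.ToTensor())
test = torchvision.datasets.MNIST(root='./data/', train=False, download=True,
                                  transform=transforms.ToTensor())

print(len(train), len(test))  # 60000 10000
img, label = train[0]
print(img.shape, label)       # torch.Size([1, 28, 28]) 5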

2. Implementation

(1) Imports

import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
# Use the GPU when available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

(2) Defining the AlexNet architecture

class AlexNet(nn.Module):
    def __init__(self):
        super(AlexNet, self).__init__()

        self.layer1 = nn.Sequential(     # input 1*28*28
            nn.Conv2d(1, 32, kernel_size=3, padding=1),  # 32*28*28
            nn.MaxPool2d(kernel_size=2, stride=2),       # 32*14*14
            nn.ReLU(inplace=True),
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, padding=1),  # 64*14*14
            nn.MaxPool2d(kernel_size=2, stride=2),        # 64*7*7
            nn.ReLU(inplace=True),
        )
        self.layer3 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, padding=1),  # 128*7*7
            nn.ReLU(inplace=True),  # AlexNet applies ReLU after every conv
        )
        self.layer4 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, padding=1),  # 256*7*7
            nn.ReLU(inplace=True),
        )
        self.layer5 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, padding=1),  # 256*7*7
            nn.MaxPool2d(kernel_size=3, stride=2),          # 256*3*3
            nn.ReLU(inplace=True),
        )
        self.fc1 = nn.Linear(256 * 3 * 3, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 10)

    def forward(self, x):  # must be a method of the class, not nested in __init__
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = x.view(-1, 256 * 3 * 3)   # flatten for the classifier head
        x = torch.relu(self.fc1(x))   # nonlinearity between the FC layers
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)               # raw logits; CrossEntropyLoss handles softmax
        return x

net = AlexNet()
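A quick sanity check of the shape trace in the comments above: push a dummy input through the freshly constructed net and confirm it produces one logit per class. This is only a sketch, not part of the training pipeline.

x = torch.randn(1, 1, 28, 28)  # one dummy MNIST-sized input
print(net(x).shape)            # torch.Size([1, 10])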

(3) Hyperparameters

# Hyperparameters
batch_size = 64  # mini-batch size
LR = 0.01        # learning rate

(4) Data preprocessing

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
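The constants 0.1307 and 0.3081 are the mean and standard deviation of the MNIST training pixels after scaling to [0, 1]. A sketch of how they can be recomputed, assuming the dataset has already been downloaded:

raw = torchvision.datasets.MNIST(root='./data/', train=True, download=True,
                                 transform=transforms.ToTensor())
pixels = torch.stack([img for img, _ in raw])     # 60000 x 1 x 28 x 28
print(pixels.mean().item(), pixels.std().item())  # ~0.1307, ~0.3081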

(5) Loading the dataset

# Training set
trainset = torchvision.datasets.MNIST(
    root='./data/',
    train=True,
    download=True,
    transform=transform)

# Training loader
trainloader = torch.utils.data.DataLoader(
    trainset,
    batch_size=batch_size,
    shuffle=True)

# Test set
testset = torchvision.datasets.MNIST(
    root='./data/',
    train=False,
    download=True,
    transform=transform)

# Test loader
testloader = torch.utils.data.DataLoader(
    testset,
    batch_size=batch_size,
    shuffle=False)
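A quick check (again just a sketch) that the loaders yield batches of the expected shape:

images, labels = next(iter(trainloader))
print(images.shape)  # torch.Size([64, 1, 28, 28])
print(labels.shape)  # torch.Size([64])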

(6) Loss function and optimizer

model = AlexNet().to(device)
criterion = nn.CrossEntropyLoss()  # cross-entropy loss, the standard choice for multi-class classification
optimizer = optim.SGD(model.parameters(), lr=LR, momentum=0.9)
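Note that nn.CrossEntropyLoss applies log-softmax internally, which is why forward ends with a plain Linear layer and returns raw logits. A tiny sketch with made-up scores:

logits = torch.tensor([[2.0, 0.5, -1.0]])  # raw scores for 3 hypothetical classes
target = torch.tensor([0])                 # true class index
print(criterion(logits, target))           # -log(softmax(logits)[0, 0]), about 0.24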

(7) Training the model

model.train()
iteration = 0
train_acc = []
train_loss = []

# One pass over the training set (60000/64, roughly 938 mini-batches).
# The counter tracks iterations (mini-batches), not epochs.
for batch_idx, (data, target) in enumerate(trainloader):
    data, target = data.to(device), target.to(device)
    optimizer.zero_grad()
    output = model(data)
    _, preds = torch.max(output, 1)

    loss = criterion(output, target)
    acc = torch.sum(preds == target).item()  # correct predictions in this batch

    loss.backward()
    optimizer.step()

    # store plain Python numbers so np.mean works on CPU and GPU alike
    train_loss.append(loss.item())
    train_acc.append(acc)

    iteration += 1
    if iteration % 100 == 0:
        # CrossEntropyLoss is already averaged per sample, so the loss needs
        # no further division; accuracy is correct-count / batch size
        print('iteration: {}, Train loss: {:.6f}, Train acc: {:.6f}'.format(
            iteration, np.mean(train_loss),
            np.mean(train_acc) / batch_size))
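After training, the weights can be persisted for later reuse. A minimal sketch (the file name here is an arbitrary choice):

torch.save(model.state_dict(), 'alexnet_mnist.pth')  # save the learned parameters
# later: model.load_state_dict(torch.load('alexnet_mnist.pth'))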

(8) Evaluating the model

model.eval()
eval_loss = 0
eval_acc = 0
with torch.no_grad():  # no gradients needed during evaluation
    for img, label in testloader:
        img, label = img.to(device), label.to(device)  # keep data on the same device as the model
        out = model(img)
        loss = criterion(out, label)
        eval_loss += loss.item() * label.size(0)  # undo the per-batch averaging
        _, pred = torch.max(out, 1)
        eval_acc += (pred == label).sum().item()

print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
    eval_loss / len(testset),
    eval_acc / len(testset)))
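If the overall accuracy looks low, a per-digit breakdown often shows which classes are being confused. A sketch that reuses the test loader above:

class_correct = [0] * 10
class_total = [0] * 10
with torch.no_grad():
    for img, label in testloader:
        img, label = img.to(device), label.to(device)
        _, pred = torch.max(model(img), 1)
        for p, t in zip(pred, label):
            class_total[t.item()] += 1
            class_correct[t.item()] += int(p.item() == t.item())

for digit in range(10):
    print('digit {}: {:.4f}'.format(digit, class_correct[digit] / class_total[digit]))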

3. Results

[Screenshot of the training and test output omitted]

4. Troubleshooting

[Screenshot of the download error omitted]

  • This error occurs when the MNIST download is interrupted by network problems, leaving incomplete files behind.
  • Delete the partially downloaded files. The simplest fix is to copy the download URL into a browser, fetch the archives there, and place the finished files in the raw MNIST directory (data/MNIST/raw); a sketch for verifying that the files are in place follows below.
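As a sketch, the presence of the four extracted raw files can be checked like this (the directory layout assumes torchvision's default ./data root used above):

import os

raw_dir = './data/MNIST/raw'
expected = [
    'train-images-idx3-ubyte',
    'train-labels-idx1-ubyte',
    't10k-images-idx3-ubyte',
    't10k-labels-idx1-ubyte',
]
for name in expected:
    status = 'OK' if os.path.exists(os.path.join(raw_dir, name)) else 'MISSING'
    print(name, status)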
