Pytorch训练LeNet模型MNIST数据集

如何用torch框架训练深度学习模型(详解)

0. 需要的包

import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

1. 数据加载和导入

以MNIST数据集为例

# 1.1 Normalization transforms (0.1307 / 0.3081 are the published MNIST
# per-channel mean / std; other datasets need their own values).
train_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))])
test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))])
# 1.2 Load the train/test splits with datasets.MNIST.
# FIX: dataset_path was used without ever being defined (NameError); the
# article states the data lives under './data/' (containing the MNIST folder).
dataset_path = './data/'
train_dataset = datasets.MNIST(dataset_path, train=True,
                               download=False, transform=train_transform)
test_dataset = datasets.MNIST(dataset_path, train=False,
                              download=False, transform=test_transform)
# 1.3 Wrap the datasets in DataLoaders for batched access.
# FIX: shuffle the training data each epoch — unshuffled training on a
# class-ordered dataset hurts SGD convergence; the test set stays ordered.
batch_size = 256
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size)

补充:这里的transform根据不同的数据集选择不同的值
datasets加载数据集时path的路径为:'./data/' 该目录下包括/MNIST文件夹

2. 加载模型和设置超参数

# 2.1 LeNet_Mnist must be defined beforehand (its layer structure and
# forward() method); `device` is assumed to be a torch.device created
# earlier (e.g. 'cuda' or 'cpu') — neither is shown in this article.
model = LeNet_Mnist().to(device)
# 2.2 Optimizer, loss function and number of training epochs.
learning_rate = 1e-2
# Hand the model's parameters to the optimizer so step() can update them.
sgd = SGD(model.parameters(), lr=learning_rate)
loss_fn = CrossEntropyLoss()
all_epoch = 20

3. 训练

# 3.1 Put the model into training mode (enables dropout/batch-norm updates).
model.train()
# 3.2 Iterate over mini-batches supplied by the training DataLoader.
for batch_x, batch_y in train_loader:
    batch_x, batch_y = batch_x.to(device), batch_y.to(device)
    # Clear gradients accumulated by the previous step.
    sgd.zero_grad()
    # Forward pass, loss, backward pass, parameter update.
    logits = model(batch_x.float())
    batch_loss = loss_fn(logits, batch_y.long())
    batch_loss.backward()
    sgd.step()

补充:可以在外面再套一层迭代次数

for current_epoch in range(all_epoch):  # local training

4. 测试

# 4.1 Accumulators for the test results.
all_correct_num = 0
all_sample_num = 0
# 4.2 Switch to evaluation mode (affects dropout/batch-norm layers).
model.eval()
# 4.3 Evaluate batch by batch. torch.no_grad() disables autograd for the
# whole pass, which also makes the original per-tensor .detach() unnecessary.
with torch.no_grad():
    for test_x, test_label in test_loader:
        test_x = test_x.to(device)
        test_label = test_label.to(device)
        predict_y = model(test_x.float())
        predict_y = torch.argmax(predict_y, dim=-1)
        correct_mask = predict_y == test_label
        # FIX: the original called np.sum(...) but numpy was never imported
        # (NameError at runtime); summing the boolean tensor in torch and
        # extracting a Python int with .item() needs no extra dependency.
        all_correct_num += correct_mask.sum().item()
        all_sample_num += correct_mask.shape[0]
# 4.4 Report the overall accuracy.
acc = all_correct_num / all_sample_num
print('accuracy: {:.3f}'.format(acc), flush=True)

5. 保存结果

# 5.1 Save only the learned parameters (the state dict).
print("Save the model state dict")
torch.save(model.state_dict(), "./lenet_mnist.pt")
# 5.2 Alternatively save a full checkpoint (model + optimizer state) once
# per epoch so training can resume after an interruption.
# `current_epoch` comes from the epoch loop shown in step 3's supplement.
checkpoint = {
                "model": model.state_dict(),
                "optim": sgd.state_dict(),
             }
print("Save the checkpoint")
torch.save(checkpoint, "./checkpoint{}.pt".format(current_epoch))

6. 导入模型

# Restore previously saved weights; resume_path is the saved .pt file's path.
model.load_state_dict(torch.load(resume_path))

resume_path为保存模型的地址

  • 3
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
以下是用PyTorch实现LeNet模型训练Mnist数据集的示例代码: ```python import torch import torch.nn as nn import torch.optim as optim import torchvision.datasets as dsets import torchvision.transforms as transforms # 定义LeNet模型 class LeNet(nn.Module): def __init__(self): super(LeNet, self).__init__() self.conv1 = nn.Conv2d(1, 6, 5) self.pool1 = nn.MaxPool2d(kernel_size=2) self.conv2 = nn.Conv2d(6, 16, 5) self.pool2 = nn.MaxPool2d(kernel_size=2) self.fc1 = nn.Linear(16*4*4, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool1(torch.relu(self.conv1(x))) x = self.pool2(torch.relu(self.conv2(x))) x = x.view(-1, 16*4*4) x = torch.relu(self.fc1(x)) x = torch.relu(self.fc2(x)) x = self.fc3(x) return x # 设置超参数 batch_size = 100 learning_rate = 0.001 num_epochs = 5 # 加载Mnist数据集 train_dataset = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True) test_dataset = dsets.MNIST(root='./data', train=False, transform=transforms.ToTensor(), download=True) # 创建数据加载器 train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False) # 创建LeNet模型和优化器 model = LeNet() optimizer = optim.Adam(model.parameters(), lr=learning_rate) # 训练模型 total_step = len(train_loader) for epoch in range(num_epochs): for i, (images, labels) in enumerate(train_loader): images = images labels = labels # 前向传播 outputs = model(images) loss = nn.CrossEntropyLoss()(outputs, labels) # 反向传播和优化 optimizer.zero_grad() loss.backward() optimizer.step() # 输出训练状态 if (i+1) % 100 == 0: print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' .format(epoch+1, num_epochs, i+1, total_step, loss.item())) # 测试模型 model.eval() with torch.no_grad(): correct = 0 total = 0 for images, labels in test_loader: images = images labels = labels outputs = model(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == 
labels).sum().item() print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total)) ``` 这个代码可以在Mnist数据集训练LeNet模型,并在测试集上计算模型的准确率。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值