PyTorch: The Neural Network Training Workflow, Illustrated with CIFAR-10 Classification

The complete training workflow is given below, covering data loading and preprocessing, network definition, loss function and optimizer definition, training, and testing.

import torch as t
import torchvision as tv
import torchvision.transforms as transforms
from torchvision.transforms import ToPILImage
import torch.nn as nn
import torch.nn.functional as F
from torch import optim, multiprocessing

# For displaying images
show = ToPILImage()  # converts a Tensor to a PIL Image

# Define the data preprocessing pipeline
transform = transforms.Compose([
    transforms.ToTensor(),  # convert to Tensor
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # normalize each channel
])
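# Note: ToTensor() scales pixel values to [0, 1]; Normalize with mean 0.5 and
# std 0.5 per channel then maps them to [-1, 1]. This is why images are
# un-normalized with (img + 1) / 2 before being displayed later in the script.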

# 1. Load the CIFAR-10 dataset (set download=True on the first run if the data is not already under root)
trainset = tv.datasets.CIFAR10(root='./pytorch-book-cifar10/', train=True, download=False, transform=transform)
testset = tv.datasets.CIFAR10(root='./pytorch-book-cifar10/', train=False, download=False, transform=transform)

# Class labels
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')


# 2. Define the convolutional neural network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)  # 3 input channels, 6 output channels, 5x5 kernel
        self.conv2 = nn.Conv2d(6, 16, 5)  # 6 input channels, 16 output channels, 5x5 kernel
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)  # 10 output classes

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, 16 * 5 * 5)  # flatten to (batch_size, 16*5*5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


net = Net()
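# Shape check (a quick sketch, not part of the original script): a 32x32 input
# becomes 28x28 after conv1 (5x5 kernel), 14x14 after pooling, 10x10 after conv2,
# and 5x5 after the second pooling, which is where the 16 * 5 * 5 flatten size
# in forward() comes from. A dummy forward pass confirms the output shape:
# net(t.randn(1, 3, 32, 32)).shape  ->  torch.Size([1, 10])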

# 3. Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
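# Note: nn.CrossEntropyLoss applies log-softmax internally and expects raw logits,
# which is why forward() returns the output of fc3 without an extra softmax layer.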


# 4. Train the model
def train_model(trainloader):
    for epoch in range(2):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Print the training status: average loss over the last 2000 mini-batches
            running_loss += loss.item()
            if i % 2000 == 1999:
                print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0
    print('Finished Training')


if __name__ == '__main__':
    multiprocessing.freeze_support()  # needed on Windows when DataLoader workers use multiprocessing

    # Data loaders
    trainloader = t.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
    testloader = t.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)

    # Train the model
    train_model(trainloader)

    # Show one image from the training set
    (data, label) = trainset[100]
    print("Class label:", classes[label])
    show((data + 1) / 2).resize((100, 100))  # un-normalize from [-1, 1] back to [0, 1] for display

    # 5. Test the network
    dataiter = iter(testloader)
    images, labels = next(dataiter)  # fetch one batch of 4 test images
    print("Ground-truth labels:", ' '.join('%5s' % classes[labels[j]] for j in range(4)))
    show(tv.utils.make_grid((images + 1) / 2)).resize((400, 100))  # un-normalize and tile the batch into a grid

    outputs = net(images)
    _, predicted = t.max(outputs, 1)
    print("预测结果:", ' '.join('%5s' % classes[predicted[j]] for j in range(4)))

    # Compute the accuracy over the whole test set
    correct = 0
    total = 0
    with t.no_grad():
        for data in testloader:
            images, labels = data
            outputs = net(images)
            _, predicted = t.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy on the test set: %d %%' % (100 * correct / total))

Output:

[1,  2000] loss: 2.158
[1,  4000] loss: 1.832
[1,  6000] loss: 1.685
[1,  8000] loss: 1.582
[1, 10000] loss: 1.533
[1, 12000] loss: 1.501
[2,  2000] loss: 1.421
[2,  4000] loss: 1.361
[2,  6000] loss: 1.340
[2,  8000] loss: 1.339
[2, 10000] loss: 1.332
[2, 12000] loss: 1.264
Finished Training
Class label: ship
Ground-truth labels:   cat  ship  ship plane
Predicted labels:   dog  ship  ship  ship
Accuracy on the test set: 53 %

Process finished with exit code 0
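If the trained weights need to be reused later, they can be saved and restored through the state_dict interface. The snippet below is a minimal sketch and not part of the original script; the file name cifar10_net.pth is just an example.

# Save the trained parameters
t.save(net.state_dict(), 'cifar10_net.pth')

# Later: rebuild the same architecture and load the saved parameters
net = Net()
net.load_state_dict(t.load('cifar10_net.pth'))
net.eval()  # switch to evaluation mode before running inference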

 
