PyTorch Tutorial (3): Training a Classifier

Training an image classifier

We will do the following steps in order:

  • Load and normalize the CIFAR10 training and test datasets using torchvision
  • Define a convolutional neural network
  • Define a loss function
  • Train the network on the training data
  • Test the network on the test data

1. Load and normalize CIFAR10

# torchvision makes it very easy to load CIFAR10.
import torch
import torchvision
import torchvision.transforms as transforms

# The torchvision datasets output PILImage images with pixel values in [0, 1];
# we convert them to tensors normalized to the range [-1, 1].
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
# torchvision.transforms is PyTorch's image preprocessing package; Compose chains several steps together.
# ToTensor() maps pixel values from [0, 255] to [0, 1], and Normalize() then maps [0, 1] to [-1, 1].
# Normalize: normalizes a tensor image with the given per-channel mean and standard deviation,
# using the formula channel = (channel - mean) / std
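As a quick sanity check of the formula above, the short sketch below (an illustrative extra, not part of the original tutorial) applies the same Normalize parameters to a hand-made tensor and confirms that 0 maps to -1, 0.5 to 0, and 1 to 1.

# Minimal sketch: verify channel = (channel - mean) / std with mean = std = 0.5
norm = transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
fake_img = torch.stack([torch.full((2, 2), v) for v in (0.0, 0.5, 1.0)])  # fake 3x2x2 "image" in [0, 1]
print(norm(fake_img)[:, 0, 0])  # tensor([-1., 0., 1.])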

trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
import matplotlib.pyplot as plt
import numpy as np

# Helper function to display an image
def imshow(img):
    img = img / 2 + 0.5     # unnormalize: undo (x - 0.5) / 0.5
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()

# Get a random batch of training data
dataiter = iter(trainloader)
# trainloader is an iterable; iter(trainloader) returns an iterator,
# and next() fetches one batch at a time from it.
images, labels = next(dataiter)

# Show the images
imshow(torchvision.utils.make_grid(images))
# torchvision.utils.make_grid() stitches a batch of images into a single grid image
# so they can be drawn in one window.
# Print the corresponding labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
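If it helps to see what one batch looks like, a small shape check (not in the original post; it assumes the batch_size=4 configured above and the default make_grid padding of 2) is:

# Minimal sketch: inspect the shapes of one batch and of the stitched grid.
print(images.shape)                               # torch.Size([4, 3, 32, 32]) -- 4 RGB 32x32 images
print(labels.shape)                               # torch.Size([4]) -- one class index per image
print(torchvision.utils.make_grid(images).shape)  # torch.Size([3, 36, 138]) -- one stitched image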

2. Define a convolutional neural network

import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Input: 3x32x32 CIFAR10 images
        self.conv1 = nn.Conv2d(3, 6, 5)        # 3 -> 6 channels, 5x5 kernel: 32x32 -> 28x28
        self.pool = nn.MaxPool2d(2, 2)         # 2x2 max pooling halves the spatial size
        self.conv2 = nn.Conv2d(6, 16, 5)       # 6 -> 16 channels: 14x14 -> 10x10
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # after the second pool the feature map is 16x5x5
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)           # 10 output scores, one per CIFAR10 class

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))   # 3x32x32 -> 6x28x28 -> 6x14x14
        x = self.pool(F.relu(self.conv2(x)))   # 6x14x14 -> 16x10x10 -> 16x5x5
        x = x.view(-1, 16 * 5 * 5)             # flatten to a (batch, 400) tensor
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


net = Net()
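To see where the 16 * 5 * 5 in fc1 comes from, a quick shape check on a dummy batch (an extra, not part of the original code) can help: each 5x5 convolution shrinks 32 to 28 and 14 to 10, and each pooling halves the size, so the features entering fc1 are 16x5x5.

# Minimal sketch: run one fake batch through the untrained network and check the output shape.
dummy = torch.randn(4, 3, 32, 32)  # same shape as one batch from trainloader
print(net(dummy).shape)            # torch.Size([4, 10]) -- one score per class for each image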

3. Define a loss function and optimizer

# We use cross-entropy as the loss function and stochastic gradient descent with momentum.
import torch.optim as optim
criterion = nn.CrossEntropyLoss()  # cross-entropy loss over the 10 class scores
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)  # SGD with momentum
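Note that nn.CrossEntropyLoss expects raw, unnormalized scores (it applies log-softmax internally) together with integer class indices. A tiny illustration (not part of the original post):

# Minimal sketch: cross-entropy on fake scores for a single image whose true class is 0 ("plane").
fake_scores = torch.tensor([[2.0, 0.5, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
fake_target = torch.tensor([0])
print(criterion(fake_scores, fake_target))  # a scalar tensor; lower means the scores favor the true class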

4. Train the network

# Training loop
for epoch in range(2):  # loop over the dataset twice
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs: data is a list of [inputs, labels]
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward pass, backward pass, optimize
        outputs = net(inputs)              # calls the forward method
        loss = criterion(outputs, labels)  # compute the loss
        loss.backward()                    # backpropagation
        optimizer.step()                   # update the parameters
        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches (50000 / 4 = 12500 batches per epoch)
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')
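After training it is often convenient to keep the learned weights. A minimal sketch (an optional extra, not in the original post; the file name cifar_net.pth is just a placeholder):

# Minimal sketch: save only the parameter tensors; reload them later with load_state_dict.
PATH = './cifar_net.pth'            # hypothetical file name
torch.save(net.state_dict(), PATH)
# later: net.load_state_dict(torch.load(PATH))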
        

5. Test the network on the test data

dataiter = iter(testloader)
images, labels = next(dataiter)

# Show a batch of four test images together with their ground-truth labels
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))

# What the trained model predicts for these four images
outputs = net(images)
# torch.max returns (values, indices) along dim 1; the index of the highest score is the predicted class
_, predicted = torch.max(outputs, 1)

print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
                              for j in range(4)))
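To make the torch.max call above concrete (an extra illustration, not in the original): it returns both the maximum value and its index along the chosen dimension.

# Minimal sketch: torch.max along dim 1 returns (max values, argmax indices).
fake_out = torch.tensor([[0.1, 0.9, 0.0], [0.7, 0.2, 0.1]])
vals, idx = torch.max(fake_out, 1)
print(vals)  # tensor([0.9000, 0.7000])
print(idx)   # tensor([1, 0])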
# Accuracy over the whole test set
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))

# Per-class accuracy
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        for i in range(4):  # batch_size is 4, so each batch contributes 4 samples
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1


for i in range(10):
    print('Accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / class_total[i]))
