VGG for CIFAR-10 (PyTorch)

import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from tqdm import tqdm

# Augment only the training set; the test set just needs ToTensor().
train_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor()
    ])
test_transform = transforms.Compose([transforms.ToTensor()])
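# Optional: VGG-style networks usually converge faster when inputs are normalized
# per channel. A minimal sketch of a normalized variant of the two transforms above;
# the mean/std values are commonly quoted CIFAR-10 statistics (an assumption here,
# not computed in this post). Swap these in for train_transform / test_transform to try it.
CIFAR10_MEAN = (0.4914, 0.4822, 0.4465)
CIFAR10_STD = (0.2470, 0.2435, 0.2616)
normalized_train_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(CIFAR10_MEAN, CIFAR10_STD)
    ])
normalized_test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(CIFAR10_MEAN, CIFAR10_STD)
    ])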

# Define hyperparameters
BATCH_SIZE = 128  # batch size
# CIFAR-10
train_dataset = datasets.CIFAR10('E:/Users/PycharmProjects/nlp/task/CIFAR10_try/CIFAR10', train=True, transform=train_transform, download=False)
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=0, pin_memory=True)
test_dataset = datasets.CIFAR10('E:/Users/PycharmProjects/nlp/task/CIFAR10_try/CIFAR10', train=False, transform=test_transform, download=False)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=0, pin_memory=True)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
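# Quick sanity check (not in the original script): one batch from the loader
# should be 128 x 3 x 32 x 32, and CIFAR-10 has 50000 train / 10000 test images.
sample_images, sample_labels = next(iter(train_loader))
print(sample_images.shape)   # torch.Size([128, 3, 32, 32])
print(sample_labels.shape)   # torch.Size([128])
print(len(train_dataset), len(test_dataset))   # 50000 10000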

# Define the network model
class VGG16(nn.Module):
    def __init__(self, num_classes=1000):
        super(VGG16, self).__init__()
        self.features = nn.Sequential(
            # 1
            nn.Conv2d(3, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            # 2
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # 3
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            # 4
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # 5
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            # 6
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            # 7
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # 8
            nn.Conv2d(256, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            # 9
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            # 10
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # 11
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            # 12
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            # 13
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.classifier = nn.Sequential(
            # 14
            nn.Linear(512, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            # 15
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            # 16
            nn.Linear(4096, num_classes),
        )
        # self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        out = self.classifier(out)
        return out
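# Sanity check on the classifier input size: CIFAR-10 images are 32x32 and the
# five 2x2 max-pools halve the spatial size each time (32 -> 16 -> 8 -> 4 -> 2 -> 1),
# so the flattened feature vector has exactly 512 elements, matching nn.Linear(512, 4096).
# A small probe (not part of the original post) to confirm this:
_probe = VGG16(num_classes=10).eval()
with torch.no_grad():
    _feat = _probe.features(torch.randn(1, 3, 32, 32))
print(_feat.shape)   # torch.Size([1, 512, 1, 1])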


# Create the model (CIFAR-10 has 10 classes, so override the ImageNet-style default)
net = VGG16(num_classes=10).to('cuda')
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()  # cross-entropy loss
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)  # SGD optimizer with momentum
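# Optional: the script trains with a fixed learning rate. If the loss plateaus,
# a step decay schedule is a common addition; a minimal sketch (step_size and
# gamma are illustrative assumptions, not tuned for this setup). Call
# scheduler.step() once at the end of every epoch.
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)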


# Number of training epochs
EPOCHS = 50

for epoch in range(EPOCHS):
    net.train()     # keep Dropout/BatchNorm in training mode
    train_loss = 0.0
    for i, (datas, labels) in tqdm(enumerate(train_loader), total=len(train_loader)):
        datas, labels = datas.to('cuda'), labels.to('cuda')
        # Zero the gradients
        optimizer.zero_grad()
        # Forward pass
        outputs = net(datas)
        # Compute the loss
        loss = criterion(outputs, labels)
        # Backward pass
        loss.backward()
        # Update the parameters
        optimizer.step()
        # Accumulate the loss
        train_loss += loss.item()
    print("Epoch : {} , Loss : {:.3f}".format(epoch + 1, train_loss / len(train_loader)))


# Save the model
PATH = 'cifar_net.pth'
torch.save(net.state_dict(), PATH)

# Load the model into a fresh instance (shows the saved weights really are used)
model = VGG16(num_classes=10).to('cuda')
model.load_state_dict(torch.load(PATH))     # load_state_dict() restores the saved parameters
model.eval()    # switch Dropout/BatchNorm to evaluation mode before testing
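# Optional: state_dict() above only stores the weights. To resume training, the
# optimizer state and epoch counter can be saved alongside them; a minimal
# sketch (the 'checkpoint.pth' filename and key names are arbitrary choices).
checkpoint = {
    'epoch': EPOCHS,
    'model_state': net.state_dict(),
    'optimizer_state': optimizer.state_dict(),
}
torch.save(checkpoint, 'checkpoint.pth')
# Restoring later (map_location='cpu' also makes this work on a CPU-only machine):
ckpt = torch.load('checkpoint.pth', map_location='cpu')
net.load_state_dict(ckpt['model_state'])
optimizer.load_state_dict(ckpt['optimizer_state'])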

# Test
correct = 0
total = 0
with torch.no_grad():
    for i, (datas, labels) in enumerate(test_loader):
        datas, labels = datas.to('cuda'), labels.to('cuda')
        # Forward pass
        outputs = model(datas)  # outputs.data.shape --> torch.Size([128, 10])
        _, predicted = torch.max(outputs.data, dim=1)   # first return value holds the max values, second the indices
        # Accumulate the number of samples
        total += labels.size(0)     # labels.size() --> torch.Size([128]), labels.size(0) --> 128
        # Count how many predictions are correct
        correct += (predicted == labels).sum().item()   # True counts as 1, False as 0; sum() adds them up
    print('Accuracy on the 10000 test images: {:.3f}%'.format(correct / total * 100))

# Per-class accuracy
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for (images, labels) in test_loader:
        images, labels = images.to('cuda'), labels.to('cuda')
        # Forward pass
        outputs = model(images)
        # Index of the largest value in each row
        _, predicted = torch.max(outputs, dim=1)
        c = (predicted == labels).squeeze()     # squeeze() drops size-1 dims; unsqueeze() adds one
        for i in range(labels.size(0)):     # use the actual batch size so the last, smaller batch is also counted
            label = labels[i].item()    # class index of the i-th sample
            class_correct[label] += c[i].item()     # True counts as 1, False as 0
            class_total[label] += 1     # total number of samples of this class

# Print the per-class accuracy
for i in range(10):
    print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))
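# Optional: per-class accuracy does not show which classes get confused with
# which. A small confusion-matrix sketch along the same lines (not part of the
# original post); rows are true classes, columns are predicted classes.
confusion = torch.zeros(10, 10, dtype=torch.long)
with torch.no_grad():
    for images, labels in test_loader:
        images, labels = images.to('cuda'), labels.to('cuda')
        preds = model(images).argmax(dim=1)
        for t, p in zip(labels, preds):
            confusion[t.item(), p.item()] += 1
print(confusion)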