CNN for CIFAR10 in PyTorch

Code Structure
  1. The project is split into two source files: Neural_Network_Model.py defines the network model, and CNN_for_CIFAR10.py defines the training and testing procedure.
  2. The network defined in Neural_Network_Model.py stacks three Conv2d(5×5, stride 1, padding 2) + MaxPool2d(2) stages (3 → 32 → 32 → 64 channels), followed by Flatten and two Linear layers (64 · 4 · 4 → 64 → 10). A shape-tracing sketch follows this list.
  3. The training and testing procedure in CNN_for_CIFAR10.py works as follows:
    • batch_size = 64: gradients are computed and parameters updated once per 64 images, and the loss on the current training batch is printed every 100 updates.
    • After each training epoch, the loss and accuracy rate of the model are computed on the test set.
    • All of these metrics are visualized with TensorBoard.
    • The model is saved every 10 epochs.
  4. In the working directory, create a directory for the TensorBoard event files (logs in the code), then run tensorboard --logdir=logs in a terminal to view the loss and accuracy-rate curves in a browser.
  5. In the working directory, create a folder for the dataset (dataset in the code); CIFAR10 does not need to be downloaded manually, torchvision fetches it automatically.
  6. If PyTorch is configured with CUDA, training runs on the GPU; otherwise it falls back to the CPU.
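
As a quick check on the architecture in item 2, the sketch below traces a dummy batch through each layer of the Sequential stack and prints the intermediate shapes, confirming the 64 * 4 * 4 = 1024 input size of the first Linear layer. It assumes Neural_Network_Model.py (listed next) is on the import path:

```
import torch
from Neural_Network_Model import Neural_Network

model = Neural_Network()
x = torch.ones((1, 3, 32, 32))        # one dummy CIFAR10-sized image
for layer in model.model:             # model.model is the nn.Sequential
    x = layer(x)
    print("{:>9}: {}".format(layer.__class__.__name__, tuple(x.shape)))
# Expected trace:
#    Conv2d: (1, 32, 32, 32)    MaxPool2d: (1, 32, 16, 16)
#    Conv2d: (1, 32, 16, 16)    MaxPool2d: (1, 32, 8, 8)
#    Conv2d: (1, 64, 8, 8)      MaxPool2d: (1, 64, 4, 4)
#   Flatten: (1, 1024)          Linear: (1, 64) -> Linear: (1, 10)
```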
Neural_Network_Model.py
from torch import nn
import torch

class Neural_Network(nn.Module):
    def __init__(self):
        super(Neural_Network, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),    # (N, 3, 32, 32) -> (N, 32, 32, 32)
            nn.MaxPool2d(2),              # -> (N, 32, 16, 16)
            nn.Conv2d(32, 32, 5, 1, 2),   # -> (N, 32, 16, 16)
            nn.MaxPool2d(2),              # -> (N, 32, 8, 8)
            nn.Conv2d(32, 64, 5, 1, 2),   # -> (N, 64, 8, 8)
            nn.MaxPool2d(2),              # -> (N, 64, 4, 4)
            nn.Flatten(),                 # -> (N, 1024)
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10)
        )
    def forward(self, x):
        x = self.model(x)
        return x

# Quick sanity check: a dummy batch of 64 CIFAR10-sized images should map to (64, 10)
if __name__ == '__main__':
    neural_network = Neural_Network()
    dummy_input = torch.ones((64, 3, 32, 32))
    output = neural_network(dummy_input)
    print(output.shape)  # torch.Size([64, 10])

CNN_for_CIFAR10.py
import torch.optim
import torchvision
from torch.utils.data import DataLoader
from torch import nn
from Neural_Network_Model import *
from torch.utils.tensorboard import SummaryWriter

# Select the training device: GPU if CUDA is available, otherwise CPU
# device = torch.device("cpu")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

# Prepare the datasets (downloaded automatically into ./dataset)
train_data = torchvision.datasets.CIFAR10(root="./dataset", train=True, download=True,
                                          transform=torchvision.transforms.ToTensor())
test_data = torchvision.datasets.CIFAR10(root="./dataset", train=False, download=True,
                                          transform=torchvision.transforms.ToTensor())

# Dataset sizes
train_data_size = len(train_data)
test_data_size = len(test_data)
# e.g. if train_data_size = 10, this prints: Training set size: 10
print("Training set size: {}".format(train_data_size))
print("Test set size: {}".format(test_data_size))

# Load the datasets with DataLoader (shuffle the training set each epoch)
train_dataloader = DataLoader(train_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64)

# The network itself is defined in a separate file, Neural_Network_Model.py

# Create the model
neural_network = Neural_Network()
# Move it to the training device
neural_network = neural_network.to(device)

# Loss function
loss_function = nn.CrossEntropyLoss()
# Move it to the training device
loss_function = loss_function.to(device)

# Optimizer
learning_rate = 1e-2  # 0.01
optimizer = torch.optim.SGD(neural_network.parameters(), lr=learning_rate)

# Bookkeeping for the training loop
total_train_step = 0     # number of parameter updates so far
total_test_step = 0      # number of test evaluations so far
epoch = 50               # number of training epochs

# TensorBoard writer (event files go into ./logs)
writer = SummaryWriter("logs")

for i in range(epoch):
    print("----------- Starting epoch {} ----------".format(i + 1))

    # Training phase
    neural_network.train()
    for data in train_dataloader:
        imgs, targets = data
        # Move the batch to the training device
        imgs = imgs.to(device)
        targets = targets.to(device)

        outputs = neural_network(imgs)
        loss = loss_function(outputs, targets)
        # Backpropagate and update the parameters
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("训练次数:{},loss:{}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)
    # Evaluation phase
    neural_network.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            # Move the batch to the training device
            imgs = imgs.to(device)
            targets = targets.to(device)

            outputs = neural_network(imgs)
            loss = loss_function(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            # Count correct predictions in this batch
            accuracy = (outputs.argmax(1) == targets).sum().item()
            total_accuracy = total_accuracy + accuracy
    print("整体测试集上的loss:{}".format(total_test_loss))
    print("整体测试集上的accuracy_rate:{}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy_rate", total_accuracy / test_data_size, total_test_step)
    total_test_step = total_test_step + 1
    if(i == 10):
        torch.save(neural_network, "Neural_Network_for_CIFAR10{}.pth".format(i))
#       torch.save(neural_network.state_dict(), "Neural_Network_{}.pth".format(i))
        print("模型已保存")

writer.close()
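
A minimal sketch of loading one of the checkpoints saved above for inference. The file name (here the epoch-10 checkpoint) follows the run above and should be adjusted as needed; since torch.save above pickles the whole module, the Neural_Network class must be importable when loading:

```
import torch
from Neural_Network_Model import Neural_Network

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load the whole pickled module. On PyTorch >= 2.6, weights_only=False is
# required because torch.load defaults to weights-only loading; drop the
# argument on older versions that lack it.
model = torch.load("Neural_Network_for_CIFAR10_10.pth", map_location=device,
                   weights_only=False)
model.eval()

# If the commented-out state_dict variant was used instead:
# model = Neural_Network()
# model.load_state_dict(torch.load("Neural_Network_10.pth", map_location=device))
# model.to(device)
# model.eval()

with torch.no_grad():
    dummy = torch.ones((1, 3, 32, 32), device=device)
    print(model(dummy).argmax(1))    # predicted class index
```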
Below is another CNN training script for CIFAR10 in PyTorch:

```
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

# Device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Preprocessing and data augmentation
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

# Load the datasets
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)

# Network definition
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
        self.conv2 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv3 = nn.Conv2d(128, 256, 3, padding=1)
        self.fc1 = nn.Linear(256 * 4 * 4, 512)
        self.fc2 = nn.Linear(512, 10)

    def forward(self, x):
        x = nn.functional.relu(self.conv1(x))
        x = nn.functional.max_pool2d(x, 2)
        x = nn.functional.relu(self.conv2(x))
        x = nn.functional.max_pool2d(x, 2)
        x = nn.functional.relu(self.conv3(x))
        x = nn.functional.max_pool2d(x, 2)
        x = x.view(-1, 256 * 4 * 4)
        x = nn.functional.relu(self.fc1(x))
        x = self.fc2(x)
        return x

net = Net().to(device)

# Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)

# Train the network
for epoch in range(10):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # Get the inputs
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)

        # Zero the gradients
        optimizer.zero_grad()

        # Forward pass, backward pass, optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Print statistics
        running_loss += loss.item()
        if i % 100 == 99:  # print every 100 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100))
            running_loss = 0.0

print('Finished Training')

# Evaluate the network on the test set
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = net(inputs)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
```

This code defines a three-layer convolutional neural network trained with a cross-entropy loss and an SGD optimizer. In each epoch it prints the loss every 100 mini-batches, and after training it evaluates the model's accuracy on the test set.
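The Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) transform above is a rough placeholder; the per-channel statistics of the CIFAR10 training set can be measured directly. A minimal sketch (the commented values are the commonly cited ones and may vary slightly with rounding):

```
import torch
import torchvision
import torchvision.transforms as transforms

# Load the entire training set as one tensor batch and measure its statistics.
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True,
                                        transform=transforms.ToTensor())
loader = torch.utils.data.DataLoader(trainset, batch_size=len(trainset), shuffle=False)
imgs, _ = next(iter(loader))             # (50000, 3, 32, 32), values in [0, 1]
print(imgs.mean(dim=(0, 2, 3)))          # ~ tensor([0.4914, 0.4822, 0.4465])
print(imgs.std(dim=(0, 2, 3)))           # ~ tensor([0.2470, 0.2435, 0.2616])
```

Plugging the measured mean and std into Normalize is a common refinement over the 0.5 placeholder.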
