
以CIFAR10数据集为例
搭建神经网络,作为一个模块
model.py
import torch
from torch import nn
class Net(nn.Module):
    """Small CNN classifier for CIFAR-10.

    Expects input of shape (batch, 3, 32, 32) and returns raw class
    logits of shape (batch, 10).
    """

    def __init__(self):
        super(Net, self).__init__()
        # Three conv+maxpool stages halve the spatial size each time
        # (32 -> 16 -> 8 -> 4), then flatten and classify with two
        # fully connected layers.
        stages = [
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10),
        ]
        self.model = nn.Sequential(*stages)

    def forward(self, x):
        """Apply the sequential stack and return (batch, 10) logits."""
        return self.model(x)
if __name__ == '__main__':
    # Quick shape check: a dummy batch of 64 CIFAR-sized images
    # should map to 64 rows of 10 class logits.
    model = Net()
    dummy_batch = torch.ones((64, 3, 32, 32))
    logits = model(dummy_batch)
    print(logits.shape)
train.py
import torch
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from model1 import *
# Training script for the CIFAR-10 classifier.
# NOTE(review): the heading above says the model lives in model.py but the
# import reads "from model1 import *" — confirm the actual module name.

# Load CIFAR-10; ToTensor() converts PIL images to float tensors in [0, 1].
train_data = torchvision.datasets.CIFAR10(root="data", train=True,
                                          transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root="data", train=False,
                                         transform=torchvision.transforms.ToTensor(),
                                         download=True)

# Dataset sizes (CIFAR-10 ships 50000 train / 10000 test images).
train_data_size = len(train_data)
test_data_size = len(test_data)
print(f"训练数据集的长度为:{train_data_size}")
# BUG FIX: the original printed "训练数据集的长度为" (train-set label)
# for the test-set size as well.
print(f"测试数据集的长度为:{test_data_size}")

# Batch the datasets with DataLoader.
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# Model, loss and optimizer.
net = Net()
loss_fn = nn.CrossEntropyLoss()
# 1e-2 = 1 x 10^(-2) = 0.01
learning_rate = 1e-2
optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)

# Bookkeeping counters and number of epochs.
total_train_step = 0  # optimizer steps taken so far
total_test_step = 0   # completed evaluation passes
epoch = 10

# TensorBoard writer (logs under ./logs_train).
writer = SummaryWriter("logs_train")

for i in range(epoch):
    print(f"------第{i+1}轮训练开始------")

    # ---- Training pass ----
    # train() only matters for layers such as Dropout/BatchNorm; this
    # model has none, but calling it keeps the script correct if the
    # architecture changes.
    net.train()
    for data in train_dataloader:
        imgs, targets = data
        outputs = net(imgs)
        # Cross-entropy between logits and integer class targets.
        loss = loss_fn(outputs, targets)
        # Standard update: clear grads, backprop, step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step = total_train_step + 1
        # Log every 100 steps to keep console output readable.
        if total_train_step % 100 == 0:
            print(f"训练次数:{total_train_step},Loss:{loss}")
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # ---- Evaluation pass ----
    # FIX: switch to eval mode, symmetric with net.train() above.
    net.eval()
    total_test_loss = 0
    total_accuracy = 0
    # No gradients needed during evaluation (saves memory and compute).
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            outputs = net(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            # Count correct top-1 predictions in this batch.
            # FIX: .item() keeps the accumulator a plain Python int;
            # the original accumulated a 0-dim tensor.
            accuracy = (outputs.argmax(1) == targets).sum().item()
            total_accuracy = total_accuracy + accuracy
    print(f"整体测试集上的Loss:{total_test_loss}")
    print(f"整体测试集上的正确率:{total_accuracy/test_data_size}")
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    total_test_step += 1

    # Checkpoint after every epoch.
    # NOTE(review): this pickles the whole module; torch.save(net.state_dict(), ...)
    # is the recommended, more portable form — kept as-is to preserve the
    # checkpoint file format the rest of the tutorial expects.
    torch.save(net, f"net_{i}.pth")
    print("模型已保存")

writer.close()

TensorBoard可视化结果


在训练和测试步骤开始时,需要分别调用train()和eval()将模型切换为训练模式和评估模式。但即使不写这两行代码,我们的模型仍然可以正常训练和测试。
这是为什么呢?
在官方文档中有详细说明

这两个方法只对某些网络层有作用,比如Dropout层、BatchNorm层,模型包含这些层时就必须调用;
因为我们的模型中并不存在这两类网络层,所以不调用也可以正常运行。
套路可简单总结为:
创建Dataset-->DataLoader加载数据-->设置模型的层数-->定义损失函数-->定义优化器-->设置网络参数-->开始训练-->验证模型-->可视化结果-->保存模型
1285

被折叠的 条评论
为什么被折叠?



