1. General Steps for Training a Neural Network Model
# 1. Build the neural network in model.py
import torch
from torch import nn
class YourModelClass(nn.Module):
    def __init__(self):
        super(YourModelClass, self).__init__()
        self.model = nn.Sequential(
            # operations
        )
    def forward(self, x):
        x = self.model(x)
        return x
# Sanity-check the network definition
if __name__ == "__main__":
    model = YourModelClass()
    input = torch.ones((expected_input_shape))  # replace with the shape your model expects, e.g. (N, C, H, W)
    output = model(input)
    print(output.shape)
# 2. Create train.py and carry out steps 3-15 below in it
import torch
from torch import nn
from torch.utils.data import DataLoader
from model import *
# 3. Create the training and test datasets
train_dataset = YourDatasetClass(train_data)
test_dataset = YourDatasetClass(test_data)
# 4. Get the dataset sizes
train_data_size = len(train_dataset)
test_data_size = len(test_dataset)
# 5. Create the data loaders
batch_size = 32  # set the batch size
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
# 6. Create a model instance
model = YourModelClass()  # instantiate your model class
# 7. Choose a loss function
loss_fn = nn.CrossEntropyLoss()  # cross-entropy loss (suitable for classification)
# 8. Choose an optimizer and learning rate
learning_rate = 0.001  # set the learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)  # SGD optimizer
# 9. Initialize the training and test step counters
total_train_step = 0
total_test_step = 0
# 10. Set the number of epochs
epochs = 1000  # total number of training epochs
# 11. 训练和测试循环
for epoch in range(epochs):
# 12. 开始训练步骤
model.train()
for i, (inputs, targets) in enumerate(train_dataloader):
total_train_step += 1
# 将梯度清零
optimizer.zero_grad()
# 前向传播
outputs = model(inputs)
# 计算损失
loss = loss_fn(outputs, targets)
# 反向传播
loss.backward()
# 更新模型参数
optimizer.step()
# 输出损失值
print(f"Epoch {epoch+1}, Train Step {total_train_step}, Loss: {loss.item()}")
    # 13. Test step
    model.eval()
    total_test_loss = 0.0
    total_accuracy = 0
    with torch.no_grad():
        for i, (inputs, targets) in enumerate(test_dataloader):
            total_test_step += 1
            # Forward pass
            outputs = model(inputs)
            # Compute the loss
            test_loss = loss_fn(outputs, targets)
            total_test_loss += test_loss.item()
            # Count the correct predictions in this batch
            accuracy = (outputs.argmax(1) == targets).sum().item()
            total_accuracy += accuracy
    # Average test loss
    avg_test_loss = total_test_loss / len(test_dataloader)
    print(f"Epoch {epoch+1}, Avg. Test Loss: {avg_test_loss}")
    # Accuracy over the whole test set
    print(f"Average accuracy: {total_accuracy / test_data_size}")
    # 14. Save the model after each epoch
    torch.save(model.state_dict(), f"model_epoch{epoch+1}.pth")
# 15. Finish training
print("Training Finished.")
2. Training the CIFAR-10 Network Model
First, create the neural network and test it, saving it in the model file:
# Build the neural network
import torch
from torch import nn
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            # three 2x2 max-pools shrink 32x32 to 4x4, so the flattened size is 64*4*4 = 1024
            nn.Linear(64*4*4, 64),
            nn.Linear(64, 10)
        )
    def forward(self, x):
        x = self.model(x)
        return x
if __name__ == "__main__":
    tudui = Tudui()
    input = torch.ones((64, 3, 32, 32))
    output = tudui(input)
    print(output.shape)  # expected: torch.Size([64, 10])
Then create the train file and build up the training code step by step:
import torchvision
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from model import *  # equivalent to copying everything from model.py into this file
# Prepare the datasets
train_data = torchvision.datasets.CIFAR10("./dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
# Dataset lengths
train_data_size = len(train_data)
test_data_size = len(test_data)
# If train_data_size = 10, this prints: Length of the training dataset: 10
print("Length of the training dataset: {}".format(train_data_size))
print("Length of the test dataset: {}".format(test_data_size))
# Load the datasets with DataLoader
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
# Create the network model
tudui = Tudui()
# Loss function
loss_fn = nn.CrossEntropyLoss()  # cross-entropy; "fn" is short for "function"
# Optimizer
learning = 0.01  # i.e. 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), learning)  # stochastic gradient descent
# A few bookkeeping variables
# Counter for training steps
total_train_step = 0
# Counter for test rounds
total_test_step = 0
# Number of epochs
epoch = 10
# Add TensorBoard
writer = SummaryWriter("logs")
for i in range(epoch):
    print("----- Epoch {} starts -----".format(i+1))
    # Training step
    tudui.train()  # enables the training behavior of layers such as Dropout and BatchNorm
    for data in train_dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)  # gap between the actual output and the target
        # Let the optimizer tune the model
        optimizer.zero_grad()  # zero the gradients
        loss.backward()        # backpropagation: compute the gradients of the loss
        optimizer.step()       # update the network parameters using the gradients
        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("Training step: {}, Loss: {}".format(total_train_step, loss.item()))  # .item() extracts the scalar loss value
            writer.add_scalar("train_loss", loss.item(), total_train_step)
    # Test step (after each epoch, check the loss on the test set)
    tudui.eval()  # switches layers such as Dropout and BatchNorm to inference behavior
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():  # no gradients needed
        for data in test_dataloader:  # iterate over the test set
            imgs, targets = data
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)  # loss of this batch only
            total_test_loss = total_test_loss + loss.item()  # accumulate the total loss
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy
    print("Loss on the whole test set: {}".format(total_test_loss))
    print("Accuracy on the whole test set: {}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy / test_data_size, total_test_step)
    total_test_step = total_test_step + 1
    torch.save(tudui, "./model/tudui_{}.pth".format(i))  # save the model after each epoch (the ./model directory must already exist)
    # torch.save(tudui.state_dict(), "tudui_{}.pth".format(i))  # alternative: save only the state_dict
    print("Model saved")
writer.close()
Finally, the results can be observed in TensorBoard. Combining the printed output with the loss curves in TensorBoard, the training loss decreases steadily as the number of training steps grows, driven by the optimizer:
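The curves come from the SummaryWriter("logs") calls above; they can be viewed by running tensorboard --logdir=logs in the project directory and opening the URL it prints in a browser.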
3. Model Validation
The complete model validation (test / demo) routine: take an already trained model and feed it an input.
![](https://img-blog.csdnimg.cn/9eefb33c78db4b68ac616766ef06f77b.jpeg)
import torchvision
from PIL import Image
from torch import nn
import torch
image_path = "imgs/dog.png"
image = Image.open(image_path)  # a PIL Image
image = image.convert("RGB")    # convert a 4-channel RGBA image to a 3-channel RGB image
print(image)
transform = torchvision.transforms.Compose([torchvision.transforms.Resize((32, 32)),
                                             torchvision.transforms.ToTensor()])
image = transform(image)
print(image.shape)
# The network class must be defined (or imported) here because the model was saved as a whole object
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64*4*4, 64),
            nn.Linear(64, 10)
        )
    def forward(self, x):
        x = self.model1(x)
        return x
model = torch.load("model/tudui_29.pth", map_location=torch.device('cpu'))  # map a model trained on GPU onto the CPU
print(model)
image = torch.reshape(image, (1, 3, 32, 32))  # add a batch dimension to match the network's 4D input
model.eval()
with torch.no_grad():  # no gradient computation, which saves memory
    output = model(image)
print(output)
print(output.argmax(1))  # index of the class with the highest score
The model assigns the highest score to label 5, which corresponds to dog, so the image is classified correctly. The program output:
<PIL.Image.Image image mode=RGB size=307x173 at 0x1A23417EDA0>
torch.Size([3, 32, 32])
Tudui(
(model1): Sequential(
(0): Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(2): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(4): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(6): Flatten(start_dim=1, end_dim=-1)
(7): Linear(in_features=1024, out_features=64, bias=True)
(8): Linear(in_features=64, out_features=10, bias=True)
)
)
tensor([[-12.5589, -24.4361, -1.3480, 20.7813, 21.4084, 30.2107, -5.5689,
-3.5920, -13.8862, -4.2283]])
tensor([5])
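For reference, index 5 is dog in the standard CIFAR-10 class order. A small sketch that continues from the script above and maps the predicted index back to a class name (the classes list follows torchvision's CIFAR-10 ordering):
# Standard CIFAR-10 class names; index 5 is "dog"
classes = ["airplane", "automobile", "bird", "cat", "deer",
           "dog", "frog", "horse", "ship", "truck"]
print(classes[output.argmax(1).item()])  # prints: dog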
4. Supplement
1. Using argmax to compute classification accuracy:
import torch
outputs = torch.tensor([[0.1, 0.2],
                        [0.05, 0.4]])
print(outputs.argmax(0))  # column-wise: index of the maximum in each column
print(outputs.argmax(1))  # row-wise: index of the maximum in each row (this is what the training code uses per sample)
preds = outputs.argmax(0)
targets = torch.tensor([0, 1])
print((preds == targets).sum())  # number of positions where prediction and target match
tensor([0, 1])
tensor([1, 1])
tensor(2)
2. The effect of special layers
① The difference between model.train() and model.eval() mainly concerns the Batch Normalization and Dropout layers.
② If the model contains BN (Batch Normalization) and Dropout layers, call model.train() during training. For BN, model.train() makes the layer use the mean and variance of each batch; for Dropout, it randomly keeps only part of the connections when updating the parameters.
③ model.eval() disables the training behavior of Batch Normalization and Dropout. If the model contains BN and Dropout layers, call model.eval() during testing. For BN, model.eval() makes the layer use the statistics accumulated over the whole training data, i.e. the mean and variance stay fixed during testing; for Dropout, all connections are used and no neurons are randomly dropped (see the sketch below).
④ After training, the model is used on test samples. model.eval() must be called before model(test); otherwise, merely feeding data through the model will update its internal statistics (e.g. the BN running averages) even without training. This behavior comes from the BN and Dropout layers in the model.
⑤ When doing one-class classification, the training and test sets have different sample distributions, so this point needs particular attention.
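A minimal sketch of the train/eval difference, using a standalone Dropout layer (the tensor shape and dropout probability are arbitrary choices): in train mode some activations are randomly zeroed and the rest are rescaled, while in eval mode the layer passes its input through unchanged.
import torch
from torch import nn

layer = nn.Dropout(p=0.5)
x = torch.ones(1, 4)
layer.train()    # training behavior: randomly zero elements, scale the rest by 1/(1-p)
print(layer(x))
layer.eval()     # inference behavior: dropout is a no-op
print(layer(x))  # identical to x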