argmax
Suppose the input is a tensor with two rows, [[0.1, 0.2], [0.3, 0.4]].
Under one-hot encoding, the column positions correspond to the classes [0, 1].
argmax(0) takes the maximum from top to bottom, i.e. along dimension 0 (down each column).
argmax(1) takes the maximum from left to right, i.e. along dimension 1 (across each row).
With argmax(1), the maximum of the first row is 0.2, which sits at position 1 of the one-hot encoding, so the predicted class is 1.
The second row works the same way, so preds = [1, 1].
Comparing against the targets (here targets = [0, 1]), preds == targets gives [False, True], and calling .sum() on that result counts how many predictions are correct.
This pattern is commonly used to compute accuracy in classification tasks.
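A minimal sketch of this walkthrough in PyTorch; the outputs tensor and targets = [0, 1] are the values assumed above:

import torch

# Two rows of scores, one per sample; the columns correspond to classes 0 and 1
outputs = torch.tensor([[0.1, 0.2],
                        [0.3, 0.4]])

print(outputs.argmax(0))  # tensor([1, 1]) -- maximum taken top to bottom (dim 0, down each column)
print(outputs.argmax(1))  # tensor([1, 1]) -- maximum taken left to right (dim 1, across each row)

preds = outputs.argmax(1)
targets = torch.tensor([0, 1])     # targets assumed in the walkthrough above
print(preds == targets)            # tensor([False,  True])
print((preds == targets).sum())    # tensor(1), the number of correct predictions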
Writing the model
Combining TensorBoard with the network defined below gives a basic model-training procedure.
model1.py
import torch
from torch import nn
# from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear


class Test(nn.Module):
    def __init__(self):
        super(Test, self).__init__()
        # CIFAR10 model: three Conv2d + MaxPool2d stages, then Flatten and two Linear layers
        self.module1 = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),   # (3, 32, 32)  -> (32, 32, 32)
            nn.MaxPool2d(2),             # (32, 32, 32) -> (32, 16, 16)
            nn.Conv2d(32, 32, 5, 1, 2),  # (32, 16, 16) -> (32, 16, 16)
            nn.MaxPool2d(2),             # (32, 16, 16) -> (32, 8, 8)
            nn.Conv2d(32, 64, 5, 1, 2),  # (32, 8, 8)   -> (64, 8, 8)
            nn.MaxPool2d(2),             # (64, 8, 8)   -> (64, 4, 4)
            nn.Flatten(),                # (64, 4, 4)   -> 64*4*4 = 1024
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10)            # 10 output classes for CIFAR10
        )

    def forward(self, x):
        x = self.module1(x)
        return x


if __name__ == '__main__':
    # Quick shape check: a batch of 64 fake 3x32x32 images should give (64, 10) logits
    test = Test()
    input = torch.ones((64, 3, 32, 32))
    output = test(input)
    print(output.shape)
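Running model1.py directly should print torch.Size([64, 10]): 64 samples in the batch, each mapped to scores for the 10 CIFAR10 classes, which also confirms that 64*4*4 is the right input size for the first Linear layer.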
train1.py
import torch
import torchvision.datasets
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from model1 import *

# Prepare the CIFAR10 dataset
train_data = torchvision.datasets.CIFAR10(root="./dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)

writer = SummaryWriter("logs")

# Check the length of the datasets
train_data_size = len(train_data)
test_data_size = len(test_data)
print("Length of the training set: {}".format(train_data_size))
print("Length of the test set: {}".format(test_data_size))

# Use DataLoader to load the datasets
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# Create the network model
test = Test()

# Loss function
loss_fn = nn.CrossEntropyLoss()

# Optimizer
# 1e-2 = 1*(10)^(-2) = 1/100 = 0.01
learning_rate = 1e-2
optimizer = torch.optim.SGD(test.parameters(), lr=learning_rate)

# Training parameters
total_train_step = 0   # number of training steps so far
total_test_step = 0    # number of test rounds so far
epoch = 10             # number of training epochs

for i in range(epoch):
    print("----- Epoch {} starts -----".format(i + 1))

    # Training step
    for data in train_dataloader:
        imgs, targets = data
        outputs = test(imgs)
        loss = loss_fn(outputs, targets)

        # Use the optimizer to update the model
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("Training step: {}, Loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # Test step, to check whether the model meets the requirements
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            outputs = test(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("Loss on the whole test set: {}".format(total_test_loss))
    print("Accuracy on the whole test set: {}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    total_test_step = total_test_step + 1

    # Save the model after every epoch, so each run's result is kept
    torch.save(test, "test_{}.pth".format(i))
    print("Model saved")

writer.close()
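Because torch.save(test, ...) stores the whole model object, a saved checkpoint can be loaded back later for evaluation. A minimal sketch, assuming the file test_9.pth produced by the last epoch; on recent PyTorch versions torch.load may additionally need weights_only=False to unpickle a full model object:

import torch
from model1 import Test  # the class definition must be importable to unpickle the saved model

# Load the model saved after the last epoch (assumed file name from the loop above)
model = torch.load("test_9.pth")
model.eval()

# Classify a single fake 3x32x32 image
image = torch.ones((1, 3, 32, 32))
with torch.no_grad():
    output = model(image)
print(output.argmax(1))  # predicted class index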
TensorBoard
In the console, run: tensorboard --logdir="logs" and then open the URL it prints.