1. GPU Acceleration
Method 1:
Theory: there are three things that can be moved to the GPU: the network model, the data, and the loss function. Calling .cuda() on each of them enables GPU acceleration. The complete, updated project code is shown below (the newly added lines are the .cuda() calls):
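As a quick preview of those additions before the full listing, the three call sites look like this (each wrapped in a torch.cuda.is_available() check so the script still runs on CPU-only machines):

if torch.cuda.is_available():
    tudui=tudui.cuda()        # 1. the network model
    loss_fn=loss_fn.cuda()    # 2. the loss function
    imgs=imgs.cuda()          # 3. the data (done inside the train and test loops)
    targets=targets.cuda()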
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# Load the datasets
train_data=torchvision.datasets.CIFAR10(root="../data",train=True,transform=torchvision.transforms.ToTensor(),download=True) # ".." stores the data one directory above the code file
test_data=torchvision.datasets.CIFAR10(root="../data",train=False,transform=torchvision.transforms.ToTensor(),download=True)
train_data_size=len(train_data)
test_data_size=len(test_data)
print("训练数据集长度:{}".format(train_data_size))
print("测试数据集长度:{}".format(test_data_size))
#切片
train_dataloader=DataLoader(train_data,batch_size=64)
test_dataloader=DataLoader(test_data,batch_size=64)
# Build the network
class Tudui(nn.Module): # if a module name is missing, you can import it directly via right-click
    def __init__(self):
        super(Tudui,self).__init__()
        self.model=nn.Sequential( # build the network with nn.Sequential so it is easy to call later
            nn.Conv2d(3,32,5,1,2),
            nn.MaxPool2d(2),
            nn.Conv2d(32,32,5,1,2),
            nn.MaxPool2d(2),
            nn.Conv2d(32,64,5,1,2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64*4*4,64),
            nn.Linear(64,10)
        )
    def forward(self,x):
        x=self.model(x)
        return x
tudui = Tudui()
if torch.cuda.is_available(): # make sure a CUDA device is actually available
    tudui=tudui.cuda()
# Loss function
loss_fn=nn.CrossEntropyLoss()
if torch.cuda.is_available():
    loss_fn=loss_fn.cuda()
# Optimizer
learning_rate=1e-2
optimizer=torch.optim.SGD(tudui.parameters(),lr=learning_rate)
# Set training parameters
total_train_step=0
total_test_step=0
epoch=2
# Add TensorBoard
writer = SummaryWriter("../logs_train") # adds a TensorBoard writer for the plots
for i in range(epoch):
    print("----------Epoch {} training starts----------".format(i+1))
    for data in train_dataloader:
        imgs,targets=data
        if torch.cuda.is_available():
            imgs=imgs.cuda()
            targets=targets.cuda()
        outputs=tudui(imgs)
        loss=loss_fn(outputs,targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step=total_train_step+1
        if total_train_step%100==0:
            print("Training step: {}, loss: {}".format(total_train_step, loss.item())) # .item() converts the one-element tensor to a Python number
            writer.add_scalar("train_loss",loss.item(),total_train_step) # the last two arguments are the y value and the x step
    total_test_loss=0
    total_accuracy=0
    with torch.no_grad(): # this is evaluation, so make sure no gradients are computed
        for data in test_dataloader:
            imgs,targets=data
            if torch.cuda.is_available():
                imgs=imgs.cuda()
                targets=targets.cuda()
            outputs=tudui(imgs)
            loss=loss_fn(outputs,targets)
            total_test_loss=total_test_loss+loss.item() # accumulate the test loss
            accuracy=(outputs.argmax(1)==targets).sum()
            total_accuracy=total_accuracy+accuracy
    print("Total loss on the test set: {}".format(total_test_loss))
    print("Accuracy on the test set: {}".format(total_accuracy/test_data_size))
    writer.add_scalar("test_loss",total_test_loss,total_test_step)
    writer.add_scalar("test_accuracy",total_accuracy/test_data_size,total_test_step)
    total_test_step=total_test_step+1
    torch.save(tudui,"tudui_{}.pth".format(i))
writer.close()
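Side note: torch.save(tudui, ...) pickles the whole model while its parameters sit on the GPU, so loading the resulting tudui_0.pth on a CPU-only machine needs map_location to remap the tensors; a minimal sketch:

import torch
# map_location remaps the GPU tensors onto the CPU at load time;
# the Tudui class definition must be importable, since the whole object was pickled
# (on newer PyTorch versions you may also need to pass weights_only=False)
model = torch.load("tudui_0.pth", map_location=torch.device("cpu"))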
Method 2:
Define the training device: device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
Then change each of the .cuda() call sites above to .to(device), e.g. tudui=tudui.to(device), and the GPU is used automatically whenever one is available.
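A minimal sketch of Method 2, assuming the same Tudui class, DataLoaders, and training loop defined above; only the device handling differs from Method 1:

import torch
from torch import nn

# Define the training device once; it falls back to the CPU when no GPU is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

tudui = Tudui().to(device)                   # move the network model
loss_fn = nn.CrossEntropyLoss().to(device)   # move the loss function

for data in train_dataloader:
    imgs, targets = data
    imgs = imgs.to(device)                   # move the data, batch by batch
    targets = targets.to(device)
    outputs = tudui(imgs)
    loss = loss_fn(outputs, targets)
    # ... the rest of the training step (zero_grad, backward, step) is unchanged

An advantage of this form is that the same script runs unchanged on CPU-only and GPU machines, and targeting a specific card only requires changing the device string, e.g. torch.device("cuda:0").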