GPU Training (Method 1)
GPU training mainly involves three parts: the network model, the data (inputs and labels), and the loss function. These three parts are moved onto the GPU.
Move the network model to CUDA:
if torch.cuda.is_available():
    tudui = tudui.cuda()
Move the loss function to CUDA:
if torch.cuda.is_available():
    loss_fn = loss_fn.cuda()
Move the data to CUDA:
imgs, targets = data
if torch.cuda.is_available():
    imgs = imgs.cuda()
    targets = targets.cuda()
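To confirm that the model and the batch actually ended up on the GPU, you can print their devices. This quick check is not part of the original notes, just an illustration:

# Quick sanity check (illustrative): parameters and tensors report the device they live on
print(next(tudui.parameters()).device)   # prints cuda:0 once the model has been moved
print(imgs.device, targets.device)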
Timing GPU training
Import time:
import time
Record the start time:
start_time = time.time()
Record the end time:
end_time = time.time()
Print the elapsed time after every 100 training steps:
print(end_time - start_time)
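Put together, start_time is taken once before the training loop and the interval is printed every 100 steps. A minimal placement sketch (the loop here is only a stand-in; the real loop body appears in the complete code below):

import time

start_time = time.time()
total_train_step = 0
for _ in range(300):                      # stand-in for iterating over train_dataloader
    total_train_step = total_train_step + 1
    if total_train_step % 100 == 0:
        end_time = time.time()
        print(end_time - start_time)      # seconds elapsed since training started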
GPU Training (Method 2)
If the machine has two GPUs, you can pick a specific one with cuda:0 or cuda:1.
Define the training device.
Train on the CPU:
device = torch.device("cpu")
Use the GPU, way 1:
device = torch.device("cuda")
Use the GPU, way 2:
device = torch.device("cuda:0")
Choose the device depending on availability:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
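With several GPUs, torch.cuda.device_count() tells you how many are visible, and you can then pick one by index. A small sketch; choosing index 1 here is only an example:

if torch.cuda.is_available() and torch.cuda.device_count() > 1:
    device = torch.device("cuda:1")   # second GPU, chosen purely as an example
elif torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
print(device)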
Move the network onto the device. For an nn.Module the call works in place, so you can also skip the assignment and just write tudui.to(device):
tudui = tudui.to(device)
Loss function:
loss_fn = loss_fn.to(device)
Data (here the assignment is required, because .to(device) on a tensor returns a new tensor instead of modifying it in place):
imgs = imgs.to(device)
targets = targets.to(device)
Complete code:
import torchvision
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import time

# Training device: prefer the GPU when one is available
# device = torch.device("cpu")
# device = torch.device("cuda")    # use the GPU, way 1
# device = torch.device("cuda:0")  # use the GPU, way 2
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Simple CNN for CIFAR10
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x

# Prepare the CIFAR10 dataset
train_data = torchvision.datasets.CIFAR10("./dataset", train=True, transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)

train_data_size = len(train_data)
test_data_size = len(test_data)
print("Size of the training set: {}".format(train_data_size))
print("Size of the test set: {}".format(test_data_size))

train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# Model and loss function, moved to the training device
tudui = Tudui()
tudui = tudui.to(device)
# if torch.cuda.is_available():
#     tudui = tudui.cuda()

loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)
# if torch.cuda.is_available():
#     loss_fn = loss_fn.cuda()

learning = 0.01
optimizer = torch.optim.SGD(tudui.parameters(), learning)

total_train_step = 0
total_test_step = 0
epoch = 10

writer = SummaryWriter("logs")
start_time = time.time()

for i in range(epoch):
    print("----- Epoch {} starts -----".format(i + 1))

    # Training phase
    tudui.train()
    for data in train_dataloader:
        imgs, targets = data
        imgs = imgs.to(device)
        targets = targets.to(device)
        # if torch.cuda.is_available():
        #     imgs = imgs.cuda()
        #     targets = targets.cuda()
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print(end_time - start_time)
            print("Training step: {}, loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # Evaluation phase
    tudui.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            # if torch.cuda.is_available():
            #     imgs = imgs.cuda()
            #     targets = targets.cuda()
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy
    print("Total loss on the test set: {}".format(total_test_loss))
    print("Accuracy on the test set: {}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy / test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    torch.save(tudui, "./model/tudui_{}.pth".format(i))
    # torch.save(tudui.state_dict(), "tudui_{}.pth".format(i))
    print("Model saved")

writer.close()
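The loop above saves the whole model object with torch.save. Loading such a file on a machine without a GPU needs map_location, and the Tudui class definition must be importable in the loading script. A minimal sketch, with the file name assumed from the saving pattern above (on recent PyTorch versions you may also have to pass weights_only=False, since full-model pickles are no longer loaded by default):

import torch

# Assumed file name, following the "./model/tudui_{}.pth" pattern used above;
# map_location moves the stored GPU tensors onto the CPU so the file also opens without CUDA.
model = torch.load("./model/tudui_0.pth", map_location=torch.device("cpu"))
model.eval()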