There are two ways to train on the GPU.

The cuda method

torch provides a .cuda() method that loads the model being trained onto the graphics card. (Calling .cuda() fails with an error when no CUDA-capable GPU is available.)

Three things can be moved onto the GPU for training (a minimal sketch follows this list):
1. the network model
2. the data (inputs and labels)
3. the loss function
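A minimal sketch of the pattern, using a placeholder nn.Linear model rather than the tutorial's Tudui network; the availability check is only there so the snippet also runs on a CPU-only machine:

import torch
import torch.nn as nn

model = nn.Linear(4, 2)                  # 1. network model (placeholder)
loss_fn = nn.CrossEntropyLoss()          # 3. loss function
inputs = torch.randn(8, 4)               # 2. data: a fake batch of inputs...
targets = torch.randint(0, 2, (8,))      #    ...and labels

if torch.cuda.is_available():            # .cuda() errors out without a GPU
    model = model.cuda()
    loss_fn = loss_fn.cuda()
    inputs = inputs.cuda()
    targets = targets.cuda()

loss = loss_fn(model(inputs), targets)   # forward pass runs on the GPU once moved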
Full source:
import torch
import torchvision.datasets
import torch.nn as nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import time


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        x = self.model1(x)
        return x


train_data = torchvision.datasets.CIFAR10(root="./dataset2", train=True,
                                          transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10(root="./dataset2", train=False,
                                         transform=torchvision.transforms.ToTensor(), download=True)

# Dataset sizes
train_data_size = len(train_data)
test_data_size = len(test_data)
print("Size of the training set: {}".format(train_data_size))
print("Size of the test set: {}".format(test_data_size))

# Load the datasets with DataLoader
train_data = DataLoader(train_data, batch_size=64)
test_data = DataLoader(test_data, batch_size=64)

start_time = time.time()

# Create the network model
tudui = Tudui()
tudui = tudui.cuda()

# Loss function
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.cuda()

# Optimizer
learning_rate = 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

# Bookkeeping for the training loop
# Number of training steps so far
total_train_step = 0
# Number of test steps so far
total_test_step = 0
# Number of epochs
epoch = 10

writer = SummaryWriter("./logs")

for i in range(epoch):
    print("Epoch {} starts".format(i + 1))

    # Training phase
    tudui.train()
    for data in train_data:
        img, targets = data
        img = img.cuda()
        targets = targets.cuda()
        output = tudui(img)
        loss = loss_fn(output, targets)
        writer.add_scalar("train", loss.item(), total_train_step)
        # Optimizer updates the model
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print("Elapsed time: ", end="")
            print(end_time - start_time)
            print("Step: {}, loss: {}".format(total_train_step, loss.item()))

    total_test_loss = 0
    total_test_accuracy = 0

    # Test phase
    tudui.eval()
    # Gradients are computed automatically for the model's parameters; this is
    # test data and the network is not being optimized here, so turn autograd off
    with torch.no_grad():
        for data in test_data:
            img, targets = data
            img = img.cuda()
            targets = targets.cuda()
            output = tudui(img)
            loss = loss_fn(output, targets)
            writer.add_scalar("test", loss.item(), total_test_step)
            total_test_loss = total_test_loss + loss.item()
            total_test_step = total_test_step + 1
            accuracy = (output.argmax(1) == targets).sum().item()
            total_test_accuracy = total_test_accuracy + accuracy
    print("Loss on the whole test set: {}".format(total_test_loss))
    print("Accuracy on the whole test set: {}".format(total_test_accuracy / test_data_size))

    # Save the model after every epoch; torch.save does not create the
    # tudui_train folder automatically, so it must be created by hand first
    torch.save(tudui, "./tudui_train/tudui_{}.pth".format(i))

writer.close()
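The manual folder creation mentioned above can also be done from the script itself; a small sketch using the standard library (the model here is a placeholder, not the trained Tudui):

import os
import torch
import torch.nn as nn

save_dir = "./tudui_train"
os.makedirs(save_dir, exist_ok=True)     # create the folder if it is missing

model = nn.Linear(4, 2)                  # placeholder for the trained model
torch.save(model, os.path.join(save_dir, "tudui_0.pth"))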
The device method

Instead of calling .cuda() everywhere, define a single device object and move everything onto it with .to(device); the expression below picks the first GPU when one is available and falls back to the CPU otherwise:

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
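A minimal sketch of this pattern with a placeholder model (the same three things are moved as before, but now via .to(device), so the code runs unchanged on CPU-only machines):

import torch
import torch.nn as nn

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model = nn.Linear(4, 2).to(device)              # network model
loss_fn = nn.CrossEntropyLoss().to(device)      # loss function
inputs = torch.randn(8, 4).to(device)           # data: inputs...
targets = torch.randint(0, 2, (8,)).to(device)  # ...and labels

loss = loss_fn(model(inputs), targets)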
Full source:
# The second way to train on the GPU
import torch
import torchvision.datasets
import torch.nn as nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import time

# Define the device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        x = self.model1(x)
        return x


train_data = torchvision.datasets.CIFAR10(root="./dataset2", train=True,
                                          transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10(root="./dataset2", train=False,
                                         transform=torchvision.transforms.ToTensor(), download=True)

# Dataset sizes
train_data_size = len(train_data)
test_data_size = len(test_data)
print("Size of the training set: {}".format(train_data_size))
print("Size of the test set: {}".format(test_data_size))

# Load the datasets with DataLoader
train_data = DataLoader(train_data, batch_size=64)
test_data = DataLoader(test_data, batch_size=64)

# Create the network model
tudui = Tudui()
tudui = tudui.to(device)

# Loss function
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)

# Optimizer
learning_rate = 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

# Bookkeeping for the training loop
# Number of training steps so far
total_train_step = 0
# Number of test steps so far
total_test_step = 0
# Number of epochs
epoch = 10

writer = SummaryWriter("./logs")
start_time = time.time()

for i in range(epoch):
    print("Epoch {} starts".format(i + 1))

    # Training phase
    tudui.train()
    for data in train_data:
        img, targets = data
        img = img.to(device)
        targets = targets.to(device)
        output = tudui(img)
        loss = loss_fn(output, targets)
        writer.add_scalar("train", loss.item(), total_train_step)
        # Optimizer updates the model
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print("Elapsed time: ", end="")
            print(end_time - start_time)
            print("Step: {}, loss: {}".format(total_train_step, loss.item()))

    total_test_loss = 0
    total_test_accuracy = 0

    # Test phase
    tudui.eval()
    # Gradients are computed automatically for the model's parameters; this is
    # test data and the network is not being optimized here, so turn autograd off
    with torch.no_grad():
        for data in test_data:
            img, targets = data
            img = img.to(device)
            targets = targets.to(device)
            output = tudui(img)
            loss = loss_fn(output, targets)
            writer.add_scalar("test", loss.item(), total_test_step)
            total_test_loss = total_test_loss + loss.item()
            total_test_step = total_test_step + 1
            accuracy = (output.argmax(1) == targets).sum().item()
            total_test_accuracy = total_test_accuracy + accuracy
    print("Loss on the whole test set: {}".format(total_test_loss))
    print("Accuracy on the whole test set: {}".format(total_test_accuracy / test_data_size))

    # Save the model after every epoch; torch.save does not create the
    # tudui_train folder automatically, so it must be created by hand first
    torch.save(tudui, "./tudui_train/tudui_{}.pth".format(i))

writer.close()
Summary

Run both programs and compare the printed times: training on the GPU takes a bit over 2 seconds, while the CPU takes about 5 seconds (my GPU is fairly weak, an MX350).
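For reference, a self-contained sketch of how such a CPU-vs-GPU comparison can be made, timing 100 optimizer steps of a small placeholder model on each device (torch.cuda.synchronize() makes sure queued GPU work finishes before the clock is read):

import time
import torch
import torch.nn as nn

def time_steps(device, steps=100):
    # Time a fixed number of training steps of a small linear model
    model = nn.Linear(1024, 10).to(device)
    loss_fn = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
    x = torch.randn(64, 1024, device=device)
    y = torch.randint(0, 10, (64,), device=device)
    start = time.time()
    for _ in range(steps):
        loss = loss_fn(model(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    if device.type == "cuda":
        torch.cuda.synchronize()  # wait for the GPU to finish before stopping the clock
    return time.time() - start

print("cpu :", time_steps(torch.device("cpu")))
if torch.cuda.is_available():
    print("cuda:", time_steps(torch.device("cuda:0")))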