Pytorch极简入门教程(十一)—— GPU上训练模型

# -*- coding: utf-8 -*-
"""
手写数字分类   卷积池化模型
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
import matplotlib.pyplot as plt
from tqdm import tqdm

from torchvision import datasets, transforms

# Convert PIL images to float tensors scaled to [0, 1].
transformation = transforms.Compose([
    transforms.ToTensor(),
])

# MNIST train/test splits; downloaded into ./data on first run.
train_ds = datasets.MNIST("./data",
                          train=True,
                          transform=transformation,
                          download=True)
test_ds = datasets.MNIST("./data",
                         train=False,
                         transform=transformation,
                         download=True)

train_dl = DataLoader(train_ds, batch_size=128, shuffle=True)
test_dl = DataLoader(test_ds, batch_size=256)

# Pull a single batch to sanity-check the pipeline.
imgs, labels = next(iter(train_dl))
print("labels[0]:\t", labels[0].item())

"""
创建模型
"""
class Model(nn.Module):
    """LeNet-style CNN for 28x28 single-channel MNIST digits.

    Two conv+pool stages followed by two fully connected layers;
    returns raw logits for the 10 digit classes.
    """

    def __init__(self):
        super().__init__()
        # 1 input channel -> 6 feature maps, 5x5 kernel (no padding): 28 -> 24
        self.conv1 = nn.Conv2d(1, 6, 5)
        # Shared 2x2 max-pool; halves the spatial size each time it is applied.
        self.pool = nn.MaxPool2d((2, 2))
        # 6 -> 16 feature maps, 5x5 kernel: 12 -> 8
        self.conv2 = nn.Conv2d(6, 16, 5)
        # After the conv/pool stack each sample is 16 x 4 x 4 = 256 values.
        self.liner_1 = nn.Linear(16 * 4 * 4, 256)
        self.liner_2 = nn.Linear(256, 10)

    def forward(self, input):
        # Stage 1: conv + relu + pool  (28x28 -> 24x24 -> 12x12)
        out = self.pool(F.relu(self.conv1(input)))
        # Stage 2: conv + relu + pool  (12x12 -> 8x8 -> 4x4)
        out = self.pool(F.relu(self.conv2(out)))
        # Flatten everything except the batch dimension for the linear layers.
        out = out.view(out.size(0), -1)
        out = F.relu(self.liner_1(out))
        return self.liner_2(out)

#print("model(imgs):\t", model(imgs)) #查看size()大小

# Select the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device", device)

# Training on the GPU takes only two steps:
#   1. move the model onto the GPU
#   2. move every batch of training data onto the GPU
model = Model()
model = model.to(device)  # step 1: move the model
print("model:\t", model)

loss_fn = torch.nn.CrossEntropyLoss()  # multi-class classification loss


def fit(epoch, model, trainloader, testloader, optimizer=None):
    """Run one training epoch, then evaluate on the full test set.

    Args:
        epoch: epoch index, used only for logging.
        model: network to train; must already live on `device`.
        trainloader: DataLoader over the training set.
        testloader: DataLoader over the test set.
        optimizer: optimizer stepping `model`'s parameters. Defaults to the
            module-level `optim` for backward compatibility — the original
            code read that global directly, which shadows the
            ``torch.optim`` module imported at the top of the file.

    Returns:
        (train_loss, train_acc, test_loss, test_acc): mean per-batch loss
        and per-sample accuracy for the epoch.
    """
    if optimizer is None:
        optimizer = optim  # fall back to the global optimizer instance

    model.train()  # harmless now; matters if dropout/batch-norm are added
    correct = 0
    total = 0
    running_loss = 0
    for x, y in trainloader:
        # Step 2 of GPU training: move each batch to the model's device.
        x, y = x.to(device), y.to(device)
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        with torch.no_grad():  # metrics only — keep them out of the graph
            y_pred = torch.argmax(y_pred, dim=1)
            correct += (y_pred == y).sum().item()
            total += y.size(0)
            running_loss += loss.item()

    # CrossEntropyLoss already averages over the batch, so average the
    # accumulated values over the number of batches. (The original divided
    # by len(dataset), deflating the metric ~batch_size-fold — which is why
    # the log printed "loss: 0.0".)
    epoch_loss = running_loss / len(trainloader)
    epoch_acc = correct / total

    model.eval()  # evaluation mode for the test pass
    test_correct = 0
    test_total = 0
    test_running_loss = 0

    with torch.no_grad():
        for x, y in testloader:
            x, y = x.to(device), y.to(device)
            y_pred = model(x)
            loss = loss_fn(y_pred, y)
            y_pred = torch.argmax(y_pred, dim=1)
            test_correct += (y_pred == y).sum().item()
            test_total += y.size(0)
            test_running_loss += loss.item()

    epoch_test_loss = test_running_loss / len(testloader)
    epoch_test_acc = test_correct / test_total

    print('epoch: ', epoch,
          'loss: ', round(epoch_loss, 3),
          'accuracy:', round(epoch_acc, 3),
          'test_loss: ', round(epoch_test_loss, 3),
          'test_accuracy:', round(epoch_test_acc, 3)
          )

    return epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc

# NOTE(review): this rebinding shadows the `torch.optim` module imported at
# the top of the file. `fit` resolves the global name `optim` at call time,
# so it sees this Adam instance — keep this assignment before the loop.
optim = torch.optim.Adam(model.parameters(), lr=0.001)
epochs = 20
# Per-epoch metric history (e.g. for plotting learning curves afterwards).
train_loss = []
train_acc = []
test_loss = []
test_acc = []

# Train for `epochs` epochs, recording the metrics `fit` returns.
for epoch in range(epochs):
    epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc = fit(epoch,
                                                                 model,
                                                                 train_dl,
                                                                 test_dl)
    train_loss.append(epoch_loss)
    train_acc.append(epoch_acc)
    test_loss.append(epoch_test_loss)
    test_acc.append(epoch_test_acc)

labels[0]:	 5
device cuda:0
model:	 Model(
  (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
  (pool): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
  (liner_1): Linear(in_features=256, out_features=256, bias=True)
  (liner_2): Linear(in_features=256, out_features=10, bias=True)
)
epoch:  0 loss:  0.003 accuracy: 0.898 test_loss:  0.0 test_accuracy: 0.972
epoch:  1 loss:  0.001 accuracy: 0.974 test_loss:  0.0 test_accuracy: 0.983
epoch:  2 loss:  0.0 accuracy: 0.981 test_loss:  0.0 test_accuracy: 0.986
epoch:  3 loss:  0.0 accuracy: 0.985 test_loss:  0.0 test_accuracy: 0.986
epoch:  4 loss:  0.0 accuracy: 0.988 test_loss:  0.0 test_accuracy: 0.988
epoch:  5 loss:  0.0 accuracy: 0.989 test_loss:  0.0 test_accuracy: 0.989
epoch:  6 loss:  0.0 accuracy: 0.991 test_loss:  0.0 test_accuracy: 0.988
epoch:  7 loss:  0.0 accuracy: 0.992 test_loss:  0.0 test_accuracy: 0.988
epoch:  8 loss:  0.0 accuracy: 0.993 test_loss:  0.0 test_accuracy: 0.99
epoch:  9 loss:  0.0 accuracy: 0.994 test_loss:  0.0 test_accuracy: 0.99
epoch:  10 loss:  0.0 accuracy: 0.995 test_loss:  0.0 test_accuracy: 0.99
epoch:  11 loss:  0.0 accuracy: 0.996 test_loss:  0.0 test_accuracy: 0.989
epoch:  12 loss:  0.0 accuracy: 0.997 test_loss:  0.0 test_accuracy: 0.988
epoch:  13 loss:  0.0 accuracy: 0.996 test_loss:  0.0 test_accuracy: 0.988
epoch:  14 loss:  0.0 accuracy: 0.997 test_loss:  0.0 test_accuracy: 0.989
epoch:  15 loss:  0.0 accuracy: 0.997 test_loss:  0.0 test_accuracy: 0.989
epoch:  16 loss:  0.0 accuracy: 0.998 test_loss:  0.0 test_accuracy: 0.99
epoch:  17 loss:  0.0 accuracy: 0.997 test_loss:  0.0 test_accuracy: 0.989
epoch:  18 loss:  0.0 accuracy: 0.998 test_loss:  0.0 test_accuracy: 0.991
epoch:  19 loss:  0.0 accuracy: 0.998 test_loss:  0.0 test_accuracy: 0.991
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值