Accelerate多GPUs训练推理在代码中如何使用

6 篇文章 0 订阅

主要有四部分:

1.使用Accelerate库修改单GPU代码,实现多GPUs训练及推理

2.多GPUs训练时设置正确的batch size

3.Accelerate参数与混合精度训练

4.多GPUs训练时在主进程打印信息

1.Accelerate实现多GPU 

 先看一下单GPU的代码

各部分的解释我都用带⭐的注释写出来了

%%writefile train.py
# ⭐导入相对应的库
import pandas as pd
import numpy as np
import cv2

import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader, Subset
from torch.nn import CrossEntropyLoss

# ⭐这部分是用来数据增强的
import albumentations as A
from albumentations.pytorch import ToTensorV2
# ⭐引入pytorch的一些视觉模型
import timm
from tqdm import tqdm

# ⭐对数据增强做转换的函数
def get_transform(image_size, train=True):
    """Return the albumentations pipeline for the given split.

    Both splits resize to (image_size, image_size), normalize with the
    dataset mean/std, and convert to a tensor; training additionally
    applies random flips and brightness/contrast jitter first.
    """
    common = [
        A.Resize(image_size, image_size, interpolation=cv2.INTER_LANCZOS4),
        A.Normalize(0.1310, 0.30854),
        ToTensorV2(),
    ]
    if not train:
        return A.Compose(common)
    augmentations = [
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.RandomBrightnessContrast(p=0.2),
    ]
    return A.Compose(augmentations + common)
    
# ⭐数据集的定义,跟平常写代码一样    
class MiniDataSet(Dataset):
    """Minimal in-memory image dataset.

    images    : array-like of images, cast to float32 on construction
    labels    : optional integer targets; omitted for test data, in which
                case samples contain no "label" key
    transform : optional albumentations-style callable, invoked as
                transform(image=img)["image"]
    """

    def __init__(self, images, labels=None, transform=None):
        self.images = images.astype("float32")
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        image = self.images[idx]
        if self.transform is not None:
            image = self.transform(image=image)["image"]
        sample = {"image": image}
        if self.labels is not None:
            sample["label"] = self.labels[idx]
        return sample
    
# ⭐模型部分    
class TimmModel(nn.Module):
    """Thin wrapper around a timm backbone for single-channel classification."""

    def __init__(self, backbone, num_class, pretrained=False):
        super().__init__()
        # in_chans=1 because the input images are grayscale (28x28 digits).
        self.model = timm.create_model(
            backbone,
            pretrained=pretrained,
            in_chans=1,
            num_classes=num_class,
        )

    def forward(self, image):
        # Return raw (unnormalized) class logits.
        return self.model(image)

# ⭐训练函数
def train_fn(args, model, optimizer, dataloader):
    """Run one training epoch on args.device and return the mean batch loss."""
    model.to(args.device)
    model.train()
    loss_fn = torch.nn.CrossEntropyLoss()
    losses = []

    # One optimizer step per batch: forward, cross-entropy loss, backward.
    for batch in tqdm(dataloader):
        images = batch["image"].to(args.device)
        targets = batch["label"].to(args.device)
        logits = model(images)
        optimizer.zero_grad()
        loss = loss_fn(logits, targets)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())

    return np.mean(losses)

# ⭐预测
@torch.no_grad()
def predict_fn(args, model, dataloader):
    """Predict a class index for every sample; returns a 1-D numpy array."""
    model.to(args.device)
    model.eval()
    batch_preds = []
    for batch in dataloader:
        logits = model(batch["image"].to(args.device))
        batch_preds.append(torch.argmax(logits, 1).cpu().numpy())
    return np.concatenate(batch_preds, axis=0)

# ⭐对训练过程做封装,打印一些信息
def fit_model(args, model, optimizer, train_dl, val_dl):
    """Train for args.ep epochs, checkpoint on best validation accuracy,
    and return the model reloaded from the best checkpoint."""
    best_acc = 0.
    for epoch in range(args.ep):
        train_loss = train_fn(args, model, optimizer, train_dl)
        val_pred = predict_fn(args, model, val_dl)
        # Validation loader is not shuffled, so predictions align with labels.
        val_acc = np.mean(val_pred == val_dl.dataset.labels)
        print(f"Epoch {epoch+1}, train loss {train_loss:.4f}, val acc {val_acc:.4f}")
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save(model.state_dict(), "model.bin")
    model.load_state_dict(torch.load("model.bin"))
    return model
    
    

if __name__ == "__main__":
    import argparse

    # Command-line hyperparameters.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ep", default=5, type=int)           # number of epochs
    parser.add_argument("--lr", default=0.00005, type=float)   # learning rate
    parser.add_argument("--bs", default=256, type=int)         # batch size
    parser.add_argument("--device", default=0, type=int)       # CUDA device index
    parser.add_argument("--model", default="convnext_small")   # timm backbone name
    parser.add_argument("--image_size", default=56, type=int)  # resize target
    args = parser.parse_args()

    # Kaggle digit-recognizer data: column 0 is the label,
    # the remaining 784 columns are a flattened 28x28 image.
    train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
    train_images = train.iloc[:, 1:].values.reshape(-1, 28, 28)
    train_labels = train.iloc[:, 0].values

    test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
    test_images = test.values.reshape(-1, 28, 28)

    submission = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv")

    # First 40k samples train, the remainder validate.
    train_transform = get_transform(args.image_size, True)
    valid_transform = get_transform(args.image_size, False)
    train_ds = MiniDataSet(train_images[:40000], train_labels[:40000], train_transform)
    val_ds = MiniDataSet(train_images[40000:], train_labels[40000:], valid_transform)
    test_ds = MiniDataSet(test_images, transform=valid_transform)

    train_dl = DataLoader(
        train_ds,
        batch_size=args.bs,
        num_workers=2,
        shuffle=True,
        drop_last=True)

    val_dl = DataLoader(
        val_ds,
        batch_size=args.bs * 2,
        num_workers=2,
        shuffle=False,
        drop_last=False)

    test_dl = DataLoader(
        test_ds,
        batch_size=args.bs * 2,
        num_workers=2,
        shuffle=False,
        drop_last=False)

    # FIX: the original script rebuilt train_ds/val_ds/test_ds a second time
    # here (an exact duplicate of the construction above); the duplicates
    # were dead work and have been removed.

    model = TimmModel(args.model, 10, pretrained=True)
    optimizer = Adam(model.parameters(), lr=args.lr)
    model = fit_model(args, model, optimizer, train_dl, val_dl)

    test_pred = predict_fn(args, model, test_dl)
    submission["Label"] = test_pred
    submission.to_csv("./submission.csv", index=False)

多GPU训练的代码

主要修改处:

 首先就是增加了accelerate的库来GPU加速

from accelerate import Accelerator

其次看下修改部分,代码中加了备注,主要看⭐部分和备注

%%writefile ddp_train.py

import pandas as pd
import numpy as np
import cv2
from easydict import EasyDict
import yaml

import torch
import torch.nn as nn
from torch.optim import Adam, SGD, AdamW
from torch.utils.data import Dataset, DataLoader, Subset
from torch.nn import CrossEntropyLoss, NLLLoss

import albumentations as A
from albumentations.pytorch import ToTensorV2
# ⭐新增:添加这个accelerate库
from accelerate import Accelerator
import timm
from tqdm import tqdm

# disable warning
import warnings
warnings.filterwarnings("ignore")

def get_transform(image_size, train=True):
    """Build the augmentation pipeline for a split.

    Evaluation: resize -> normalize -> tensor. Training prepends random
    horizontal/vertical flips and brightness/contrast jitter.
    """
    steps = []
    if train:
        steps += [
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.RandomBrightnessContrast(p=0.2),
        ]
    steps += [
        A.Resize(image_size, image_size, interpolation=cv2.INTER_LANCZOS4),
        A.Normalize(0.1310, 0.30854),
        ToTensorV2(),
    ]
    return A.Compose(steps)
    
    
class MiniDataSet(Dataset):
    """In-memory image dataset used for both labeled and unlabeled data.

    images    : array-like of images, cast to float32 up front
    labels    : optional integer targets; when None, __getitem__ returns
                only the "image" key (test-time usage)
    transform : optional albumentations-style callable invoked as
                transform(image=img)["image"]
    """

    def __init__(self, images, labels=None, transform=None):
        self.images = images.astype("float32")
        self.labels = labels
        self.transform = transform

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        image = self.images[idx]
        if self.transform is not None:
            image = self.transform(image=image)["image"]
        sample = {"image": image}
        if self.labels is not None:
            sample["label"] = self.labels[idx]
        return sample
    
    
class TimmModel(nn.Module):
    """Wraps a timm backbone configured for grayscale input."""

    def __init__(self, backbone, num_class, pretrained=False):
        super().__init__()
        # in_chans=1: the digit images are single-channel.
        self.model = timm.create_model(
            backbone, pretrained=pretrained, in_chans=1, num_classes=num_class
        )

    def forward(self, image):
        # Raw logits; CrossEntropyLoss applies softmax internally.
        return self.model(image)
    
    
def train_fn(args, model, optimizer, dataloader):
    """Run one training epoch under Accelerate; return the mean batch loss.

    Device placement is handled by accelerator.prepare() upstream, so no
    .to(device) calls are needed here, and the backward pass goes through
    the accelerator so mixed precision / distributed scaling apply.
    """
    model.train()
    loss_fn = torch.nn.CrossEntropyLoss()
    losses = []

    # Show the progress bar only on the main process; otherwise every GPU
    # process prints its own duplicate bar.
    progress = tqdm(dataloader, disable=not args.accelerator.is_main_process)
    for batch in progress:
        logits = model(batch["image"])
        optimizer.zero_grad()
        loss = loss_fn(logits, batch["label"])
        # accelerator.backward() replaces loss.backward() under Accelerate.
        args.accelerator.backward(loss)
        optimizer.step()
        losses.append(loss.item())

    return np.mean(losses)

# ⭐预测的时候跟上面差不多但是要注意改动4
@torch.no_grad()
def predict_fn(args, model, dataloader):
    """Predict class indices across all processes; returns a 1-D numpy array.

    gather_for_metrics() collects per-GPU predictions onto every process
    and trims the padding Accelerate adds when the sample count is not
    divisible by the total batch size, keeping predictions aligned.
    """
    model.eval()
    gathered = []
    for batch in dataloader:
        logits = model(batch["image"])
        preds = torch.argmax(logits, 1)
        preds = args.accelerator.gather_for_metrics(preds)
        gathered.append(preds.cpu().numpy())
    return np.concatenate(gathered, axis=0)



def fit_model(args, model, optimizer, train_dl, val_dl):
    """Train for args.ep epochs, checkpointing the best validation accuracy,
    and return the model reloaded from the best checkpoint.

    Every process computes the same validation accuracy (predictions are
    gathered in predict_fn), but only the main process writes the
    checkpoint: in the original code every rank called torch.save() on the
    same "model.bin" concurrently, a file-write race that can corrupt the
    checkpoint.
    """
    accelerator = args.accelerator
    best_score = 0.
    for ep in range(args.ep):
        train_loss = train_fn(args, model, optimizer, train_dl)
        val_pred = predict_fn(args, model, val_dl)
        # Validation loader is not shuffled, so predictions align with labels.
        val_acc = np.mean(val_pred == val_dl.dataset.labels)
        if accelerator.is_main_process:
            print(f"Epoch {ep+1}, train loss {train_loss:.4f}, val acc {val_acc:.4f}")
        if val_acc > best_score:
            best_score = val_acc
            # Only rank 0 writes; the barrier keeps other ranks from racing
            # ahead while the file is being written.
            if accelerator.is_main_process:
                torch.save(model.state_dict(), "model.bin")
            accelerator.wait_for_everyone()
    # Ensure the final checkpoint is fully on disk before any rank reads it.
    accelerator.wait_for_everyone()
    model.load_state_dict(torch.load("model.bin"))
    return model
    
    

if __name__ == "__main__":
    import argparse

    # Note: --bs is the batch size PER GPU; the effective global batch size
    # is num_processes * bs.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ep", default=5, type=int)
    parser.add_argument("--lr", default=0.00005, type=float)
    parser.add_argument("--bs", default=256, type=int)
    parser.add_argument("--device", default=0, type=int)
    parser.add_argument("--model", default="convnext_small")
    parser.add_argument("--image_size", default=56, type=int)
    args = parser.parse_args()

    # Kaggle digit-recognizer data: column 0 is the label, the remaining
    # 784 columns are a flattened 28x28 image.
    train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
    train_images = train.iloc[:, 1:].values.reshape(-1, 28, 28)
    train_labels = train.iloc[:, 0].values

    test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
    test_images = test.values.reshape(-1, 28, 28)

    submission = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv")

    # First 40k samples train, the remainder validate.
    train_transform = get_transform(args.image_size, True)
    valid_transform = get_transform(args.image_size, False)
    train_ds = MiniDataSet(train_images[:40000], train_labels[:40000], train_transform)
    val_ds = MiniDataSet(train_images[40000:], train_labels[40000:], valid_transform)
    test_ds = MiniDataSet(test_images, transform=valid_transform)

    train_dl = DataLoader(
        train_ds,
        batch_size=args.bs,
        num_workers=2,
        shuffle=True,
        drop_last=True)

    val_dl = DataLoader(
        val_ds,
        batch_size=args.bs * 2,
        num_workers=2,
        shuffle=False,
        drop_last=False)

    test_dl = DataLoader(
        test_ds,
        batch_size=args.bs * 2,
        num_workers=2,
        shuffle=False,
        drop_last=False)

    model = TimmModel(args.model, 10, pretrained=True)
    optimizer = Adam(model.parameters(), lr=args.lr)

    # The Accelerator owns device placement and process-group setup.
    accelerator = Accelerator()

    # prepare() wraps the model for DDP, moves everything to the right
    # device, and shards the dataloaders across processes.
    model, optimizer, train_dl, val_dl, test_dl = accelerator.prepare(
        model, optimizer, train_dl, val_dl, test_dl)
    args.accelerator = accelerator

    model = fit_model(args, model, optimizer, train_dl, val_dl)

    test_pred = predict_fn(args, model, test_dl)
    # Only one process per machine writes the submission file.
    if accelerator.is_local_main_process:
        submission["Label"] = test_pred
        submission.to_csv("./submission.csv", index=False)

多GPU训练:传入了--multi_gpu来使用多GPU训练

accelerate launch --multi_gpu ddp_train.py --ep 5 --bs 256

 2.batch size的设置

因为参数中的 batch size 对应的是每个 GPU 的 batch size:如果单 GPU 训练时 batch size 为 256,那么在两个 GPU 上应当传入 128,使全局 batch size(GPU 数量 × 每卡 batch size)与之前的单 GPU 设定保持一致。(训练时间略微增长是正常的,精度会有小小的提升)

accelerate launch --multi_gpu ddp_train.py --ep 5 --bs 128

这个batch size指的是每个GPU对应的batch size

使用多GPU的时候,本质上的真正batch size是[GPU数量*每个GPU的batch size大小]

3.传递混合精度参数

--mixed_precision=fp16

accelerate launch --multi_gpu --mixed_precision=fp16 --num_processes=2 ddp_train.py --ep 5 --bs 128

不需要修改代码,添加这个混合精度参数就可以,不仅可以加速训练过程,而且可以节省GPU的内存开销。

4.主进程打印信息

就是将

for batch in tqdm(dataloader):

改为
for batch in tqdm(dataloader, disable=not args.accelerator.is_main_process):

只在主进程显示进度条,避免重复显示

学习自B站大佬爱睡觉的KKY视频

  • 7
    点赞
  • 8
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值