【PYTORCH】相同代码不同数据集图片,它们的训练速度不同

对于这一个问题,通常是因为两个数据集中图片大小不同。
如果网络需要接受224x224大小的图片,但是数据集中大部分图片都是1000x1000以上的,那么缩放所需要的时间就会很长,这个时候效率就会低很多。

GPU占用率一直高不上去?
GPU占用率一直高不上去也可能是图片大小的问题。Transform 不会在训练前把所有图片提前转换好,而是在 DataLoader 加载每个 batch 时才实时转换。如果原图太大,转换速度就会很慢,CPU 占用率随之升高;而 GPU 处理的始终是 transform 之后的小尺寸数据,因此会出现 CPU 占用高、GPU 占用低的情况。

可以使用以下代码把数据集图片先转化成224左右的大小

注意:Resize(224)的意思和Resize((224, 224))有区别。第一个的意思是最小边为224,较大的边等比例缩放;第二个的意思是长宽都是224

import os

from PIL import Image
from torchvision import transforms

# Pre-shrink dataset images once, so the DataLoader does not spend CPU time
# rescaling huge originals on every epoch.
# NOTE: Resize(224) keeps the aspect ratio (shortest side becomes 224);
# Resize((224, 224)) would force an exact 224x224 square.
transform = transforms.Compose([
    transforms.Resize(224),
])

if __name__ == '__main__':
    count = 0
    for root, dirs, files in os.walk('./dataset/data-files'):
        # Mirror the input directory layout under data-reform-small.
        out_root = root.replace('data-files', 'data-reform-small')
        # Bug fix: Image.save raises FileNotFoundError when the target
        # directory does not exist yet.
        os.makedirs(out_root, exist_ok=True)
        for file in files:
            path = os.path.join(root, file)
            image = transform(Image.open(path))
            image.save(os.path.join(out_root, file))
            count += 1
            if count % 100 == 0:
                print('Processed {} images'.format(count))

下面是完整的训练代码。

import enum
import os

import numpy as np
import torch
from PIL import Image
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
from torchvision import models, transforms, datasets
from torchvision.models import WeightsEnum
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F

# Train / test dataset roots.
# Pre-process the images first (resize script above) and point these paths
# at the resized copies.
TRAIN_ROOT, TEST_ROOT = './dataset/data-reform-small/train/', './dataset/data-reform-small/val/'

# Normalization mean / std. Ideally these should match the dataset's own
# statistics (or the values recommended for the pretrained backbone);
# [0.5, 0.5, 0.5] is used here as a simple default.
MEAN, STD = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]

SEED = 1337
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)

batch_size = 64
lr = 5e-3
epochs = 20
dropout_rate = 0.5
# gamma = 0.98
weight_decay = 5e-4 # L2 regularization coefficient
momentum = 0.9 # SGD momentum
num_workers = 4 # speeds up loading; NOTE: has no effect inside Jupyter notebooks (default is 0)
model_save_dir = 'model' # checkpoint output directory
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Image transforms, applied on-the-fly by the DataLoader workers.
data_transforms = {
    'train': transforms.Compose([
        transforms.Resize((224, 224)), # use the (224, 224) form; a bare 224 keeps aspect ratio and causes size-mismatch errors during training
        # transforms.CenterCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(MEAN, STD),
    ]),
    'test': transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(MEAN, STD),
    ])
}

# ImageFolder infers the class labels from the sub-directory names.
data_dataset = {
    'train': datasets.ImageFolder(TRAIN_ROOT, data_transforms['train']),
    'test': datasets.ImageFolder(TEST_ROOT, data_transforms['test']),
}

# pin_memory=True speeds up host-to-GPU transfers when CUDA is available.
data_dataloader = {
    'train': DataLoader(data_dataset['train'], batch_size=batch_size, shuffle=True, num_workers=num_workers,
                        pin_memory=True),
    'test': DataLoader(data_dataset['test'], batch_size=batch_size, shuffle=False, num_workers=num_workers,
                       pin_memory=True)
}


class FeatureExtractType(enum.Enum):
    """Backbone architectures supported by feature_extract()."""
    RESNET18 = 'resnet18'
    RESNET50 = 'resnet50'
    # NOTE(review): there is no "resnet154" in torchvision; this member
    # actually loads resnet152 (see feature_extract). The name/value are
    # misleading but renaming would break existing callers.
    RESNET154 = 'resnet154'
    GOOGLENET = 'googlenet'
    RESNEXT = 'resnext50_32x4d'


def feature_extract(feature_type: FeatureExtractType, weight: WeightsEnum = None,
                    num_classes: int = 2) -> torch.nn.Module:
    """Build a frozen pretrained backbone with a fresh trainable classifier head.

    Args:
        feature_type: which torchvision backbone to load.
        weight: optional pretrained weights; defaults to the model's DEFAULT enum.
        num_classes: number of output classes for the new head.

    Returns:
        The model with every backbone parameter frozen (requires_grad=False)
        and ``model.fc`` replaced by a small trainable MLP head.

    Raises:
        ValueError: if ``feature_type`` is not a supported member.
    """
    model = None
    if feature_type == FeatureExtractType.RESNET18:
        weight = weight or models.ResNet18_Weights.DEFAULT
        model = models.resnet18(weights=weight)
        print('Loaded ResNet18 model')
    elif feature_type == FeatureExtractType.RESNET50:
        weight = weight or models.ResNet50_Weights.DEFAULT
        model = models.resnet50(weights=weight)
        print('Loaded ResNet50 model')
    elif feature_type == FeatureExtractType.RESNET154:
        # NOTE: despite the member name, this loads torchvision's resnet152.
        weight = weight or models.ResNet152_Weights.DEFAULT
        model = models.resnet152(weights=weight)
        print('Loaded ResNet152 model')
    elif feature_type == FeatureExtractType.GOOGLENET:
        weight = weight or models.GoogLeNet_Weights.DEFAULT
        model = models.googlenet(weights=weight)
        print('Loaded GoogLeNet model')
    elif feature_type == FeatureExtractType.RESNEXT:
        weight = weight or models.ResNeXt50_32X4D_Weights.DEFAULT
        model = models.resnext50_32x4d(weights=weight)
        print('Loaded ResNeXt50 model')

    if model is None:
        raise ValueError('Invalid feature type')

    # Freeze the backbone: only the new fc head below will be trained.
    for params in model.parameters():
        params.requires_grad = False
    # Replace the final fully-connected layer with a small trainable MLP.
    in_features = model.fc.in_features
    model.fc = torch.nn.Sequential(
        torch.nn.Linear(in_features, 1024),
        torch.nn.Dropout(dropout_rate),
        torch.nn.Linear(1024, 256),
        # Bug fix: the original used `dropout_rate // 2`, which floor-divides
        # 0.5 down to 0.0 and silently disables this dropout layer.
        torch.nn.Dropout(dropout_rate / 2),
        # No softmax here: CrossEntropyLoss applies log-softmax internally,
        # and stacking a second softmax would hurt training.
        torch.nn.Linear(256, num_classes)
    )

    return model


model = feature_extract(FeatureExtractType.RESNEXT, weight=models.ResNeXt50_32X4D_Weights.IMAGENET1K_V2).to(device)
# Cross-entropy loss for classification.
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
# `milestones` is the list of epochs at which the learning rate decays:
# after epochs 3, 8, 14 and 18 it becomes lr = lr * gamma.
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3, 8, 14, 18], gamma=0.1)


def train():
    """Run the full training loop with periodic evaluation and checkpointing.

    Uses the module-level model / optimizer / scheduler / dataloaders.
    Evaluates on the test set every 2 epochs and saves a checkpoint whenever
    the mean training loss improves on the best seen so far.
    """
    model.train()
    train_mean = np.inf  # best (lowest) mean training loss seen so far

    for epoch in range(1, epochs + 1):
        train_losses = []

        print('CURRENT LEARNING RATE: ', optimizer.param_groups[0]['lr'])
        for batch_idx, (inputs, targets) in enumerate(data_dataloader['train']):

            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            loss = loss_fn(outputs, targets)

            train_losses.append(loss.item())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % 10 == 0:
                # Bug fix: the original passed (epoch, epoch, epochs) here, so
                # the [{}/{}] bracket never showed the batch position.
                print('Train Epoch: {} [{}/{}]\tLoss: {:.6f}\tAvg Loss: {:.6f}'.format(
                    epoch,
                    batch_idx, len(data_dataloader['train']),
                    loss.item(),
                    np.mean(train_losses)))

        # Evaluate on the test set every second epoch.
        if epoch % 2 == 0:
            print('Testing...')
            model.eval()
            correct = 0
            with torch.no_grad():
                for batch_idx, (inputs, targets) in enumerate(data_dataloader['test']):
                    inputs, targets = inputs.to(device), targets.to(device)
                    outputs = model(inputs)

                    pred = outputs.argmax(dim=1, keepdim=True)
                    correct += pred.eq(targets.view_as(pred)).sum().item()

                accuracy = 100. * correct / len(data_dataset['test'])
                print('\nTest set: Accuracy: {}/{} ({:.2f}%)\n'.format(
                    correct, len(data_dataset['test']),
                    accuracy))
            # Bug fix: restore training mode — the original left the model in
            # eval() after the first evaluation, silently disabling dropout
            # for every subsequent training epoch.
            model.train()

        mean_loss = np.mean(train_losses)
        print('Train Epoch: {} Average loss: {:.6f}'.format(epoch, mean_loss))
        if mean_loss < train_mean:
            print('Epoch: {} Average loss: {:.6f} Before loss: {:.6f}. Model Better Than Before Training'.format(
                epoch,
                mean_loss,
                train_mean))
            # Bug fix: torch.save fails if the checkpoint directory is missing.
            os.makedirs(model_save_dir, exist_ok=True)
            torch.save(model.state_dict(), os.path.join(model_save_dir, 'datamodel_{0}.pth'.format(mean_loss)))
            train_mean = mean_loss
        scheduler.step()  # step the LR scheduler once per epoch (not per batch)

def predict(model_path: str, file_path: str) -> bool:
    """Classify a single image with a checkpoint saved by train().

    Args:
        model_path: path to a state_dict file produced by train().
        file_path: path of the image to classify.

    Returns:
        True when the predicted class index is 1.
    """
    m = feature_extract(FeatureExtractType.RESNEXT, weight=models.ResNeXt50_32X4D_Weights.IMAGENET1K_V2).to(device)
    # Bug fix: the original never loaded the checkpoint, so model_path was
    # unused and predictions came from the untrained classifier head.
    m.load_state_dict(torch.load(model_path, map_location=device))
    m.eval()
    with torch.no_grad():
        data = Image.open(file_path)
        data = data_transforms.get('test')(data)
        # Bug fix: the model expects a batched input (1, C, H, W), but the
        # transform yields a single (C, H, W) tensor — add the batch dim.
        output = m(data.unsqueeze(0).to(device))
        pred = output.argmax(dim=1, keepdim=True).item()
        return pred == 1


# Script entry point: run the training loop (predict() is invoked separately).
if __name__ == "__main__":
    train()

  • 4
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值