A simple image classification project with EfficientNet

A while ago I took part in a Kaggle image steganalysis competition for the first time and was lucky enough to win a silver medal. The network I used was EfficientNet, which really does work well for classification, so here is a short write-up of how to use EfficientNet for a simple classification task.
If anything here is wrong, corrections are welcome.

As shown below, the data folder holds the project's dataset, and the Type1~Type4 subfolders each contain the images of one class.
[Figure: the data/ directory with the Type1~Type4 class subfolders]
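If it helps, the expected layout looks roughly like this (the Type1~Type4 names follow the example above; substitute your own class folder names):

data/
├── Type1/
│   ├── 0001.jpg
│   └── ...
├── Type2/
├── Type3/
└── Type4/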
Let's look at the first file, dataset.py.

"""
dataset.py
"""
from glob import glob
from sklearn.model_selection import GroupKFold
import torch
import os
import random
import cv2
import pandas as pd
import numpy as np
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
from torch.utils.data import Dataset, DataLoader
import warnings

warnings.filterwarnings("ignore")  # silence assorted unhelpful warnings

dataset = []
DATA_ROOT_PATH = "data"  # root folder of the dataset

for label, kind in enumerate(["Type1", "Type2", "Type3", "Type4"]):
    # glob the folder of the current class (kind), not a hard-coded one
    for path in glob(os.path.join(DATA_ROOT_PATH, kind, "*.jpg")):
        dataset.append({
            'kind': kind,
            'image_name': os.path.basename(path),
            'label': label
        })

# shuffle the sample order
random.shuffle(dataset)
dataset = pd.DataFrame(dataset)

# K-fold cross-validation
gkf = GroupKFold(n_splits=5)

dataset.loc[:, 'fold'] = 0
for fold_number, (train_index, val_index) in enumerate(gkf.split(X=dataset.index, y=dataset['label'], groups=dataset['image_name'])):
    dataset.loc[dataset.iloc[val_index].index, 'fold'] = fold_number


# training-time augmentations
def get_train_transforms():
    return A.Compose([
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Resize(height=512, width=512, p=1.0),
        ToTensorV2(p=1.0),
    ], p=1.0)


def get_valid_transforms():
    return A.Compose([
        A.Resize(height=512, width=512, p=1.0),
        ToTensorV2(p=1.0),
    ], p=1.0)


# one-hot encode the label
def onehot(size, target):
    vec = torch.zeros(size, dtype=torch.float32)
    vec[target] = 1.
    return vec


class DatasetRetriever(Dataset):
    def __init__(self, kinds, image_names, labels, transforms=None):
        super().__init__()
        self.kinds = kinds
        self.image_names = image_names
        self.labels = labels
        self.transforms = transforms

    def __getitem__(self, index: int):
        kind, image_name, label = self.kinds[index], self.image_names[index], self.labels[index]
        image = cv2.imread(f'{DATA_ROOT_PATH}/{kind}/{image_name}', cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image /= 255.0
        if self.transforms:
            sample = {'image': image}
            sample = self.transforms(**sample)
            image = sample['image']

        target = onehot(4, label)
        return image, target

    def __len__(self) -> int:
        return self.image_names.shape[0]

    def get_labels(self):
        return list(self.labels)


fold_number = 0  # which fold is held out as the validation set


def get_dataset():
    train_dataset = DatasetRetriever(
        kinds=dataset[dataset['fold'] != fold_number].kind.values,
        image_names=dataset[dataset['fold'] != fold_number].image_name.values,
        labels=dataset[dataset['fold'] != fold_number].label.values,
        transforms=get_train_transforms(),
    )

    validation_dataset = DatasetRetriever(
        kinds=dataset[dataset['fold'] == fold_number].kind.values,
        image_names=dataset[dataset['fold'] == fold_number].image_name.values,
        labels=dataset[dataset['fold'] == fold_number].label.values,
        transforms=get_valid_transforms(),
    )
    return train_dataset, validation_dataset

What needs to change here:

DATA_ROOT_PATH = "data"  # path to your own dataset folder

for label, kind in enumerate(["YourClass1", "YourClass2", "YourClass3", "YourClass4"]):
    # replace these with your own class folder names; you can also add more classes
    for path in glob(os.path.join(DATA_ROOT_PATH, kind, "*.jpg")):
        dataset.append({
            'kind': kind,
            'image_name': os.path.basename(path),
            'label': label
        })
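As a quick sanity check (a hypothetical snippet, assuming dataset.py is importable), you can load the two splits and inspect their sizes and one sample:

from dataset import get_dataset

train_dataset, validation_dataset = get_dataset()
print(len(train_dataset), len(validation_dataset))  # roughly a 4:1 split with 5 folds

image, target = train_dataset[0]
print(image.shape, target)  # torch.Size([3, 512, 512]) and a one-hot vector of length 4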

Now the second file, utils.py:

from glob import glob
import torch
from torch import nn
import os
from datetime import datetime
import time
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
import warnings

warnings.filterwarnings("ignore")


# training-time augmentations
def get_train_transforms():
    return A.Compose([
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.Resize(height=512, width=512, p=1.0),
        ToTensorV2(p=1.0),
    ], p=1.0)


# validation-time transforms
def get_valid_transforms():
    return A.Compose([
        A.Resize(height=512, width=512, p=1.0),
        ToTensorV2(p=1.0),
    ], p=1.0)


class AverageMeter(object):
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


# label smoothing loss
class LabelSmoothing(nn.Module):
    def __init__(self, smoothing=0.05):
        super(LabelSmoothing, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing

    def forward(self, x, target):
        if self.training:
            x = x.float()
            target = target.float()
            logprobs = torch.nn.functional.log_softmax(x, dim=-1)

            nll_loss = -logprobs * target
            nll_loss = nll_loss.sum(-1)

            smooth_loss = -logprobs.mean(dim=-1)

            loss = self.confidence * nll_loss + self.smoothing * smooth_loss

            return loss.mean()
        else:
            return torch.nn.functional.cross_entropy(x, target)


# training wrapper: training loop, validation, checkpointing and logging
class Fitter:

    def __init__(self, model, device, config):
        self.config = config
        self.epoch = 0
        self.base_dir = 'base'  # folder for checkpoints and the log file
        os.makedirs(self.base_dir, exist_ok=True)  # create it up front so logging does not fail
        self.log_path = f'{self.base_dir}/log.txt'
        self.best_summary_loss = 10 ** 5

        self.model = model
        self.device = device

        # apply weight decay to all parameters except biases and normalization weights
        param_optimizer = list(self.model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.001},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]

        self.optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=config.lr)
        self.scheduler = config.SchedulerClass(self.optimizer, **config.scheduler_params)
        self.criterion = LabelSmoothing().to(self.device)
        self.log(f'Fitter prepared. Device is {self.device}')

    def fit(self, train_loader, validation_loader):
        for e in range(self.config.n_epochs):
            if self.config.verbose:
                lr = self.optimizer.param_groups[0]['lr']
                timestamp = datetime.utcnow().isoformat()
                self.log(f'\n{timestamp}\nLR: {lr}')

            t = time.time()
            summary_loss = self.train_one_epoch(train_loader)
            self.log(
                f'[RESULT]: Train. Epoch: {self.epoch}, summary_loss: {summary_loss.avg:.5f}, time: {(time.time() - t):.5f}')
            self.save(f'{self.base_dir}/last-checkpoint.bin')

            t = time.time()
            summary_loss = self.validation(validation_loader)

            self.log(
                f'[RESULT]: Val. Epoch: {self.epoch}, summary_loss: {summary_loss.avg:.5f}, time: {(time.time() - t):.5f}')
            if summary_loss.avg < self.best_summary_loss:
                self.best_summary_loss = summary_loss.avg
                self.model.eval()
                self.save(f'{self.base_dir}/best-checkpoint-{str(self.epoch).zfill(3)}epoch.bin')
                for path in sorted(glob(f'{self.base_dir}/best-checkpoint-*epoch.bin'))[:-3]:
                    os.remove(path)

            if self.config.validation_scheduler:
                self.scheduler.step(metrics=summary_loss.avg)

            self.epoch += 1

    def validation(self, val_loader):
        self.model.eval()
        summary_loss = AverageMeter()
        t = time.time()
        for step, (images, targets) in enumerate(val_loader):
            if self.config.verbose:
                if step % self.config.verbose_step == 0:
                    print(
                        f'Val Step {step}/{len(val_loader)}, ' + \
                        f'summary_loss: {summary_loss.avg:.5f},' + \
                        f'time: {(time.time() - t):.5f}', end='\r'
                    )
            with torch.no_grad():
                targets = targets.to(self.device).float()
                batch_size = images.shape[0]
                images = images.to(self.device).float()
                outputs = self.model(images)
                loss = self.criterion(outputs, targets)
                summary_loss.update(loss.detach().item(), batch_size)

        return summary_loss

    def train_one_epoch(self, train_loader):
        self.model.train()
        summary_loss = AverageMeter()
        t = time.time()
        for step, (images, targets) in enumerate(train_loader):
            if self.config.verbose:
                if step % self.config.verbose_step == 0:
                    print(
                        f'Train Step {step}/{len(train_loader)}, ' + \
                        f'summary_loss: {summary_loss.avg:.5f},' + \
                        f'time: {(time.time() - t):.5f}', end='\r')

            targets = targets.to(self.device).float()
            images = images.to(self.device).float()
            batch_size = images.shape[0]

            self.optimizer.zero_grad()
            outputs = self.model(images)
            loss = self.criterion(outputs, targets)
            loss.backward()

            summary_loss.update(loss.detach().item(), batch_size)

            self.optimizer.step()

            if self.config.step_scheduler:
                self.scheduler.step()

        return summary_loss

    def save(self, path):
        self.model.eval()
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'best_summary_loss': self.best_summary_loss,
            'epoch': self.epoch,
        }, path)

    def load(self, path):
        checkpoint = torch.load(path)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        self.best_summary_loss = checkpoint['best_summary_loss']
        self.epoch = checkpoint['epoch'] + 1

    def log(self, message):
        if self.config.verbose:
            print(message)
        with open(self.log_path, 'a+') as logger:
            logger.write(f'{message}\n')

What needs to change here:

class Fitter:

    def __init__(self, model, device, config):
        self.config = config
        self.epoch = 0
        self.base_dir = 'base'   # folder where checkpoints and the log file are saved; change this to your own path
        os.makedirs(self.base_dir, exist_ok=True)
        self.log_path = f'{self.base_dir}/log.txt'
        self.best_summary_loss = 10 ** 5

        self.model = model
        self.device = device

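As a side note, here is a quick toy check of the LabelSmoothing loss defined above (a hypothetical snippet, not part of the project files): in training mode, a confident correct prediction should give a much smaller loss than a confident wrong one.

import torch
from utils import LabelSmoothing  # the class defined above

criterion = LabelSmoothing(smoothing=0.05)
criterion.train()  # the smoothed branch is only active in training mode

target = torch.tensor([[1.0, 0.0, 0.0, 0.0]])       # one-hot target for class 0
logits_good = torch.tensor([[5.0, 0.0, 0.0, 0.0]])   # confident and correct
logits_bad = torch.tensor([[0.0, 5.0, 0.0, 0.0]])    # confident and wrong

print(criterion(logits_good, target).item())  # small loss
print(criterion(logits_bad, target).item())   # much larger loss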
Now the third file, train.py:


import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from efficientnet_pytorch import EfficientNet
from catalyst.data.sampler import BalanceClassSampler
from utils import Fitter
from dataset import get_dataset


# build the network
def get_net():
    net = EfficientNet.from_pretrained('efficientnet-b2')   # pick the backbone: efficientnet-b0 ~ efficientnet-b7
    net._fc = nn.Linear(in_features=1408, out_features=4, bias=True)  # set out_features to your number of classes (1408 is the feature size of efficientnet-b2)
    return net


net = get_net().cuda()


# hyperparameter definitions
class TrainGlobalConfig:
    num_workers = 4
    batch_size = 8  # increase if your GPU memory allows
    n_epochs = 25   # number of training epochs
    lr = 0.001  # initial learning rate

    verbose = True
    verbose_step = 1

    step_scheduler = False
    validation_scheduler = True

    SchedulerClass = torch.optim.lr_scheduler.ReduceLROnPlateau
    scheduler_params = dict(
        mode='min',
        factor=0.5,
        patience=1,
        verbose=False,
        threshold=0.0001,
        threshold_mode='abs',
        cooldown=0,
        min_lr=1e-8,
        eps=1e-08
    )


train_dataset, validation_dataset = get_dataset()


def run_training():
    device = torch.device('cuda:0')

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        sampler=BalanceClassSampler(labels=train_dataset.get_labels(), mode="downsampling"),
        batch_size=TrainGlobalConfig.batch_size,
        pin_memory=False,
        drop_last=True,
        num_workers=TrainGlobalConfig.num_workers,
    )
    val_loader = torch.utils.data.DataLoader(
        validation_dataset,
        batch_size=TrainGlobalConfig.batch_size,
        num_workers=TrainGlobalConfig.num_workers,
        shuffle=False,
        sampler=SequentialSampler(validation_dataset),
        pin_memory=False,
    )
    fitter = Fitter(model=net, device=device, config=TrainGlobalConfig)
    #     fitter.load(f'{fitter.base_dir}/last-checkpoint.bin')
    fitter.fit(train_loader, val_loader)


run_training()

Parts that need to be modified:

# build the network
def get_net():
    net = EfficientNet.from_pretrained('efficientnet-b2')   # pick the backbone: efficientnet-b0 ~ efficientnet-b7
    net._fc = nn.Linear(in_features=1408, out_features=4, bias=True)  # set out_features to your number of classes
    return net
net = get_net().cuda()
# hyperparameter definitions
class TrainGlobalConfig:
    num_workers = 4
    batch_size = 8  # increase if your GPU memory allows
    n_epochs = 25   # number of training epochs
    lr = 0.001  # initial learning rate
Modify the snippets above in order, run train.py, and training is ready to go.

To see how the trained model performs, run inference with inference.py:

import cv2
import numpy as np
import torch
from torch import nn
from efficientnet_pytorch import EfficientNet

path = "path/to/your/image.jpg"  # image to classify


def get_net():
    net = EfficientNet.from_pretrained('efficientnet-b2')
    net._fc = nn.Linear(in_features=1408, out_features=4, bias=True)
    return net


net = get_net().cuda()
checkpoint = torch.load('base/best-checkpoint-024epoch.bin')  # path to the trained checkpoint
net.load_state_dict(checkpoint['model_state_dict'])
net.eval()

# preprocess the same way as the validation transforms: RGB, [0, 1], 512x512, CHW, batch dimension
image = cv2.cvtColor(cv2.imread(path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0
image = cv2.resize(image, (512, 512))
image = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0)

with torch.no_grad():
    y_pred = net(image.cuda())
    y_pred = nn.functional.softmax(y_pred, dim=1).cpu().numpy()  # probability of each class
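To turn the probabilities into a readable prediction, take the argmax and map it back to the class names used in dataset.py (the class_names list below is a placeholder; use your own folder names):

class_names = ["Type1", "Type2", "Type3", "Type4"]  # same order as in dataset.py
pred_index = int(y_pred.argmax(axis=1)[0])
print(f"Predicted: {class_names[pred_index]} (p={y_pred[0, pred_index]:.3f})")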

Finally, here is the project's file structure (dataset.py, utils.py, train.py, inference.py, plus the data/ and base/ folders).
[Figure: project file structure]
