通过torch vision调用vgg16实现图像分类

该博客介绍了使用PyTorch构建的深度学习图像分类模型,包括数据集的处理、训练和测试。模型基于VGG16预训练网络,并在训练中应用了图像融合、随机裁剪和颜色抖动等增强技术。同时,定义了准确率计算模块用于评估模型性能。在训练过程中,利用TensorBoard进行可视化,并在每个评估间隔保存模型。
摘要由CSDN通过智能技术生成
1.dataset代码
import random

from torchvision import datasets,transforms
from torchvision.transforms import functional_pil as F_pil
from torch.utils.data import DataLoader
import torch.nn as nn
from PIL import Image
from torchvision.transforms import functional as F

IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')


class MergeTwoImageTransformer(nn.Module):
    """Augmentation that blends the input PIL image with a randomly chosen
    image from `image_paths` (alpha-blend, 15% of the partner image).

    The partner image is random-resized-cropped to the input's size before
    blending, so the output has the same dimensions as the input.
    """

    def __init__(self, image_paths):
        super(MergeTwoImageTransformer, self).__init__()
        # Candidate pool of file paths to sample blend partners from.
        self.image_paths = image_paths

    def forward(self, img):
        # BUGFIX: the original used the private torchvision helpers
        # functional_pil._is_pil_image / functional._get_image_size, which
        # were removed in newer torchvision releases. The public PIL API
        # provides the same information.
        if not isinstance(img, Image.Image):
            raise ValueError('图像合并暂时仅支持pillow对象')
        img_w, img_h = img.size  # PIL reports (width, height)
        # Pick a random partner image and load it.
        other_path = random.choice(self.image_paths)
        other_img = datasets.folder.pil_loader(other_path)
        # Random crop of the partner, resized to match the input image.
        i, j, h, w = transforms.RandomResizedCrop.get_params(
            other_img, scale=(0.5, 1.0), ratio=(3. / 4., 4. / 3.))
        other_img = F.resized_crop(other_img, i, j, h, w, (img_h, img_w),
                                   transforms.InterpolationMode.BILINEAR)
        # Blend: keep 85% of the original, mix in 15% of the partner.
        img = Image.blend(img, other_img, 0.15)
        return img


class Dataset:
    """Wraps a torchvision ImageFolder in a DataLoader with train/eval
    transform pipelines.

    Also collects every image path under `root_dir` so the blending
    augmentation (MergeTwoImageTransformer) can sample blend partners.
    Iterating this object yields batches from the underlying DataLoader.
    """

    def __init__(self, root_dir, batch_size=8, num_works=0, train=True, shuffle=None):
        # Default behaviour: shuffle while training, keep order otherwise.
        if shuffle is None:
            shuffle = train
        self.root_dir = root_dir
        _, class_to_idx = datasets.folder.find_classes(self.root_dir)
        samples = datasets.ImageFolder.make_dataset(
            self.root_dir,
            class_to_idx,
            IMG_EXTENSIONS
        )
        # make_dataset yields (path, class_index) pairs; keep only the paths.
        self.image_paths = [path for path, _ in samples]
        transform = self.get_train_transform() if train else self.get_valid_transform()
        self.dataset = datasets.ImageFolder(
            root=self.root_dir,
            transform=transform
        )
        loader_kwargs = dict(
            dataset=self.dataset,
            shuffle=shuffle,
            num_workers=num_works,
            batch_size=batch_size,
        )
        # BUGFIX: the original passed prefetch_factor=2 exactly when
        # num_works == 0, but DataLoader raises ValueError when
        # prefetch_factor is specified without worker processes. Only set
        # it when workers exist (keeping the original value for that case).
        if num_works > 0:
            loader_kwargs['prefetch_factor'] = batch_size * num_works
        self.loader = DataLoader(**loader_kwargs)

    def __len__(self):
        # Number of individual images (not batches).
        return len(self.dataset.imgs)

    def __iter__(self):
        # Delegate iteration to the DataLoader; yields (inputs, labels) batches.
        yield from self.loader

    def get_train_transform(self):
        """Augmentation pipeline for training: image blending, random flip,
        random crop and color jitter, then tensor conversion + normalization."""
        return transforms.Compose([
            MergeTwoImageTransformer(self.image_paths),
            transforms.RandomHorizontalFlip(p=0.4),
            transforms.RandomResizedCrop(size=(224, 224), scale=(0.6, 1.0)),
            transforms.ColorJitter(),
            transforms.ToTensor(),
            # ImageNet statistics — matches the pretrained VGG16 backbone.
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

    @staticmethod
    def get_valid_transform():
        """Deterministic pipeline for evaluation: resize + normalize only."""
        return transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

2.测试准确率代码

import torch
import torch.nn as nn
class AccuracyScore(nn.Module):
    """Mean classification accuracy as an nn.Module.

    Accepts either hard predictions (same rank as the targets) or raw
    class scores/logits (one extra dimension, reduced via argmax over
    dim 1). Returns a scalar float32 tensor in [0, 1].
    """

    def __init__(self):
        super(AccuracyScore, self).__init__()

    # noinspection PyMethodMayBeStatic
    def forward(self, y_pred, y_true):
        extra_dims = y_pred.dim() - y_true.dim()
        if extra_dims == 1:
            # Scores/logits: reduce the class dimension to hard labels.
            y_pred = y_pred.argmax(dim=1)
        elif extra_dims != 0:
            raise ValueError("格式异常!")
        # Compare in the targets' dtype, then average the hit mask.
        hits = y_pred.to(y_true.dtype) == y_true
        return hits.to(torch.float32).mean()

3.模型训练测试代码

import torch.nn as nn
from torchvision import models
import torch
from image_classify_model import dataset
from torch.utils.tensorboard import SummaryWriter
import os
from image_classify_model.metric import AccuracyScore
import torch.optim as optim
import numpy as np
from . import network

# os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
# os.environ['CUDA_VISIBLE_DEVICES']='1'

class Network(nn.Module):
    """Transfer-learning classifier built on pretrained VGG16-BN.

    All pretrained weights are frozen; classifier layers 3 and 6 are
    replaced with fresh (trainable) Linear layers ending in `num_classes`
    outputs. The backbone is wrapped in nn.DataParallel.
    """

    def __init__(self, num_classes=2, **kwargs):
        super(Network, self).__init__()
        backbone = models.vgg16_bn(pretrained=True)
        # Freeze every pretrained parameter; the replacement layers created
        # below are new modules and therefore remain trainable.
        for param in backbone.parameters():
            param.requires_grad = False
        backbone.classifier[3] = nn.Linear(4096, 64)
        backbone.classifier[6] = nn.Linear(64, num_classes)
        self.vgg16 = nn.DataParallel(backbone)

    def forward(self, x):
        return self.vgg16(x)


class ImageClassifiModel(object):
    """Trainer for the VGG16-based image classifier.

    Wires together the network, loss, metric, optimizer and TensorBoard
    logging, and provides a `training` loop with periodic evaluation and
    checkpointing.
    """

    def __init__(self, num_classes=2, summary_dir=None, model_dir=None, lr=0.005, momentum=0.5,
                 weight_decay=0.05, nesterov=True, batch_size=16, num_works=0, use_gpu=True):
        super(ImageClassifiModel, self).__init__()
        self.device = torch.device('cuda' if use_gpu and torch.cuda.is_available() else 'cpu')
        self.net = Network(num_classes=num_classes)
        self.net = self.net.to(self.device)
        self.loss_fn = nn.CrossEntropyLoss()
        self.loss_fn.to(self.device)
        self.metrics = {
            'acc': AccuracyScore()
        }
        # Only the non-frozen (replacement classifier) parameters are optimized.
        self.train_optim = optim.SGD(params=[p for p in self.net.parameters() if p.requires_grad],
                                     lr=lr, momentum=momentum, nesterov=nesterov,
                                     weight_decay=weight_decay)
        self.batch_size = batch_size
        self.num_works = num_works
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists + os.makedirs pair.
        os.makedirs(model_dir, exist_ok=True)
        self.model_dir = model_dir
        os.makedirs(summary_dir, exist_ok=True)
        writer = SummaryWriter(log_dir=summary_dir)
        # BUGFIX: the dummy graph input must live on the same device as the
        # network (a CPU tensor fails against a CUDA model), and zeros are
        # deterministic where torch.empty is uninitialized memory.
        writer.add_graph(self.net, torch.zeros(self.batch_size, 3, 224, 224, device=self.device))
        writer.close()
        self.summary_dir = summary_dir

    def training(self, train_data_dir, test_data_dir, total_epoch, summary_step_interval=200,
                 eval_epoch_interval=1, save_epoch_interval=10):
        """Train for `total_epoch` epochs.

        Evaluates every `eval_epoch_interval` epochs, logs scalars every
        `summary_step_interval` steps, and saves a full-model checkpoint
        every `save_epoch_interval` epochs plus once at the end.
        """
        trainset = dataset.Dataset(root_dir=train_data_dir, batch_size=self.batch_size,
                                   num_works=self.num_works, train=True, shuffle=True)
        testset = dataset.Dataset(root_dir=test_data_dir, batch_size=self.batch_size,
                                  num_works=self.num_works, train=False, shuffle=False)
        writer = SummaryWriter(log_dir=os.path.join(self.summary_dir, 'training'))
        train_step = 0
        test_step = 0
        for epoch in range(total_epoch):
            # ---- training phase ----
            self.net.train(True)
            train_loss = []
            for inputs, labels in trainset:
                inputs, labels = inputs.to(self.device), labels.to(self.device)

                # Forward pass + metrics.
                outputs = self.net(inputs)
                _loss = self.loss_fn(outputs, labels)
                _metrics = {}
                for _key in self.metrics:
                    _metrics[_key] = self.metrics[_key](outputs, labels).cpu().numpy()

                # Backward pass + parameter update.
                self.train_optim.zero_grad()
                _loss.backward()
                self.train_optim.step()

                train_loss.append(_loss.item())
                if train_step % summary_step_interval == 0:
                    # TensorBoard + console progress.
                    writer.add_scalar('train_loss', _loss, train_step)
                    writer.add_scalars('train_metrics', _metrics, train_step)
                    print(f"Train {epoch + 1}/{total_epoch} {train_step} "
                          f"loss:{_loss.item():.3f} accuracy:{_metrics.get('acc',-0.0):.3f}")
                train_step += 1

            # ---- evaluation phase ----
            if epoch % eval_epoch_interval == 0:
                self.net.eval()
                test_loss = []
                # BUGFIX: the original evaluated with autograd enabled;
                # no_grad avoids building graphs (memory/time) during eval.
                with torch.no_grad():
                    for inputs, labels in testset:
                        inputs, labels = inputs.to(self.device), labels.to(self.device)

                        # Forward pass only — no optimizer updates here.
                        outputs = self.net(inputs)
                        _loss = self.loss_fn(outputs, labels)
                        _metrics = {}
                        for _key in self.metrics:
                            _metrics[_key] = self.metrics[_key](outputs, labels).cpu().numpy()

                        test_loss.append(_loss.item())
                        if test_step % summary_step_interval == 0:
                            writer.add_scalar('test_loss', _loss, test_step)
                            writer.add_scalars('test_metrics', _metrics, test_step)
                            print(f"Test {epoch + 1}/{total_epoch} {test_step} "
                                  f"loss:{_loss.item():.3f} accuracy:{_metrics.get('acc',-0.0):.3f}")
                        test_step += 1
                # Per-epoch mean losses (train + test when evaluated).
                writer.add_scalars('epoch_loss',
                                   {'train': np.mean(train_loss), 'test': np.mean(test_loss)},
                                   epoch)
            else:
                writer.add_scalars('epoch_loss', {'train': np.mean(train_loss)}, epoch)
            if epoch % save_epoch_interval == 0:
                # Saves the full module object (not just the state dict).
                torch.save(self.net, os.path.join(self.model_dir, f'model{epoch}.pt'))
        # Final checkpoint after the last epoch.
        torch.save(self.net, os.path.join(self.model_dir, f'model{total_epoch}.pt'))
        writer.close()

    def eval(self):
        # Not implemented in the original source.
        pass

4.main函数

# NOTE(review): this snippet relies on `import os` and a `model` module
# exposing ImageClassifiModel being imported above — the imports are not
# shown in this excerpt; confirm before running.

# Output tree: checkpoints and TensorBoard summaries are written here.
root_dir=r'D:\pythonProject\0807'

m=model.ImageClassifiModel(model_dir=os.path.join(root_dir,'output01','model'),
                           summary_dir=os.path.join(root_dir,'output01','summary'),
                           batch_size=16,
                           use_gpu=True)
# The dataset lives in a different tree than the output directory above;
# `root_dir` is deliberately rebound here.
root_dir=r'D:\pythonProject\dogcat'
m.training(save_epoch_interval=2,
           total_epoch=10,
           eval_epoch_interval=2,
           summary_step_interval=100,
           train_data_dir=os.path.join(root_dir,'train'),
           test_data_dir=os.path.join(root_dir,'test')
           )

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值