DenseNet Image Classification with Transfer Learning

Transfer learning: a pretrained DenseNet model is fine-tuned to classify images into five food categories, reaching roughly 95% accuracy on the test set.

DenseNet

GitHub link


The core of this project is the DenseNet architecture: inside each Dense Block, every layer takes the feature maps of all preceding layers as input. DenseNet is one of the classic networks built as an improvement on ResNet; it allows great depth while reusing features more effectively.
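
A minimal sketch of this connectivity pattern is shown below. It only illustrates the idea and is not the torchvision implementation; the channel counts and layer depth are made up for the example:

import torch
import torch.nn as nn

class TinyDenseBlock(nn.Module):
    # Illustrative dense block: every layer receives the concatenation of all earlier feature maps.
    def __init__(self, in_channels=16, growth_rate=8, num_layers=3):
        super().__init__()
        self.layers = nn.ModuleList()
        channels = in_channels
        for _ in range(num_layers):
            self.layers.append(nn.Sequential(
                nn.BatchNorm2d(channels),
                nn.ReLU(inplace=True),
                nn.Conv2d(channels, growth_rate, kernel_size=3, padding=1),
            ))
            channels += growth_rate  # the next layer also sees this layer's output

    def forward(self, x):
        features = [x]
        for layer in self.layers:
            out = layer(torch.cat(features, dim=1))  # concatenate all previous feature maps
            features.append(out)
        return torch.cat(features, dim=1)

block = TinyDenseBlock()
print(block(torch.randn(1, 16, 32, 32)).shape)  # 16 + 3 * 8 = 40 channels -> [1, 40, 32, 32]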

In this experiment, the dataset's train, test, and val directories each contain five sub-folders, one per class (for example train/土豆/ and test/萝卜/); adjust the paths to match your own data. Since this is a 5-class problem, the output dimension of the network's final layer is changed to 5; change it as needed for a different number of classes.

Import third-party libraries

import torch
import torch.nn as nn
import torch.optim as optim
# from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import models, transforms
import matplotlib.pyplot as plt
import os
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import imagehash
import warnings
warnings.filterwarnings("ignore")
device = "cuda" if torch.cuda.is_available() else "cpu"
batch_size = 20
torch.cuda.empty_cache()  # release cached GPU memory (calling it once is enough)

An Accuracy class that supports batch-wise model evaluation

# Accuracy metric that supports batch-wise evaluation
class Accuracy:
    def __init__(self, is_logist=True):
        # number of correctly classified samples
        self.num_correct = 0
        # total number of samples seen
        self.num_count = 0
        self.is_logist = is_logist

    def update(self, outputs, labels):
        # binary classification: a single output column
        if outputs.shape[1] == 1:
            outputs = outputs.squeeze(-1)
            # the threshold depends on whether the outputs are logits or probabilities
            if self.is_logist:
                preds = (outputs >= 0).long()
            else:
                preds = (outputs >= 0.5).long()
        else:
            # multi-class: take the index of the largest element as the predicted class
            preds = torch.argmax(outputs, dim=1).long()

        # count the correct predictions in this batch
        labels = labels.squeeze(-1)
        batch_correct = (preds == labels).float().sum()
        batch_count = len(labels)
        # accumulate
        self.num_correct += batch_correct
        self.num_count += batch_count

    def accumulate(self):
        # compute the overall metric from the accumulated counts
        if self.num_count == 0:
            return 0
        return self.num_correct / self.num_count

    def reset(self):
        self.num_correct = 0
        self.num_count = 0
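
A quick sanity check of the metric on a made-up batch (two samples, three classes; this assumes the class above and torch have already been imported):

metric = Accuracy(is_logist=True)
fake_logits = torch.tensor([[2.0, 0.1, -1.0],
                            [0.2, 1.5, 0.3]])   # made-up logits for 2 samples, 3 classes
fake_labels = torch.tensor([0, 2])
metric.update(fake_logits, fake_labels)
print(metric.accumulate())  # tensor(0.5000): one of the two predictions is correct
metric.reset()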

A Runner class for training, evaluation, and prediction

class Runner(object):
    def __init__(self, model, optimizer, loss_fn, metric=None):
        self.model = model
        self.optimizer = optimizer
        self.loss_fn = loss_fn
        # metric used for evaluation
        self.metric = metric

        # evaluation scores recorded during training
        self.dev_scores = []
        # loss curves recorded during training
        self.train_epoch_losses = []
        self.dev_losses = []
        # best evaluation score seen so far
        self.best_score = 0
   
 
    # training stage
    def train(self, train_loader, dev_loader=None, **kwargs):
        # switch the model to training mode so that its parameters are updated
        self.model.train()

        num_epochs = kwargs.get('num_epochs', 0)
        log_steps = kwargs.get('log_steps', 100)
        save_path = kwargs.get('save_path', 'best_net.pth')
        eval_steps = kwargs.get('eval_steps', 0)
        # global step counter (incremented once per epoch in this implementation)
        global_step = 0

        if eval_steps:
            if dev_loader is None:
                raise RuntimeError('Error: dev_loader can not be None!')
            if self.metric is None:
                raise RuntimeError('Error: Metric can not be None')
                
        # iterate over the training epochs
        for epoch in range(num_epochs):
            total_loss = 0
            # iterate over the training batches
            for step, data in enumerate(train_loader):
                x, y = data
                x, y = x.to(device), y.to(device)
                logits = self.model(x.float())
                loss = self.loss_fn(logits, y.long())
                total_loss += loss.item()
                if step % log_steps == 0:
                    print(f'[Train]loss:{loss.item():.5f}')

                loss.backward()
                self.optimizer.step()
                self.optimizer.zero_grad()
            # evaluate every eval_steps epochs; other validation schedules can be used as needed
            if eval_steps != 0:
                if (epoch + 1) % eval_steps == 0:
                    dev_score, dev_loss = self.evaluate(dev_loader, global_step=global_step)
                    print(f'[Evaluate] dev score:{dev_score:.5f}, dev loss:{dev_loss:.5f}')

                    if dev_score > self.best_score:
                        self.save_model(save_path)
                        print(f'[Evaluate]best accuracy performance has been updated: {self.best_score:.5f}-->{dev_score:.5f}')
                        self.best_score = dev_score

                    # remember to switch the model back to training mode after evaluation
                    self.model.train()

            global_step += 1
            # record the average training loss of this epoch
            train_loss = total_loss / len(train_loader)
            self.train_epoch_losses.append((global_step, train_loss))

        self.save_model(save_path)
        print('[Train] Train done')
        
    # evaluation stage
    def evaluate(self, dev_loader, **kwargs):
        assert self.metric is not None
        # switch the model to evaluation mode; parameters are not updated here
        self.model.eval()
        global_step = kwargs.get('global_step', -1)
        total_loss = 0
        self.metric.reset()

        with torch.no_grad():
            for batch_id, data in enumerate(dev_loader):
                x, y = data
                x, y = x.to(device), y.to(device)
                logits = self.model(x.float())
                loss = self.loss_fn(logits, y.long()).item()
                total_loss += loss
                self.metric.update(logits, y)

        dev_loss = total_loss / len(dev_loader)
        self.dev_losses.append((global_step, dev_loss))
        dev_score = self.metric.accumulate().cpu().numpy()
        self.dev_scores.append(dev_score)
        return dev_score, dev_loss
    
    # prediction stage
    def predict(self, x, **kwargs):
        self.model.eval()
        with torch.no_grad():
            logits = self.model(x)
        return logits

    # save the model parameters
    def save_model(self, save_path):
        torch.save(self.model.state_dict(), save_path)

    # load the model parameters
    def load_model(self, model_path):
        self.model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))

Load the data and build the data loaders

# compare image hashes to avoid duplicate images
def compute_image_hash(image_path):
    image = Image.open(image_path)
    image_hash = imagehash.average_hash(image)
    return str(image_hash)

# dataset that reads images from per-class sub-folders
class CustomDataset(Dataset):
    def __init__(self, root_dir, transform=None):
        self.root_dir = root_dir
        self.transform = transform
        # sort the class folders so the class-to-index mapping is deterministic
        self.classes = sorted(os.listdir(root_dir))
        self.data = []
        self.unique_hashes = set()  # hashes of the images seen so far

        for label, class_name in enumerate(self.classes):
            class_path = os.path.join(root_dir, class_name)
            for image_name in os.listdir(class_path):
                image_path = os.path.join(class_path, image_name)
                image_hash = compute_image_hash(image_path)
                if image_hash not in self.unique_hashes:  # skip images whose hash was already seen
                    self.data.append((image_path, label))
                    self.unique_hashes.add(image_hash)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        image_path, label = self.data[idx]
        image = Image.open(image_path).convert('RGB')

        if self.transform:
            image = self.transform(image)
        return image, label

# data augmentation for training
transform_train = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.RandomResizedCrop((224, 224), scale=(0.3, 1)),
    transforms.ToTensor(),
])

# no augmentation for validation and testing
transform_test_val = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor()])
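
The pretrained DenseNet weights were trained on ImageNet with normalized inputs. The results reported below were obtained with the pipelines above (no normalization), but appending the standard ImageNet normalization to both pipelines is a common and usually helpful tweak:

# Optional: ImageNet normalization (standard torchvision statistics), appended after ToTensor()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# e.g. transforms.Compose([..., transforms.ToTensor(), normalize])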

# training set loader
train_data = CustomDataset(root_dir='食物分类数据集/train', transform=transform_train)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)

# validation set loader
val_data = CustomDataset(root_dir='食物分类数据集/val', transform=transform_test_val)
val_loader = DataLoader(val_data, batch_size=batch_size, shuffle=False)

# test set loader
test_data = CustomDataset(root_dir='食物分类数据集/test', transform=transform_test_val)
test_loader = DataLoader(test_data, batch_size=4, shuffle=False)


# gather the loaders in one dict
dataloaders = {'train': train_loader, 'val': val_loader, 'test': test_loader}

# dataset sizes
dataset_sizes = {'train': len(train_data), 'val': len(val_data), 'test': len(test_data)}

# print the number of samples in each split
print('train:', len(train_data), 'test:', len(test_data), 'val:', len(val_data))


train: 134 test: 110 val: 24
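
Because the integer label of each class follows the order of the class folders, it is worth printing the class-to-index mapping once and confirming that the three splits agree (a small check that only uses the CustomDataset objects defined above):

# verify that the class-to-index mapping is identical across the three splits
print({name: idx for idx, name in enumerate(train_data.classes)})
assert train_data.classes == val_data.classes == test_data.classes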

Load the pretrained model and replace its final layer

net = models.densenet121(pretrained=True)  # load the ImageNet pretrained weights
# (newer torchvision versions use weights=models.DenseNet121_Weights.DEFAULT instead of pretrained=True)

net.classifier = nn.Linear(net.classifier.in_features, 5)  # 5-class problem

net = net.to(device)

loss_fn = nn.CrossEntropyLoss()

optimizer = optim.SGD(net.parameters(), lr=0.0005, momentum=0.9, weight_decay=0.001)
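
Here all of the network's parameters are fine-tuned with a small learning rate. When the dataset is very small, a common alternative is to freeze the pretrained feature extractor and train only the new classifier head. A minimal sketch of that variant follows (this is not what produced the results below, and the learning rate is illustrative):

# Optional variant: freeze the DenseNet backbone and train only the new 5-way classifier
for param in net.features.parameters():
    param.requires_grad = False
optimizer = optim.SGD(net.classifier.parameters(), lr=0.001, momentum=0.9)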

Train and fine-tune the network

metric = Accuracy(is_logist=True)
runner = Runner(net, optimizer, loss_fn, metric=metric)
runner.train(train_loader, num_epochs=20, dev_loader=val_loader, eval_steps=1)
[Train]loss:1.82368
[Evaluate] dev score:0.33333, dev loss:1.84212
[Evaluate]best accuracy performance has been updated: 0.00000-->0.33333
[Train]loss:1.52510
[Evaluate] dev score:0.54167, dev loss:1.61809
[Evaluate]best accuracy performance has been updated: 0.33333-->0.54167
[Train]loss:1.33757
[Evaluate] dev score:0.62500, dev loss:1.41050
[Evaluate]best accuracy performance has been updated: 0.54167-->0.62500
[Train]loss:1.04116
[Evaluate] dev score:0.79167, dev loss:1.23198
[Evaluate]best accuracy performance has been updated: 0.62500-->0.79167
[Train]loss:0.88599
[Evaluate] dev score:0.87500, dev loss:1.03205
[Evaluate]best accuracy performance has been updated: 0.79167-->0.87500
[Train]loss:0.71582
[Evaluate] dev score:0.91667, dev loss:0.84101
[Evaluate]best accuracy performance has been updated: 0.87500-->0.91667
[Train]loss:0.59828
[Evaluate] dev score:0.91667, dev loss:0.77114
[Train]loss:0.38245
[Evaluate] dev score:0.95833, dev loss:0.69001
[Evaluate]best accuracy performance has been updated: 0.91667-->0.95833
[Train]loss:0.50049
[Evaluate] dev score:0.95833, dev loss:0.64142
[Train]loss:0.25828
[Evaluate] dev score:0.95833, dev loss:0.61620
[Train]loss:0.30608
[Evaluate] dev score:0.91667, dev loss:0.63444
[Train]loss:0.34720
[Evaluate] dev score:0.91667, dev loss:0.60569
[Train]loss:0.22358
[Evaluate] dev score:0.95833, dev loss:0.55897
[Train]loss:0.31832
[Evaluate] dev score:0.95833, dev loss:0.54324
[Train]loss:0.20761
[Evaluate] dev score:0.95833, dev loss:0.54247
[Train]loss:0.28542
[Evaluate] dev score:0.95833, dev loss:0.56432
[Train]loss:0.13542
[Evaluate] dev score:0.95833, dev loss:0.56350
[Train]loss:0.28734
[Evaluate] dev score:0.95833, dev loss:0.57854
[Train]loss:0.11704
[Evaluate] dev score:0.95833, dev loss:0.56873
[Train]loss:0.17618
[Evaluate] dev score:0.95833, dev loss:0.54258
[Train] Train done
# plot the training/validation loss curves and the validation accuracy curve
def plot_training_loss_acc(runner, fig_name, fig_size=(16, 6), sample_step=1, loss_legend_loc='upper right', acc_legend_loc='lower right',
                           train_color='#8E004D', dev_color='#E20079', fontsize='x-large', train_linestyle='-', dev_linestyle='--'):
    plt.figure(figsize=fig_size)
    plt.subplot(1,2,1)
    train_items = runner.train_epoch_losses[::sample_step]
    train_steps = [x[0] for x in train_items]
    train_losses = [x[1] for x in train_items]
    
    plt.plot(train_steps, train_losses, color=train_color, linestyle=train_linestyle, label='Train loss')
    if len(runner.dev_losses) > 0:
        dev_steps = [x[0] for x in runner.dev_losses]
        dev_losses = [x[1] for x in runner.dev_losses]
        plt.plot(dev_steps, dev_losses, color=dev_color, linestyle=dev_linestyle,label='dev loss')
    
    plt.ylabel('loss')
    plt.xlabel('step')
    plt.legend(loc=loss_legend_loc)
    if len(runner.dev_scores) > 0:
        plt.subplot(1,2,2)
        plt.plot(dev_steps, runner.dev_scores, color=dev_color, linestyle=dev_linestyle, label='dev accuracy')
        
        plt.ylabel('score')
        plt.xlabel('step')
        plt.legend(loc=acc_legend_loc)
    # save the figure to disk
    plt.savefig(fig_name)
    plt.show()

plot_training_loss_acc(runner, 'Train_Dev_loss-Dev_Acc.pdf')


[Figure: training/validation loss curves and validation accuracy curve]

Testing

torch.cuda.empty_cache()  # release cached GPU memory
model_path = 'best_net.pth'
# load the parameters of the best model saved during training
runner.load_model(model_path)
test_score, test_loss = runner.evaluate(test_loader)
print('Test score: %f, Test loss: %f' % (test_score, test_loss))
Test score: 0.945455, Test loss: 0.229771
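
To see where the remaining errors are, a per-class breakdown over the test loader can be added. The sketch below reuses the loaded best model; the class order comes from test_data.classes:

# per-class accuracy on the test set (sketch)
from collections import Counter
correct, total = Counter(), Counter()
runner.model.eval()
with torch.no_grad():
    for x, y in test_loader:
        x, y = x.to(device), y.to(device)
        preds = torch.argmax(runner.model(x.float()), dim=1)
        for label, pred in zip(y.tolist(), preds.tolist()):
            total[label] += 1
            correct[label] += int(label == pred)
for idx, name in enumerate(test_data.classes):
    print(f'{name}: {correct[idx]}/{total[idx]}')
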
def pre(folder_path):
    # collect all image files in the folder
    image_files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]

    num_rows = (len(image_files) + 4) // 5  # number of rows, rounded up
    num_cols = min(len(image_files), 5)     # at most 5 columns
    # squeeze=False keeps axs two-dimensional even when there is a single row or column
    fig, axs = plt.subplots(num_rows, num_cols, figsize=(10, 2 * num_rows), squeeze=False)

    # class names, in the same order as the class folders
    class_names = ['Bread', 'Hamburger', 'Kebabs', 'Noodle', 'Rice']

    for i, image_file in enumerate(image_files):
        # load and preprocess the image
        image_path = os.path.join(folder_path, image_file)
        image = Image.open(image_path).convert('RGB')
        image = transform_test_val(image)
        image = image.unsqueeze(0)  # add a batch dimension
        image = image.to(device)

        # run the model on the image
        with torch.no_grad():
            output = runner.predict(image)
            _, predicted = torch.max(output, 1)

        # show the image and the predicted class in the corresponding subplot
        row_idx = i // num_cols
        col_idx = i % num_cols
        axs[row_idx, col_idx].imshow(image.squeeze().permute(1, 2, 0).cpu().numpy())
        axs[row_idx, col_idx].set_title(f'Predicted: {class_names[predicted.item()]}')
        axs[row_idx, col_idx].axis('off')


    # adjust the spacing between subplots
    plt.tight_layout()

    # show the figure
    plt.show()
main_folder_path = '食物分类数据集/test/'

# collect all class sub-folders of the test directory
subfolders = [f for f in os.listdir(main_folder_path) if os.path.isdir(os.path.join(main_folder_path, f))]

# run the prediction and visualization on every sub-folder
for subfolder in subfolders:
    pre(os.path.join(main_folder_path, subfolder))
