Classifying the RAF_DB Dataset, Part 1

Training on the RAF_DB dataset with transfer learning, based on ECANet-18.

Techniques used in the code

1. ECA channel attention
2. GPU (cuDNN) acceleration
3. AMP (automatic mixed precision)
4. Cosine annealing learning-rate schedule
5. Transfer learning

Before you start

The code was written as a notebook, so the cells must be run in order, and you need to download the official pretrained model yourself.
Official code and weights: https://github.com/BangguWu/ECANet
Personally, I find the ECA module very lightweight and efficient.
If you have a better module architecture or training tricks, leave a comment and we can improve the code together.

1. Importing the required modules

# Imports
import matplotlib.pyplot as plt
import math
from torch.cuda.amp import autocast 
from torch.cuda.amp import GradScaler
import torch
import os
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torch import optim
import torch.nn.functional as F
import numpy as np
import pandas as pd
import torch.nn as nn
import torchvision
from torchsummary import summary
import time
from torch.nn import init
from typing import Union, List, Dict, Any, Optional, cast
import warnings
warnings.filterwarnings("ignore")

2. GPU acceleration

# Let cuDNN benchmark and cache the fastest convolution algorithms for fixed-size inputs
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True

3. Configuration and data transforms

# Files for saving the training log and the model
data_csv_path = "./qyxx-eca_net.csv"   # change this file name
model_save_path = "./qyxx-eca_net.pkl"  # change this file name
train_path = "./RAF-DB/train"
val_path = "./RAF-DB/test"
# Batch size
batch_size = 128
resume = True
# Dynamic learning rate; adjust the learning rate and number of epochs as needed
lr = 5e-4
epochs = 200
D_epoch = 0 
best_acc  = 0
print("epochs:",epochs,"learning_rate:",lr,"batch_size:",batch_size)
# Device selection
flag = torch.cuda.is_available()
if flag:
    print("GPU")
else:
    print("CPU")
ngpu = 1
device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
# device = torch.device("cpu")
# Check the GPU name
#torch.cuda.get_device_name()
print("device is ", device)

# Data transforms: resize to 224x224 and apply a random horizontal flip.
# (Pre-resizing the images on disk is recommended so the work is not repeated every epoch; see the sketch after the two transforms.)
train_transform = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485,0.456,0.406],std=[0.229,0.224,0.225])
])

# A separate transform for validation: the random flip is removed
val_transform = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485,0.456,0.406],std=[0.229,0.224,0.225])
])
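
The pre-resize suggestion above can be done as a one-off pass over the dataset. A minimal sketch, assuming the RAF-DB folder layout used in this post (the "_224" output folder names are just my choice; afterwards point train_path/val_path at the new folders):

from pathlib import Path
from PIL import Image

def resize_folder(src_root="./RAF-DB/train", dst_root="./RAF-DB/train_224", size=(224, 224)):
    # Walk the class subfolders, write a 224x224 copy of every image, keep the folder structure
    for img_path in Path(src_root).rglob("*"):
        if img_path.suffix.lower() not in {".jpg", ".jpeg", ".png"}:
            continue
        dst_path = Path(dst_root) / img_path.relative_to(src_root)
        dst_path.parent.mkdir(parents=True, exist_ok=True)
        Image.open(img_path).convert("RGB").resize(size).save(dst_path)

# resize_folder()                                            # training images
# resize_folder("./RAF-DB/test", "./RAF-DB/test_224")        # test images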

4. Model definition

class eca_layer(nn.Module):
    """Constructs a ECA module.
    Args:
        channel: Number of channels of the input feature map
        k_size: Adaptive selection of kernel size
    """
    def __init__(self, channel, k_size=3):
        super(eca_layer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False) 
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Feature descriptor on the global spatial information: (N, C, H, W) -> (N, C, 1, 1)
        y = self.avg_pool(x)

        # Reshape to (N, 1, C), slide the 1D conv across channels (each channel attends to
        # its k_size neighbours), then reshape back to (N, C, 1, 1)
        y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)

        # Sigmoid gives per-channel attention weights
        y = self.sigmoid(y)

        # Rescale the input channel-wise
        return x * y.expand_as(x)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


class ECABasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, k_size=3):
        super(ECABasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, 1)
        self.bn2 = nn.BatchNorm2d(planes)
        self.eca = eca_layer(planes, k_size)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.eca(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ECABottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, k_size=3):
        super(ECABottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.eca = eca_layer(planes * 4, k_size)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)
        out = self.eca(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ResNet(nn.Module):

    def __init__(self, block, layers, num_classes=1000, k_size=[3, 3, 3, 3]):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], int(k_size[0]))
        self.layer2 = self._make_layer(block, 128, layers[1], int(k_size[1]), stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], int(k_size[2]), stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], int(k_size[3]), stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, k_size, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, k_size))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, k_size=k_size))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x


def eca_resnet18(k_size=[3, 5, 7, 7], num_classes=1_000, pretrained=False):
    """Constructs a ResNet-18 model.
    Args:
        k_size: Adaptive selection of kernel size
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        num_classes:The classes of classification
    """
    model = ResNet(ECABasicBlock, [2, 2, 2, 2], num_classes=num_classes, k_size=k_size)
    model.avgpool = nn.AdaptiveAvgPool2d(1)
    return model
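
The docstrings above mention "adaptive selection of kernel size", but eca_resnet18 actually passes a fixed k_size list ([3, 5, 7, 7], matching the released k3577 weights). In the ECA-Net paper the 1D kernel size is instead derived from the channel count, roughly the nearest odd number to (log2(C) + b) / gamma with gamma=2, b=1. A small sketch of that mapping, if you prefer to compute k_size per stage rather than hard-code it:

import math

def eca_kernel_size(channels, gamma=2, b=1):
    # Adaptive kernel size following the ECA-Net formula: odd number near (log2(C) + b) / gamma
    t = int(abs((math.log2(channels) + b) / gamma))
    return t if t % 2 else t + 1

# The four ResNet-18 stages have 64, 128, 256, 512 channels:
print([eca_kernel_size(c) for c in (64, 128, 256, 512)])   # [3, 5, 5, 5]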

5. Rewriting the fully connected layer

class SKNet(nn.Module):
    def __init__(self, num_class=7):
        super(SKNet, self).__init__()
        # Reuse the pretrained backbone (every layer except avgpool and fc);
        # `eca_net` is the model created in section 6, so run that cell before instantiating this class.
        self.features = nn.Sequential(*list(eca_net.children())[:-2])
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512, num_class)
    def forward(self, x):
        x = self.features(x)
        out = self.avgpool(x)
        out = torch.flatten(out,1)
        out = self.fc(out)
        return out
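
Because this is transfer learning, it can help to freeze the pretrained backbone at first and train only the new fully connected layer, then unfreeze everything for fine-tuning. A minimal sketch (the helper name is my own; use it after `model = SKNet().to(device)` is created in section 8, and rebuild the optimizer whenever you change what is trainable):

def set_backbone_trainable(model, trainable=False):
    # Freeze or unfreeze everything except the new classification head (model.fc)
    for p in model.features.parameters():
        p.requires_grad = trainable

# set_backbone_trainable(model, trainable=False)   # warm-up phase: only model.fc learns
# set_backbone_trainable(model, trainable=True)    # later: fine-tune the whole network
# optimizer = optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)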

6. Loading the pretrained weights

from torch.nn.parallel import DistributedDataParallel
# Path to the downloaded eca_resnet18 weights
model_path = "C:\\Users\\sk\\Desktop\\eca_resnet18_k3577.pth.tar"
eca_net = eca_resnet18()
checkpoint = torch.load(model_path)
# The official weights were saved from a (Distributed)DataParallel model, so strip the 'module.' prefix from the keys
eca_net.load_state_dict({k.replace('module.',''):v for k,v in checkpoint['state_dict'].items()})
del checkpoint
eca_net

7. Loading the dataset

# Use torchvision.datasets.ImageFolder, pointing it at the train and test folders
train_data = torchvision.datasets.ImageFolder(train_path, transform=train_transform)
# drop_last discards the final incomplete batch; num_workers is often set to roughly 4x the number of GPUs
data0_train = DataLoader(train_data, batch_size=batch_size, shuffle=True, drop_last=True,num_workers=4)
print(train_data)  # print training-set info
val_data = torchvision.datasets.ImageFolder(val_path, transform=val_transform)
data1_val = DataLoader(val_data, batch_size=batch_size, shuffle=True,drop_last=True,num_workers=4)
print(val_data)  # print test-set info

print(train_data.classes)  # class names, taken from the folder names
print(train_data.class_to_idx) # each class is assigned an index 0, 1, ...
print()
print(val_data.classes)
print(val_data.class_to_idx)
print()
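
RAF-DB's seven expression classes are fairly imbalanced, so it is worth printing the per-class sample counts before training. A small check using the ImageFolder objects loaded above:

from collections import Counter

# ImageFolder stores one class index per image in .targets; map the counts back to class names
train_counts = Counter(train_data.targets)
for idx, name in enumerate(train_data.classes):
    print(name, train_counts[idx])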

8. Optimizer and loss function

# to(device) moves the model to the GPU
model = SKNet().to(device)
# Optimizer
params = list(model.parameters()) 
optimizer = optim.AdamW(params, lr=lr)
# If you are not using SGD, Adam/AdamW is a solid choice; a learning rate of 5e-4 is usually a good starting point
# optimizer = optim.Adam(model.parameters(), lr=lr)
# Loss function
criteon = nn.CrossEntropyLoss().to(device)
# Cosine annealing learning-rate schedule
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=80, eta_min=0)
# Print the model parameters, similar to TensorFlow's summary()
summary(model, input_size=[(3, 224, 224)], batch_size=batch_size, device="cuda")
print()
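
Note that T_max=80 while the training loop runs for 200 epochs: CosineAnnealingLR does not restart, so the learning rate decays to eta_min around epoch 80 and then climbs back up along the cosine curve. A quick way to see the full schedule before training, using a throwaway optimizer so the real one is untouched:

# Simulate the schedule for `epochs` steps on a dummy optimizer
dummy_opt = optim.AdamW([torch.zeros(1, requires_grad=True)], lr=lr)
dummy_sched = torch.optim.lr_scheduler.CosineAnnealingLR(dummy_opt, T_max=80, eta_min=0)
lrs = []
for _ in range(epochs):
    lrs.append(dummy_opt.param_groups[0]["lr"])
    dummy_opt.step()      # step the optimizer before the scheduler to avoid a warning
    dummy_sched.step()
plt.plot(lrs)
plt.xlabel("epoch")
plt.ylabel("learning rate")
plt.show()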

9. Evaluation function

# Evaluation function
def evalute_(model,val_loader):
    model.eval()
    test_loss2 = 0.0
    test_corrects2 = 0.0
    number = 0
    for batchidx, (x, label) in enumerate(val_loader):
#         print(number)
    #torch.cuda.empty_cache()  # clears unneeded GPU cache, but I suggest not calling it inside the loop: it can cost a surprising amount of time
        number = number + 1
        x, label = x.to(device), label.to(device)
        # Wrap evaluation in no_grad(); without it you pay extra computation and GPU memory
        with torch.no_grad():
            y1 = model(x)
            # torch.max on the raw logits would also work, but I apply F.softmax(y1, dim=1) first, the same as during training
            _, preds1 = torch.max(F.softmax(y1,dim=1), 1)
            loss = criteon(y1, label)

            test_loss2 += loss.item()*batch_size
            test_corrects2 += torch.sum(preds1 == label.data)
    # Because drop_last is used, the full test-set size cannot be the denominator; that would make the reported accuracy too small
    test_loss1 = test_loss2 / (number*batch_size)
    test_acc1 = test_corrects2.double() / (number*batch_size)
#     print("TestDataset loss is ", test_loss1,"TestDataset accuracy is ",test_acc1)
    return test_acc1, test_loss1
print("Done")

10. Training, with AMP

# I am not deeply familiar with automatic mixed precision (AMP); this follows the official PyTorch example
scaler = torch.cuda.amp.GradScaler()
torch.cuda.empty_cache()
for epoch in range(D_epoch, epochs):
    time_one = time.time()                         # timestamp: start of this training epoch
    train_acc1 = 0.0
    train_loss1 = 0.0
    train_acc = 0.0
    train_loss = 0.0
    
    val_acc = 0.0
    val_loss = 0.0
    number = 0
    model.train()
    print("epoch:",epoch)

    for batchidx , (x ,label) in enumerate(data0_train):
        x , label = x.to(device), label.to(device)
        optimizer.zero_grad()
        with torch.cuda.amp.autocast():
            y1 = model(x)
            loss = criteon(y1,label) 
        _, preds1 = torch.max(F.softmax(y1,dim=1), 1)
        # AMP: scale the loss, then backpropagate
        scaler.scale(loss).backward()
#         loss.backward()
        # Gradient clipping; sometimes it hurts more than it helps, you know how it goes.
        # With AMP the gradients should be unscaled before clipping (per the PyTorch AMP docs).
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm = 5, norm_type=2)
        scaler.step(optimizer)
#         optimizer.step()
        scaler.update()
        train_loss1 += loss.item()*batch_size
        train_acc1 += torch.sum(preds1 == label.data).double()
        number = number + 1
    time_two = time.time()             # timestamp: end of this training epoch
    # Print how long one training epoch took, useful for comparisons
    print("The Model-Train-Time spent  %d min %.2f s"%((time_two-time_one)//60,(time_two-time_one)%60))
    # Average training loss and accuracy for this epoch
    train_loss = train_loss1 / (number*batch_size)
    train_acc = train_acc1 / (number*batch_size)
    # Average validation loss and accuracy for this epoch
    val_acc, val_loss = evalute_(model, data1_val)
    
    train_acc = train_acc.cpu()
    val_acc = val_acc.cpu()
    print('Accuracy : Train is {} , Valid is {} ;  Loss : Train is  {} ,Valid is {}'.format(train_acc, val_acc, train_loss , val_loss))
    # If you do not need the per-epoch accuracy/loss log you can comment out the next two lines; they are only needed for reports and papers.
    # (They append the metrics as one CSV row per epoch; note that pandas >= 2.0 spells the argument lineterminator.)
    dataframe = pd.DataFrame(columns = [epoch,train_acc,train_loss,val_acc, val_loss])
    dataframe.to_csv(data_csv_path,line_terminator="\n",mode='a',index=False,sep=',')
    if val_acc > best_acc:
        print("Saving new best model...")
        best_acc = val_acc 
        checkpoint = {
            'epoch': epoch,
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'best_acc':best_acc
        }
        torch.save(checkpoint,model_save_path)
#     time_three = time.time() 
#     print("Evaluation time:", time_three-time_two)
    scheduler.step()  # advance the cosine learning-rate schedule
# Unless you really need it, I suggest avoiding n-fold cross validation; good data augmentation may serve you better.
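
The resume flag set in section 3 is never actually used above. If you want to continue training from the saved checkpoint (or just reload the best model for inference), here is a minimal sketch that matches the dictionary saved in the loop; run it after section 8 and before the training loop:

if resume and os.path.exists(model_save_path):
    ckpt = torch.load(model_save_path, map_location=device)
    model.load_state_dict(ckpt['model'])
    optimizer.load_state_dict(ckpt['optimizer'])
    D_epoch = ckpt['epoch'] + 1       # continue from the next epoch
    best_acc = ckpt['best_acc']
    print("Resumed from epoch", ckpt['epoch'], "with best_acc", best_acc)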