Fine-Grained Classification: DBTNet (Deep Bilinear Transform), Part 2: Code Implementation


Preface

This post follows "Fine-Grained Classification: DBTNet (Deep Bilinear Transform), Part 1: Paper Walkthrough" and gives a PyTorch implementation of DBTNet, based on https://github.com/wuwusky/DBT_Net.


I. Introduction

The original code is based on MXNet; this post provides PyTorch code and training results. DBTNet is meant to be pre-trained on ImageNet, but that dataset is too large and training would take far too long on my hardware, so I trained only on CUB200.
One difference from the paper: the activation function here keeps ReLU instead of replacing it with tanh. A hypothetical helper illustrating the swap is sketched below.
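
If you wanted to experiment with the paper's choice of activation, the change is small; the helper below is a hypothetical illustration (not part of the original code):

import torch.nn as nn

def make_activation(use_tanh=False):
    # Hypothetical switch: the paper uses tanh where this implementation keeps ReLU
    return nn.Tanh() if use_tanh else nn.ReLU(inplace=True)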

II. Code

1. DBTNet.py

Reference: https://github.com/wuwusky/DBT_Net
This part mainly implements the structure shown in the figure below:
[Figure: SG and GB module structure of DBTNet; image from the original post]

The SG module

Its main job is the 1×1 convolution used for semantic grouping, plus the computation of this module's grouping loss Lg.

import torch
import torch.nn as nn
import torch.nn.functional as F


class GroupConv(nn.Module):
    def __init__(self, in_channels, out_channels, width, num_group):
        super(GroupConv, self).__init__()
        self.num_group = num_group
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.matrix_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(True)

        nn.init.constant_(self.matrix_conv.weight, 1.0)
        nn.init.constant_(self.matrix_conv.bias, 0.1)
        self.loss = 0

    def forward(self, x):
        channels = self.out_channels
        # matrix_act = super(GroupConv, self).forward(x)  # the grouping projection: a conv layer with kernel size 1
        matrix_act = self.matrix_conv(x)
        matrix_act = self.bn(matrix_act)
        matrix_act = self.relu(matrix_act)

        tmp = matrix_act + 0.001  # small offset to avoid all-zero rows before normalization
        b, c, w, h = tmp.shape
        width = w
        # L2-normalize each channel's flattened spatial activation vector
        tmp = tmp.view(int((b*c*w*h)/(width*width)), width*width)
        tmp = F.normalize(tmp, p=2)
        tmp = tmp.view(b, channels, width*width)
        tmp = tmp.permute(1, 0, 2)
        tmp = tmp.reshape(channels, b*w*h)

        # channel-by-channel correlation matrix computed over the whole batch
        tmp_T = tmp.transpose(1, 0)
        co = tmp.mm(tmp_T)
        co = co.view(1, channels*channels)
        co = co / 128  # fixed scaling factor

        # target correlation: a block-diagonal matrix that is 1 within a group and 0 across groups
        # (use the input's device instead of hard-coding 'cuda' so CPU runs work too)
        gt = torch.ones(self.num_group, device=x.device)
        gt = gt.diag()
        gt = gt.reshape((1, 1, self.num_group, self.num_group))
        gt = gt.repeat((1, int((channels/self.num_group)*(channels/self.num_group)), 1, 1))
        gt = F.pixel_shuffle(gt, upscale_factor=int(channels/self.num_group))
        gt = gt.reshape((1, channels*channels))

        # squared distance between the actual and target correlation matrices
        loss_single = torch.sum((co-gt)*(co-gt)*0.001, dim=1)
        loss = loss_single.repeat(b)
        loss = loss / ((channels/512.0)*(channels/512.0))  # normalize by channel count relative to 512

        self.loss = loss
        return matrix_act
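
As a quick sanity check of the SG module, a minimal sketch with arbitrary toy sizes (the channel count, spatial size, and group count here are assumptions, not values from the original):

sg = GroupConv(in_channels=1024, out_channels=1024, width=28, num_group=16)
feat = torch.randn(2, 1024, 28, 28)
out = sg(feat)
print(out.shape, sg.loss.shape)  # torch.Size([2, 1024, 28, 28]) torch.Size([2])

Note that the grouping loss is stashed on the module as sg.loss rather than returned; ResNet_SG_GB collects it from all SG modules at the end of its forward pass.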

The GB module

The GB module computes pairwise (bilinear) channel interactions within each semantic group and fuses the result back into the feature map through a residual connection.

class GroupBilinear(nn.Module):
    def __init__(self, num_group, width, channels):
        super(GroupBilinear, self).__init__()
        self.num_group = num_group
        self.num_per_group = int(channels/num_group)
        self.channels = channels
        self.fc = nn.Linear(channels, channels, bias=True)
        self.bn = nn.BatchNorm2d(channels)
        # self.BL = nn.Bilinear(self.num_group, self.num_group, channels)

    def forward(self, x):
        b, c, w, h = x.shape
        width = w
        num_dim = b*c*w*h
        tmp = x.permute(0, 2, 3, 1)  # channels-last: one c-dim vector per spatial position

        # residual linear map over channels
        tmp = tmp.reshape(num_dim//self.channels, self.channels)
        my_tmp = self.fc(tmp)
        tmp = tmp + my_tmp

        # split channels into groups and compute intra-group bilinear interactions
        tmp = tmp.reshape(((num_dim//self.channels), self.num_group, self.num_per_group))
        tmp_T = tmp.permute((0, 2, 1))

        tmp = torch.tanh(torch.bmm(tmp_T, tmp)/32)
        tmp = tmp.reshape((b, width, width, self.num_per_group*self.num_per_group))
        # stretch the num_per_group^2 interaction maps back to c channels
        # (the MXNet original used bilinear upsampling; F.interpolate defaults to nearest)
        tmp = F.interpolate(tmp, (width, c))
        tmp = tmp.permute((0, 3, 1, 2))

        out = x + self.bn(tmp)
        return out
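
A matching shape check for the GB module (same toy sizes as above):

gb = GroupBilinear(num_group=16, width=28, channels=1024)
feat = torch.randn(2, 1024, 28, 28)
print(gb(feat).shape)  # torch.Size([2, 1024, 28, 28]) -- residual output, same shape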

ResNet_SG_GB

The SG and GB modules are dropped into the ResNet bottleneck: when use_SG_GB is set, the block's first 1×1 convolution is replaced by SG → GB → 3×3 convolution.

# conv3x3/conv1x1 helpers, as in torchvision's ResNet implementation (missing from the original listing):
def conv3x3(in_planes, out_planes, stride=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)


def conv1x1(in_planes, out_planes, stride=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, use_SG_GB=True, featuremap_size=0):
        super(Bottleneck, self).__init__()

        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        # self.act = nn.PReLU()
        self.downsample = downsample
        self.stride = stride
        self.use_SG_GB = use_SG_GB
        if self.use_SG_GB:
            self.SG = GroupConv(inplanes, planes, featuremap_size, 16)
            self.GB = GroupBilinear(16, featuremap_size, planes)
            self.conv1 = conv3x3(planes, planes)
        else:
            self.conv1 = conv1x1(inplanes, planes)

    def forward(self, x):
        identity = x

        if self.use_SG_GB:
            out = self.SG(x)
            out = self.GB(out)
            out = self.conv1(out)
        else:
            out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
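
A quick check that an SG-GB bottleneck preserves shapes (toy sizes; with matching in/out channels and stride 1, no downsample branch is needed):

blk = Bottleneck(inplanes=1024, planes=256, use_SG_GB=True, featuremap_size=14)
feat = torch.randn(2, 1024, 14, 14)
print(blk(feat).shape)  # torch.Size([2, 1024, 14, 14])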


class ResNet_SG_GB(nn.Module):

    def __init__(self, block, layers, num_classes=4, zero_init_residual=True, down_1=False):
        super(ResNet_SG_GB, self).__init__()
        self.inplanes = 64
        self.featuremap_size = 224
        self.down_1 = down_1
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.featuremap_size = int(self.featuremap_size * 0.5)
        # self.conv1_sim = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        # self.act = nn.PReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.featuremap_size = int(self.featuremap_size * 0.5)
        self.all_gconvs = []

        if down_1:
            self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
        else:
            self.layer1 = self._make_layer(block, 64, layers[0])
            self.featuremap_size = int(self.featuremap_size * 0.5)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.featuremap_size = int(self.featuremap_size * 0.5)
        self.layer3 = self._make_layer_SG_GB(block, 256, layers[2], stride=2)
        self.featuremap_size = int(self.featuremap_size * 0.5)
        self.layer4 = self._make_layer_SG_GB(block, 512, layers[3], stride=2)
        self.featuremap_size = int(self.featuremap_size * 0.5)

        self.SG_end = GroupConv(512 * block.expansion, 512 * block.expansion, self.featuremap_size, 32)
        self.all_gconvs.append(self.SG_end)
        self.GB_end = GroupBilinear(32, self.featuremap_size, 512 * block.expansion)
        self.bn_end = nn.BatchNorm2d(512*block.expansion)

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.out = nn.Sequential(
            nn.Linear(512 * block.expansion, num_classes),
            nn.Sigmoid(),
        )

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _make_layer_SG_GB(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        my_block = block(self.inplanes, planes, stride, downsample, True, self.featuremap_size)
        layers.append(my_block)
        self.all_gconvs.append(my_block.SG)
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            my_block = block(self.inplanes, planes, 1, None, True, self.featuremap_size)
            layers.append(my_block)
            self.all_gconvs.append(my_block.SG)
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)

        x = self.layer2(x)

        x = self.layer3(x)

        x = self.layer4(x)

        x = self.SG_end(x)
        x = self.GB_end(x)
        x = self.bn_end(x)

        x = self.avgpool(x)
        # x = F.dropout2d(x, p=0.25, training=self.training)
        x = x.view(x.size(0), -1)
        x = self.out(x)

        # average the grouping losses collected from all SG modules
        loss_sg = sum(sg.loss for sg in self.all_gconvs) / len(self.all_gconvs)

        return x, loss_sg
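
Putting it together, a minimal end-to-end sanity check (a toy batch at 224×224; the training script below uses 448×448 crops, and the modules infer the spatial size from the input, so both work):

model = ResNet_SG_GB(Bottleneck, layers=(3, 4, 6, 3), num_classes=200)
imgs = torch.randn(2, 3, 224, 224)
logits, loss_sg = model(imgs)
print(logits.shape, loss_sg.shape)  # torch.Size([2, 200]) torch.Size([2])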

2. Train2.py

The training script is as follows:

import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
import torchvision
import os
# import NetModel
import CUB200

# base_lr = 0.1
# batch_size = 24
num_epochs = 200
weight_decay = 1e-8
num_classes = 200
cub200_path = 'E:/DataSets/CUB_200_2011/'
save_model_path = 'model_saved'

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

fc = 1
ft = 2


def train(mode, Model, model_path, base_lr, batch_size, step_num):
    # load the network.
    model = Model
    model = model.to(device)
    param_to_optim = []
    if mode == fc:
        # First stage: collect all trainable parameters
        for param in model.parameters():
            if not param.requires_grad:
                continue
            param_to_optim.append(param)
        optimizer = torch.optim.SGD(param_to_optim, lr=base_lr, momentum=0.9, weight_decay=weight_decay)
    elif mode == ft:
        # Load the saved model.
        model.load_state_dict(torch.load(os.path.join(save_model_path,
                                                      model_path),
                                         map_location=lambda storage, loc: storage))
        # Load all parameters.
        # param_to_optim = model.parameters()
        optimizer = torch.optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=weight_decay)
        # for param in model.parameters():
        #     param_to_optim.append(param)
    # optimizer = torch.optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=weight_decay)
    criterion = nn.CrossEntropyLoss()

    # If the monitored accuracy does not improve for 9 consecutive epochs,
    # the learning rate is reduced by a factor of 0.1
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1, patience=9, verbose=True)

    # Calculate the per-channel mean and standard deviation of the sample data;
    # run it only once and record the values
    # get_statistic()

    # Mean and std of the CUB_200 dataset are [0.4856, 0.4994, 0.4324], [0.1817, 0.1811, 0.1927]

    # Set up the data preprocessing process
    train_transform = torchvision.transforms.Compose([torchvision.transforms.Resize(448),
                                                      torchvision.transforms.CenterCrop(448),
                                                      torchvision.transforms.RandomHorizontalFlip(),
                                                      torchvision.transforms.ToTensor(),
                                                      torchvision.transforms.Normalize([0.4856, 0.4994, 0.4324],
                                                                                       [0.1817, 0.1811, 0.1927])])
    test_transform = torchvision.transforms.Compose([torchvision.transforms.Resize(448),
                                                     torchvision.transforms.CenterCrop(448),
                                                     torchvision.transforms.ToTensor(),
                                                     torchvision.transforms.Normalize([0.4856, 0.4994, 0.4324],
                                                                                      [0.1817, 0.1811, 0.1927])])

    train_data = CUB200.CUB200(cub200_path, train=True, transform=train_transform)
    test_data = CUB200.CUB200(cub200_path, train=False, transform=test_transform)

    train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)

    print('Start training ...')
    best_acc = 0.
    best_epoch = 0
    end_patient = 0
    training_accuracy = []
    testing_accuracy = []
    epochs = []
    size = len(train_loader.dataset)
    for epoch in range(num_epochs):
        correct = 0
        total = 0
        epoch_loss = 0.
        for i, (images, labels) in enumerate(train_loader):
            images = images.to(device)
            labels = labels.to(device)

            outputs, loss_sg = model(images)
            loss = criterion(outputs, labels)
            # loss_sg has shape (batch,); reduce it to a scalar so backward() works
            loss_all = loss + 0.01*loss_sg.mean()
            # gradient accumulation: scale the loss and step every step_num mini-batches
            loss_all = loss_all/step_num
            loss_all.backward()
            if (i+1) % step_num == 0:
                optimizer.step()
                optimizer.zero_grad()

            epoch_loss += loss_all.item()  # use .item() so the graph is not retained
            _, prediction = torch.max(outputs.data, 1)
            correct += (prediction == labels).sum().item()
            total += labels.size(0)
            if (i+1) % 960 == 0:
                print('Epoch %d: Iter %d/%d, Loss %g' % (epoch + 1, (i+1) * batch_size, size, loss_all.item()))
        train_acc = 100 * correct / total
        print('Testing on test dataset...')
        test_acc = test_accuracy(model, test_loader)
        # print(epoch + 1, num_epochs, epoch_loss, train_acc, test_acc)
        print('Epoch [{}/{}] Loss: {:.4f} Train_Acc: {:.4f}  Test_Acc: {:.4f}'
              .format(epoch + 1, num_epochs, epoch_loss, train_acc, test_acc))
        scheduler.step(test_acc)
        training_accuracy.append(train_acc)
        testing_accuracy.append(test_acc)
        epochs.append(epoch)
        if test_acc > best_acc:
            mode_name = 'fc' if mode == fc else 'ft'
            # remove the previous best checkpoint before saving the new one
            model_file = os.path.join(save_model_path, 'CUB_200_train_%s_epoch_%d_acc_%g.pth' %
                                      (mode_name, best_epoch, best_acc))
            if os.path.isfile(model_file):
                os.remove(model_file)
            end_patient = 0
            best_acc = test_acc
            best_epoch = epoch + 1
            print('The accuracy is improved, save model')
            torch.save(model.state_dict(), os.path.join(save_model_path,
                                                        'CUB_200_train_%s_epoch_%d_acc_%g.pth' %
                                                        (mode_name, best_epoch, best_acc)))
        else:
            end_patient += 1
            print('Impatient: ', end_patient)

        # If the test accuracy does not improve for 100 consecutive epochs, stop training
        if end_patient >= 100:
            break
    print('After the training, the end of the epoch %d, the accuracy %g is the highest' % (best_epoch, best_acc))
    print('epochs:', epochs)
    print('training accuracy:', training_accuracy)
    print('testing accuracy:', testing_accuracy)

def test_accuracy(model, test_loader):
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)

            outputs, loss_sg = model(images)

            _, prediction = torch.max(outputs.data, 1)
            correct += (prediction == labels).sum().item()
            total += labels.size(0)
        model.train()
        return 100 * correct / total
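
The get_statistic() helper referenced in the comments above is not shown in the original. A minimal sketch of what it might look like (a hypothetical implementation; it assumes the CUB200 dataset class from the earlier post):

def get_statistic():
    # Hypothetical helper: per-channel mean/std over the training images
    transform = torchvision.transforms.Compose([torchvision.transforms.Resize((448, 448)),
                                                torchvision.transforms.ToTensor()])
    data = CUB200.CUB200(cub200_path, train=True, transform=transform)
    loader = torch.utils.data.DataLoader(dataset=data, batch_size=32)
    mean = torch.zeros(3)
    sq_mean = torch.zeros(3)
    n = 0
    for images, _ in loader:
        mean += images.mean(dim=(0, 2, 3)) * images.size(0)
        sq_mean += (images ** 2).mean(dim=(0, 2, 3)) * images.size(0)
        n += images.size(0)
    mean /= n
    std = (sq_mean / n - mean ** 2).sqrt()
    print(mean, std)  # should come out close to the values hard-coded above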

3. main.py

# import Train
import Train2
# import NetModel
import DBTNet

step_num1 = 8
step_num2 = 16

block = DBTNet.Bottleneck
model = DBTNet.ResNet_SG_GB(block=block, layers=(3, 4, 6, 3), num_classes=200)  # Train2.train() moves the model to the right device
model_path = 'CUB_200_train_fc_epoch_50_acc_79.0473.pth'
base_lr = 0.1
batch_size = 24

fc = 1
fc_base_lr = 1
fc_batch_size = int(1*step_num1/step_num1)     # max=6
ft = 2


mode = fc
if mode == fc:
    # model = NetModel.HBP(pretrained=True)
    base_lr = fc_base_lr
    batch_size = fc_batch_size
    Train2.train(mode=mode, Model=model, model_path=model_path, base_lr=base_lr,
                 batch_size=batch_size, step_num=step_num1)
elif mode == ft:
    # note: as written, fine-tuning reuses the fc hyperparameters (base_lr, batch_size, step_num1)
    base_lr = fc_base_lr
    batch_size = fc_batch_size
    Train2.train(mode=mode, Model=model, model_path=model_path, base_lr=base_lr,
                 batch_size=batch_size, step_num=step_num1)

4. Miscellaneous

Code not shown here (such as CUB200.py) can be found in my earlier posts.

