CenterTrack Improvements: Optimizing the Loss Function

Loss Function Analysis

CenterTrack mainly uses two loss functions: FastFocalLoss and L1 Loss.

class GenericLoss(torch.nn.Module):
    def __init__(self, opt):
        super(GenericLoss, self).__init__()
        self.crit = FastFocalLoss(opt=opt)     # FastFocalLoss
        self.crit_reg = RegWeightedL1Loss()    # L1 Loss
        if 'rot' in opt.heads:
            self.crit_rot = BinRotLoss()
        if 'nuscenes_att' in opt.heads:
            self.crit_nuscenes_att = WeightedBCELoss()
        self.opt = opt

   
    def forward(self, outputs, batch):
        opt = self.opt
        losses = {head: 0 for head in opt.heads}

        for s in range(opt.num_stacks):
            output = outputs[s]
            output = self._sigmoid_output(output)

            if 'hm' in output:    # the 'hm' head uses FastFocalLoss
                losses['hm'] += self.crit(
                    output['hm'], batch['hm'], batch['ind'],
                    batch['mask'], batch['cat']) / opt.num_stacks

            regression_heads = [
                'reg', 'wh', 'tracking', 'ltrb', 'ltrb_amodal', 'hps',
                'dep', 'dim', 'amodel_offset', 'velocity']

            for head in regression_heads:    # the other heads use L1 Loss
                if head in output:
                    losses[head] += self.crit_reg(
                        output[head], batch[head + '_mask'],
                        batch['ind'], batch[head]) / opt.num_stacks

        # ......

        losses['tot'] = 0
        for head in opt.heads:
            losses['tot'] += opt.weights[head] * losses[head]

        return losses['tot'], losses
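
For reference, FastFocalLoss is CenterTrack's efficient implementation of the penalty-reduced pixel-wise focal loss from CenterNet, applied to the predicted heatmap $\hat{Y}$:

$$L_{hm} = \frac{-1}{N}\sum_{xyc}\begin{cases}(1-\hat{Y}_{xyc})^{\alpha}\log(\hat{Y}_{xyc}) & \text{if } Y_{xyc}=1\\(1-Y_{xyc})^{\beta}(\hat{Y}_{xyc})^{\alpha}\log(1-\hat{Y}_{xyc}) & \text{otherwise}\end{cases}$$

with $\alpha = 2$, $\beta = 4$, and $N$ the number of objects; the `ind`, `mask`, and `cat` arguments let it gather the positive locations directly instead of scanning the whole map for peaks.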

When I trained on my own dataset with the original L1 Loss, the loss curve jittered noticeably during convergence and would not settle to a lower value. Replacing L1 Loss with Smooth L1 Loss did give better convergence.
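
The reason lies in the gradient near zero: L1's gradient has constant magnitude 1, so small residuals keep oscillating, whereas Smooth L1 switches to a quadratic inside $|x| < \beta$:

$$\text{smooth\_l1}(x) = \begin{cases}0.5x^2/\beta & |x| < \beta\\|x| - 0.5\beta & \text{otherwise}\end{cases}$$

(PyTorch's `F.smooth_l1_loss` uses $\beta = 1.0$ by default.)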

Moreover, regressing the box with L1 Loss handles the center point and the width/height independently, which severs the obvious coupling between a box's center and its size. A CIoU Loss (or EIoU Loss) can therefore be added to measure the discrepancy between the predicted and ground-truth boxes as whole boxes.
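
For reference, CIoU (Zheng et al., AAAI 2020) augments IoU with a normalized center-distance term and an aspect-ratio consistency term:

$$L_{CIoU} = 1 - IoU + \frac{\rho^2(b, b^{gt})}{c^2} + \alpha v, \qquad v = \frac{4}{\pi^2}\left(\arctan\frac{w^{gt}}{h^{gt}} - \arctan\frac{w}{h}\right)^2, \qquad \alpha = \frac{v}{1 - IoU + v}$$

where $\rho(\cdot)$ is the distance between the two box centers and $c$ is the diagonal length of the smallest enclosing box. The implementation below follows this definition; note that $4/\pi^2 \approx 0.4052847$.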

Code Implementation

Using Smooth L1 Loss

Modify RegWeightedL1Loss in loss.py:

class RegWeightedL1Loss(nn.Module):
    def __init__(self):
        super(RegWeightedL1Loss, self).__init__()

    def forward(self, output, mask, ind, target):
        pred = _tranpose_and_gather_feat(output, ind)
        # loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
        # switched to smooth_l1_loss
        loss = F.smooth_l1_loss(pred * mask, target * mask, reduction='sum')
        loss = loss / (mask.sum() + 1e-4)
        return loss
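
As a quick sanity check, here is a minimal, self-contained sketch of the modified forward pass. The stand-in `_tranpose_and_gather_feat` mirrors what CenterTrack's helper does (flatten the (B, C, H, W) map and gather the K indexed cells); all shapes are made up for the example:

import torch
import torch.nn.functional as F

# stand-in mirroring CenterTrack's _tranpose_and_gather_feat:
# (B, C, H, W) feature map -> (B, K, C) features at K indexed cells
def _tranpose_and_gather_feat(feat, ind):
    feat = feat.permute(0, 2, 3, 1).contiguous()      # (B, H, W, C)
    feat = feat.view(feat.size(0), -1, feat.size(3))  # (B, H*W, C)
    ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), feat.size(2))
    return feat.gather(1, ind)                        # (B, K, C)

B, C, H, W, K = 2, 2, 8, 8, 3
output = torch.randn(B, C, H, W, requires_grad=True)
ind = torch.randint(0, H * W, (B, K))                 # flattened cell indices
target = torch.randn(B, K, C)
mask = torch.ones(B, K, C)                            # 1 where an object exists

pred = _tranpose_and_gather_feat(output, ind)
loss = F.smooth_l1_loss(pred * mask, target * mask, reduction='sum')
loss = loss / (mask.sum() + 1e-4)
loss.backward()                                       # gradients reach `output`

One thing to watch: residuals smaller than beta (default 1.0) fall in the quadratic zone, and the wh targets are in output-resolution pixels, so for large boxes Smooth L1 mostly smooths the late fine-tuning phase rather than the whole trajectory.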

Adding CIoU Loss

1. Add the following code to loss.py:

class CIouLoss(nn.Module):
    def __init__(self):
        super(CIouLoss, self).__init__()

    def forward(self, reg, wh, mask, ind, target_reg, target_wh):
        # mask has 2 channels per object, so halve the sum to count positives
        num_pos = mask.sum() / 2 + 1e-4
        pred_reg = _tranpose_and_gather_feat(reg, ind) * mask
        pred_wh = _tranpose_and_gather_feat(wh, ind) * mask

        # areas of the predicted and target boxes
        pred_Area = pred_wh[..., 0] * pred_wh[..., 1]
        target_Area = target_wh[..., 0] * target_wh[..., 1]

        # intersection
        left_up = torch.max(pred_reg - 0.5 * pred_wh, target_reg - 0.5 * target_wh)
        right_down = torch.min(pred_reg + 0.5 * pred_wh, target_reg + 0.5 * target_wh)

        inter_section = torch.clamp(right_down - left_up, min=0.0)
        inter_area = inter_section[..., 0] * inter_section[..., 1]
        union_area = pred_Area + target_Area - inter_area
        delta = 1e-10
        ious = (inter_area + delta) / (union_area + delta)

        # smallest enclosing box and its squared diagonal c^2
        outer_left_up = torch.min(pred_reg - 0.5 * pred_wh, target_reg - 0.5 * target_wh)
        outer_right_down = torch.max(pred_reg + 0.5 * pred_wh, target_reg + 0.5 * target_wh)
        outer_diagonal_line = torch.pow(outer_right_down - outer_left_up, 2).sum(-1)

        # squared distance between box centers, rho^2
        center_dis = torch.pow(pred_reg - target_reg, 2).sum(-1)

        # aspect-ratio consistency term v and its trade-off weight alpha
        # (0.4052847 is 4 / pi^2)
        v = 0.4052847 * torch.pow(
                torch.atan(pred_wh[..., 0] / (pred_wh[..., 1] + delta)) -
                torch.atan(target_wh[..., 0] / (target_wh[..., 1] + delta)), 2)
        alpha = v / (1 - ious + v + delta)

        # CIoU = IoU - rho^2 / c^2 - alpha * v
        # (alpha * v is a separate penalty term, not part of the denominator)
        cious = ious - center_dis / (outer_diagonal_line + delta) - alpha * v
        return (1 - cious).sum() / num_pos
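
A quick shape check with dummy tensors (hypothetical sizes; run it inside loss.py, where `_tranpose_and_gather_feat` is defined):

B, K, H, W = 2, 3, 8, 8
reg = torch.rand(B, 2, H, W)             # predicted center offsets
wh = torch.rand(B, 2, H, W) * 4          # predicted widths / heights
ind = torch.randint(0, H * W, (B, K))    # flattened cell index per object
mask = torch.ones(B, K, 2)
target_reg = torch.rand(B, K, 2)
target_wh = torch.rand(B, K, 2) * 4
print(CIouLoss()(reg, wh, mask, ind, target_reg, target_wh))  # non-negative scalar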

2. Add the CIoU computation in train.py:

from model.losses import CIouLoss


# modify the GenericLoss class
class GenericLoss(torch.nn.Module):
    def __init__(self, opt):
        super(GenericLoss, self).__init__()
        self.crit = FastFocalLoss(opt=opt)
        self.crit_reg = RegWeightedL1Loss()
        self.CIOULoss = CIouLoss()
        if 'rot' in opt.heads:
            self.crit_rot = BinRotLoss()
        if 'nuscenes_att' in opt.heads:
            self.crit_nuscenes_att = WeightedBCELoss()
        self.opt = opt

    def forward(self, outputs, batch):

            ... # omitted; also initialize losses['ciou'] = 0 where the
                # losses dict is built, since 'ciou' is not in opt.heads

            for head in regression_heads:
                if head in output:
                    losses[head] += self.crit_reg(
                        output[head], batch[head + '_mask'],
                        batch['ind'], batch[head]) / opt.num_stacks

            # added: CIoU loss over the center-offset and size heads
            losses['ciou'] += self.CIOULoss(output['reg'], output['wh'], batch['reg_mask'],
                                            batch['ind'], batch['reg'], batch['wh']) / opt.num_stacks

        ... # omitted

        losses['tot'] = 0
        for head in opt.heads:
            losses['tot'] += opt.weights[head] * losses[head]

        # added: fold the CIoU term into the total (weight 1 here)
        losses['tot'] += losses['ciou'] * 1

        return losses['tot'], losses
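
The flat weight of 1 is just a starting point. If you want it tunable from the command line, something along these lines works (the --ciou_weight option is my own addition, assuming the usual argparse setup in CenterTrack's opts.py):

# in opts.py, next to the other loss-weight options (hypothetical addition):
self.parser.add_argument('--ciou_weight', type=float, default=1.0,
                         help='weight of the added CIoU loss term')

# then in GenericLoss.forward, instead of the hard-coded * 1:
losses['tot'] += losses['ciou'] * opt.ciou_weight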

Results

With Smooth L1 Loss the training loss drops faster and converges lower than with L1 Loss.

Adding CIoU Loss gave mixed results, possibly dataset-dependent; decide based on your own experiments.
