YOLOv5 (7): ComputeLoss and build_targets

#------------------------------------------------------------------------------------------------------------------------------------#
#  1. build_targets builds the positive samples.
#
#  2. The predictions matched to those positives are used to compute the box (regression) loss, the classification loss,
#     and the objectness loss (negative samples also take part in the objectness loss).
#
#         Box loss            : from the matched prediction and positive sample, using the centre offset (relative to the
#                               grid cell's top-left corner) and w, h, compute (1 - CIoU).mean()
#         Classification loss : BCE between the positives' class targets and the matched class predictions
#         Objectness loss     : the objectness target of a positive is the CIoU between its predicted and ground-truth box
#                               (reused from the box loss); negatives get 0; BCE is then applied to all cells
#
#  3. Return                  : (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()
#------------------------------------------------------------------------------------------------------------------------------------#
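
For orientation, here is a minimal, self-contained sketch of the two inputs the loss expects; the batch size of 4 and the 80 classes (COCO-style, hence 85 = 5 + 80 output channels) are assumed example values:

import torch

bs, nc = 4, 80                                   # assumed example batch size and class count
p = [torch.randn(bs, 3, s, s, 5 + nc)            # one prediction tensor per detection layer (P3/P4/P5)
     for s in (80, 40, 20)]
targets = torch.tensor([[0, 17, 0.50, 0.40, 0.20, 0.30],   # (image index, class, x, y, w, h), boxes normalized to 0-1
                        [2,  0, 0.25, 0.70, 0.10, 0.15]])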

class ComputeLoss:
    sort_obj_iou = False

    # Compute losses
    def __init__(self, model, autobalance=False):
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
 
        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets
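        # smooth_BCE(eps) returns (1 - 0.5 * eps, 0.5 * eps); with the default label_smoothing = 0.0 this is simply
        # (1.0, 0.0), i.e. hard one-hot targets, while e.g. label_smoothing = 0.1 gives soft targets of 0.95 / 0.05.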

        # Focal loss
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
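        # FocalLoss wraps the BCE criterion and scales each element's loss by alpha_factor * (1 - p_t) ** gamma,
        # down-weighting easy, well-classified examples; with the default fl_gamma = 0 it is effectively disabled.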

        m = de_parallel(model).model[-1]  # Detect() module
        self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02])  # P3-P7
        self.ssi = list(m.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
        self.na = m.na  # number of anchors
        self.nc = m.nc  # number of classes
        self.nl = m.nl  # number of layers
        self.anchors = m.anchors
        self.device = device


#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
#  pred                                               : [(bs,3,80,80,85), (bs,3,40,40,85), (bs,3,20,20,85)]
#  targets                                            : (n,6)  each row is (image index, class, x, y, w, h), boxes normalized to 0-1
#
#  build_targets                                      : returns the positive samples (expressed per feature layer, not in original-image coordinates)
#  tcls                                               : class index of the target assigned to each positive grid cell (list with one entry per feature layer)
#  tbox     cat((gxy - gij, gwh), 1)                  : per positive grid cell, the offset of the target centre from that cell's top-left corner plus the target's w, h (list with one entry per feature layer)
#  indices  (b, a, gj, gi)                            : per positive grid cell, the batch index, anchor index and the cell's top-left coordinates gj, gi (each target yields one to three cells) (list with one entry per feature layer)
#  anchors                                            : the anchor (w, h) matched to each positive grid cell
#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
    def __call__(self, p, targets):  # predictions, targets
        lcls = torch.zeros(1, device=self.device)  # class loss
        lbox = torch.zeros(1, device=self.device)  # box loss
        lobj = torch.zeros(1, device=self.device)  # object loss
        tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            #---------------------------------------------------------------------------------#
            #  Take this layer's positive-sample indices: batch, anchor, grid-cell coordinates
            #  tobj  : objectness targets; negatives stay 0, positives are later set to the
            #          CIoU between the matched prediction and its ground-truth box
            #---------------------------------------------------------------------------------#
            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device)  # target obj

            n = b.shape[0]  # number of targets
            if n:
                #---------------------------------------------------------------------------------------------------------#
                #  pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1)  # faster, requires torch 1.8.0
                #  Gather the predictions that correspond to the positive samples
                #---------------------------------------------------------------------------------------------------------#
                pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1)  # target-subset of predictions
                
                #--------------------------------------------------------------------------------------------#
                #  Regression (box / localization loss)
                #
                #  pxy   : predicted centre offset relative to the grid cell's top-left corner
                #  pwh   : predicted box w, h on the current feature layer
                #  pbox  : concatenation of the predicted offset and w, h
                #  iou   : CIoU between the predicted box and the target box (both measured around the cell)
                #  lbox  : accumulate (1 - CIoU).mean() as the box loss
                #--------------------------------------------------------------------------------------------#
                pxy = pxy.sigmoid() * 2 - 0.5
                pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze()  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss
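                # Note: pxy = sigmoid(x) * 2 - 0.5 lies in (-0.5, 1.5), matching the ±0.5-cell expansion performed in
                # build_targets, and pwh = (sigmoid(x) * 2) ** 2 * anchor lies in (0, 4 * anchor), matching the default
                # anchor_t = 4.0 width/height-ratio threshold used when assigning targets to anchors.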
                

                # Objectness
                iou = iou.detach().clamp(0).type(tobj.dtype)
                if self.sort_obj_iou:
                    j = iou.argsort()
                    b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j]
                if self.gr < 1:
                    iou = (1.0 - self.gr) + self.gr * iou
                tobj[b, a, gj, gi] = iou  # iou ratio
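                # With self.gr = 1.0 the objectness target is exactly the detached, clamped CIoU. If several positives
                # land on the same (b, a, gj, gi) cell, later writes overwrite earlier ones; sort_obj_iou sorts them by
                # IoU in ascending order so that the highest-IoU match is the one that remains.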

                
                #--------------------------------------------------------------------------------#
                #  Classification loss
                #
                #  pcls  : (k, 80) class predictions for the k positives
                #  t     : (k, 80) class targets; the column of each target's class is set to self.cp
                #  lcls  : binary cross-entropy between the two, accumulated as the class loss
                #--------------------------------------------------------------------------------#
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(pcls, self.cn, device=self.device)  # targets
                    t[range(n), tcls[i]] = self.cp
                    lcls += self.BCEcls(pcls, t)  # BCE
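                # e.g. with label_smoothing = 0.1 (cp = 0.95, cn = 0.05) a positive of class 3 gets the target row
                # [0.05, 0.05, 0.05, 0.95, 0.05, ...]; with the default 0.0 it is a plain one-hot row.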

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
                    

            #---------------------------------------------------------------------------------------------#
            #  Objectness loss
            #
            #  tobj              : objectness targets; negatives are 0, positives hold the prediction/target CIoU
            #  obji              : BCE between the predicted objectness of every cell and tobj
            #  lobj              : accumulated objectness loss
            #  self.balance[i]   : per-layer weight (the high-resolution layer responsible for small objects gets the largest weight)
            #---------------------------------------------------------------------------------------------#
            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        #--------------------------------------------#
        #  Weight each loss term by its hyperparameter
        #--------------------------------------------#
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
#  build_targets              : records the positive samples
#                               for every grid cell that contains a target centre, plus the neighbouring cells added by the ±0.5 expansion, it records
#                               the target's class, matched anchor, batch index and the offset of the target centre from that cell's top-left corner.
#                               Because one target can match several anchors and is assigned to up to three grid cells, the number of recorded
#                               positives is usually larger than the number of targets.
#
#  Returns
#  indices                    : batch index, anchor index and grid-cell top-left coordinates gj, gi of every positive (one to three cells per target)
#  tbox                       : offset of the target centre from its grid cell's top-left corner plus the target's w, h
#  anch                       : anchor (w, h) matched to each positive
#  tcls                       : class index of each positive
#
#  p                          : [(bs,3,80,80,85), (bs,3,40,40,85), (bs,3,20,20,85)]
#  targets                    : (n,6)
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
    def build_targets(self, p, targets):
        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        tcls, tbox, indices, anch = [], [], [], []
        gain = torch.ones(7, device=self.device)  # normalized to gridspace gain
        ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        #---------------------------------------------------------------------#
        #  (na,nt,6) + (na,nt,1)  =>  (3,nt,7)   (image, class, x, y, w, h, anchor index)
        #  replicate the targets once per anchor and append an anchor-index column
        #---------------------------------------------------------------------#
        targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2)  # append anchor indices  
        #---------------------------------------------------------------------#
        #   g            : cell-fraction threshold for the neighbour-cell expansion
        #   off          : offsets for self, left, up, right, down (subtracted from gxy later)
        #---------------------------------------------------------------------#
        g = 0.5  # bias
        off = torch.tensor(  #[0,0],[0.5,0],[0,0.5],[-0.5,0],[0,-0.5]
            [
                [0, 0],
                [1, 0],
                [0, 1],
                [-1, 0],
                [0, -1],  # j,k,l,m
                # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
            ],
            device=self.device).float() * g  # offsets  
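        # Example: on an 80x80 layer with g = 0.5, a target centre at (12.2, 7.7) (cell fractions 0.2 and 0.7) selects,
        # besides its own cell, the cell to its left (0.2 < 0.5) and the cell below (80 - 7.7 has fraction 0.3 < 0.5),
        # i.e. three cells in total.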
        #-----------------------------------------------------------------#
        #  Find the positive samples of each feature layer
        #-----------------------------------------------------------------#
        for i in range(self.nl):
            #-------------------------------------------------------------#
            #  anchors         : anchor sizes of the current feature layer (already in grid units)
            #  shape           : prediction shape of the current feature layer
            #-------------------------------------------------------------#
            anchors, shape = self.anchors[i], p[i].shape
            gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]]  # xyxy gain
             
            #------------------------------------------------------------------------------------------#
            #  Match targets to anchors
            #  t scales the normalized labels onto the current feature map
            #  targets : (3,nt,7)  (image, class, x, y, w, h, anchor index)
            #  gain    : e.g. tensor([ 1.,  1., 80., 80., 80., 80.,  1.]) for the 80x80 layer
            #------------------------------------------------------------------------------------------#
            t = targets * gain  # shape(3,n,7)
            if nt:
                #--------------------------------------------------------------------------------#
                #  Matches
                #  r           : (3,nt,2) / (3,1,2)  =>  (3,nt,2)   w/h ratio of every target to every anchor
                #  j           : (3,nt)   True where the target and anchor shapes are compatible (ratio < anchor_t)
                #  t           : (k,7)    targets that pass the threshold, one row per kept target-anchor pair
                #--------------------------------------------------------------------------------#
                r = t[..., 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter
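                # e.g. anchor (10, 13) vs. target wh (30, 20): r = (3.0, 1.54), max(r, 1/r).max() = 3.0 < anchor_t (4.0),
                # so the pair is kept; a (50, 20) target would give 5.0 and be dropped for this anchor.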

                #------------------------------------------------------------------------------------------------------------------------------------#
                #  Offsets : decide, for every kept target, which neighbouring grid cells also become positives
                #  gxy     : (k,2)  target centres measured from the top-left corner of the feature map
                #  gxi     : (2,) - (k,2)  =>  (80,80) - (x,y)   the same centres measured from the bottom-right corner
                #  j, k    : True when the left / upper neighbouring cell should also be used
                #  l, m    : True when the right / lower neighbouring cell should also be used
                #  t       : targets replicated for every selected cell (the centre cell plus the chosen neighbours)
                #
                #  j                               : (5,k)
                #  torch.zeros_like(gxy)[None]     : (1,k,2)
                #  off[:, None]                    : (5,1,2)  =>  (5,k,2)
                #  offsets                         : (n,2)  per replica, the offset that is later subtracted from gxy to land in the
                #                                    corresponding cell (i.e. the direction in which that replica was expanded)
                #------------------------------------------------------------------------------------------------------------------------------------#
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse 
                j, k = ((gxy % 1 < g) & (gxy > 1)).T
                l, m = ((gxi % 1 < g) & (gxi > 1)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
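                # Row 0 of j is always True (the cell containing the centre); j/l and k/m are mutually exclusive, so each
                # kept target is replicated into at most three cells, and offsets records, per replica, which neighbour it is.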
            else:
                t = targets[0]
                offsets = 0
            #---------------------------------------------------------------------------------------------------------------#
            #  Define
            #  gij  : subtract the offsets from the target centres and floor, giving the top-left coordinates of the
            #         one to three grid cells assigned to each target
            #---------------------------------------------------------------------------------------------------------------#
            bc, gxy, gwh, a = t.chunk(4, 1)  # (image, class), grid xy, grid wh, anchors
            a, (b, c) = a.long().view(-1), bc.long().T  # anchors, image, class
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid indices


            #-------------------------------------------------------------------------------------------------------------------------------------------------------------#
            #  Append : record this layer's positives
            #           for every assigned grid cell (the centre cell and the expanded neighbours) store the target's class, matched anchor,
            #           batch index and the offset of the target centre from that cell's top-left corner. Because one target can match several
            #           anchors and up to three cells, the number of entries exceeds the number of targets.
            #
            #  indices : batch index, anchor index and grid-cell top-left coordinates gj, gi of every positive
            #  tbox    : offset of the target centre from its grid cell's top-left corner plus the target's w, h
            #  anch    : anchor (w, h) matched to each positive
            #  tcls    : class index of each positive
            #-------------------------------------------------------------------------------------------------------------------------------------------------------------#
            indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1)))  # image, anchor, grid
            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
            anch.append(anchors[a])  # anchors
            tcls.append(c)  # class

        return tcls, tbox, indices, anch
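
For context, this is roughly how the loss is driven from YOLOv5's train.py. The lines below are a schematic sketch, not runnable on their own: model, train_loader, optimizer and device are assumed to exist already, and the real script additionally handles AMP scaling, EMA, warmup and gradient accumulation.

compute_loss = ComputeLoss(model)                           # build the criteria once the model is on its device
for imgs, targets, paths, _ in train_loader:                # targets: (n, 6) = (image index, class, x, y, w, h)
    pred = model(imgs.to(device).float() / 255)             # list of per-layer predictions
    loss, loss_items = compute_loss(pred, targets.to(device))
    loss.backward()                                         # loss is already scaled by the batch size
    optimizer.step()
    optimizer.zero_grad()
    # loss_items holds the detached (lbox, lobj, lcls), useful for logging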

If build_targets still feels opaque, you can step through it with the data below; add whatever extra pieces you need, as only part of the execution code is reproduced here. The snippet is self-contained and runnable as-is.

#########  Data (simplified: 4 target rows already scaled to an 80x80 feature map; columns = image, class, x, y, w, h, anchor index)
import torch

g = 0.5
t = torch.tensor([
    [ 0.00000, 29.00000, 47.08631, 50.07596,  2.28144, 0.76657, 0.00000],
    [ 1.00000,  0.00000,  5.80370,  7.32453,  2.08485, 5.97043, 0.00000],
    [ 1.00000,  0.00000,  0.98552,  1.23458,  1.97104, 5.62838, 0.00000],
    [15.00000, 51.00000,  9.66797, 10.64564,  9.39063, 8.40045, 2.00000],
])
gain = torch.tensor([1., 1., 80., 80., 80., 80., 1.])
off = torch.tensor(  # [0,0],[0.5,0],[0,0.5],[-0.5,0],[0,-0.5]
    [
        [0, 0],
        [1, 0],
        [0, 1],
        [-1, 0],
        [0, -1],  # j,k,l,m
        # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
    ]).float() * g

#####  Execution code (the neighbour-cell expansion from build_targets)
gxy = t[:, 2:4]                       # grid xy
gxi = gain[[2, 3]] - gxy              # inverse (distance to the right/bottom edge)
j, k = ((gxy % 1 < g) & (gxy > 1)).T
l, m = ((gxi % 1 < g) & (gxi > 1)).T
j = torch.stack((torch.ones_like(j), j, k, l, m))
t = t.repeat((5, 1, 1))[j]
offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]

bc, gxy, gwh, a = t.chunk(4, 1)
a, (b, c) = a.long().view(-1), bc.long().T  # anchor index, image index, class
gij = (gxy - offsets).long()
gi, gj = gij.T                              # grid indices

print(t.shape, offsets.shape)  # how many positives the 4 original targets expanded into
print(gij)                     # top-left corner of every assigned grid cell
