import torch
import torch.nn as nn
import torch.nn.functional as F
from mmaction.core.bbox import bbox_target
try:
from mmdet.models.builder import HEADS as MMDET_HEADS
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
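# The two commented-out classes below are earlier loss experiments from this
# post (a focal-style BCE and a self-adjusting DSC loss), kept for reference.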
# class F_BCE(nn.Module):
# def __init__(self, pos_weight=1, reduction='mean'):
# super(F_BCE, self).__init__()
# self.pos_weight = pos_weight
# self.reduction = reduction
# def forward(self, logits, target):
# # logits: [N, *], target: [N, *]
#         logits = torch.sigmoid(logits)  # F.sigmoid is deprecated
# loss = - self.pos_weight * target * (1-logits)**2 * torch.log(logits) - \
# (1 - target) * logits**2 * torch.log(1 - logits)
# if self.reduction == 'mean':
# loss = loss.mean()
# elif self.reduction == 'sum':
# loss = loss.sum()
# return loss
# class DSCLoss(nn.Module):
# def __init__(self, alpha: float = 1.0, smooth: float = 1.0, reduction: str = "mean"):
# super().__init__()
# self.alpha = alpha
# self.smooth = smooth
# self.reduction = reduction
# def forward(self, logits, targets):
# # targets = targets.type(torch.int64)
# probs = torch.softmax(logits, dim=1)
# probs = torch.gather(probs, dim=1, index=targets)
# probs_with_factor = ((1 - probs) ** self.alpha) * probs
# loss = 1 - (2 * probs_with_factor + self.smooth) / (probs_with_factor + 1 + self.smooth)
#         if self.reduction == "mean":
#             return loss.mean()
#         elif self.reduction == "sum":
#             return loss.sum()
#         return loss
class BinaryDiceLoss(nn.Module):
"""
Args:
ignore_index: Specifies a target value that is ignored and does not contribute to the input gradient
reduction: Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'
Shapes:
output: A tensor of shape [N, *] without sigmoid activation function applied
target: A tensor of shape same with output
Returns:
Loss tensor according to arg reduction
Raise:
Exception if unexpected reduction
"""
def __init__(self, ignore_index=None, reduction='mean', **kwargs):
super(BinaryDiceLoss, self).__init__()
        self.smooth = 1  # a larger value (e.g. 10 or 100) is suggested when the target area is large
self.ignore_index = ignore_index
self.reduction = reduction
        self.batch_dice = False  # when True, compute dice over the whole batch as one sample
        if 'batch_loss' in kwargs:
            self.batch_dice = kwargs['batch_loss']
def forward(self, output, target, use_sigmoid=True):
assert output.shape[0] == target.shape[0], "output & target batch size don't match"
if use_sigmoid:
output = torch.sigmoid(output)
if self.ignore_index is not None:
validmask = (target != self.ignore_index).float()
            output = output.mul(validmask)  # avoid in-place ops so gradients can flow
target = target.float().mul(validmask)
dim0 = output.shape[0]
if self.batch_dice:
dim0 = 1
output = output.contiguous().view(dim0, -1)
target = target.contiguous().view(dim0, -1).float()
num = 2 * torch.sum(torch.mul(output, target), dim=1) + self.smooth
den = torch.sum(output.abs() + target.abs(), dim=1) + self.smooth
loss = 1 - (num / den)
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
elif self.reduction == 'none':
return loss
        else:
            raise ValueError(f'Unexpected reduction {self.reduction}')
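# Usage sketch for BinaryDiceLoss (illustrative only, not part of the
# training pipeline): pass raw logits; sigmoid is applied inside forward
# by default.
#
#   criterion = BinaryDiceLoss(reduction='mean')
#   logits = torch.randn(4, 1, 32, 32)                    # raw scores
#   target = torch.randint(0, 2, (4, 1, 32, 32)).float()  # binary mask
#   loss = criterion(logits, target)                      # scalar in [0, 1]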
class DiceLoss(nn.Module):
"""
Args:
weight: An array of shape [num_classes,]
ignore_index: Specifies a target value that is ignored and does not contribute to the input gradient
output: A tensor of shape [N, C, *]
target: A tensor of same shape with output
other args pass to BinaryDiceLoss
Return:
same as BinaryDiceLoss
"""
def __init__(self, weight=None, ignore_index=None, **kwargs):
super(DiceLoss, self).__init__()
self.kwargs = kwargs
self.weight = weight
if isinstance(ignore_index, (int, float)):
self.ignore_index = [int(ignore_index)]
elif ignore_index is None:
self.ignore_index = []
elif isinstance(ignore_index, (list, tuple)):
self.ignore_index = ignore_index
else:
            raise TypeError("Expected 'int|float|list|tuple', but got '{}'".format(type(ignore_index)))
def forward(self, output, target):
assert output.shape == target.shape, 'output & target shape do not match'
dice = BinaryDiceLoss(**self.kwargs)
total_loss = 0
output = F.softmax(output, dim=1)
for i in range(target.shape[1]):
if i not in self.ignore_index:
dice_loss = dice(output[:, i], target[:, i], use_sigmoid=False)
                if self.weight is not None:
                    assert self.weight.shape[0] == target.shape[1], \
                        'Expect weight shape [{}], got [{}]'.format(
                            target.shape[1], self.weight.shape[0])
                    dice_loss *= self.weight[i]  # fixed: was self.weights (AttributeError)
                total_loss += dice_loss
loss = total_loss / (target.size(1) - len(self.ignore_index))
return loss
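# Usage sketch for DiceLoss (illustrative, assuming one-hot targets laid out
# as [N, C, ...]): softmax over dim=1 is applied internally, so pass logits.
#
#   criterion = DiceLoss(ignore_index=0)  # skip the background channel
#   logits = torch.randn(4, 3, 16, 16)
#   target = F.one_hot(torch.randint(0, 3, (4, 16, 16)), 3)
#   target = target.permute(0, 3, 1, 2).float()  # match [N, C, H, W]
#   loss = criterion(logits, target)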
class BBoxHeadAVA(nn.Module):
    """Simplest RoI head for AVA, with a single fc layer for classification;
    bbox regression is not predicted (bbox_pred is always None).
Args:
temporal_pool_type (str): The temporal pool type. Choices are 'avg' or
'max'. Default: 'avg'.
spatial_pool_type (str): The spatial pool type. Choices are 'avg' or
'max'. Default: 'max'.
in_channels (int): The number of input channels. Default: 2048.
num_classes (int): The number of classes. Default: 81.
dropout_ratio (float): A float in [0, 1], indicates the dropout_ratio.
Default: 0.
dropout_before_pool (bool): Dropout Feature before spatial temporal
pooling. Default: True.
topk (int or tuple[int]): Parameter for evaluating multilabel accuracy.
Default: (3, 5)
        multilabel (bool): Whether used for a multilabel task. Default: True.
            (Only multilabel == True is supported for now.)
"""
def __init__(
self,
temporal_pool_type='avg',
spatial_pool_type='max',
in_channels=2048,
# The first class is reserved, to classify bbox as pos / neg
num_classes=81,
dropout_ratio=0,
dropout_before_pool=True,
topk=(3, 5),
multilabel=True,
            loss_cfg=None):  # currently unused
super(BBoxHeadAVA, self).__init__()
assert temporal_pool_type in ['max', 'avg']
assert spatial_pool_type in ['max', 'avg']
self.temporal_pool_type = temporal_pool_type
self.spatial_pool_type = spatial_pool_type
self.in_channels = in_channels
self.num_classes = num_classes
self.dropout_ratio = dropout_ratio
self.dropout_before_pool = dropout_before_pool
self.multilabel = multilabel
if topk is None:
self.topk = ()
elif isinstance(topk, int):
self.topk = (topk, )
elif isinstance(topk, tuple):
assert all([isinstance(k, int) for k in topk])
self.topk = topk
else:
            raise TypeError('topk should be int or tuple[int], '
                            f'but got {type(topk)}')
        # Class 0 is ignored when calculating multilabel accuracy,
# so topk cannot be equal to num_classes
assert all([k < num_classes for k in self.topk])
# Handle AVA first
assert self.multilabel
in_channels = self.in_channels
# Pool by default
if self.temporal_pool_type == 'avg':
self.temporal_pool = nn.AdaptiveAvgPool3d((1, None, None))
else:
self.temporal_pool = nn.AdaptiveMaxPool3d((1, None, None))
if self.spatial_pool_type == 'avg':
self.spatial_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
else:
self.spatial_pool = nn.AdaptiveMaxPool3d((None, 1, 1))
if dropout_ratio > 0:
self.dropout = nn.Dropout(dropout_ratio)
self.fc_cls = nn.Linear(in_channels, num_classes)
self.debug_imgs = None
        # self.f_bce = F_BCE()
        self.dice_loss = DiceLoss()
        # BatchNorm over the 6 action-class logits; this hard-codes
        # num_classes == 7 (class 0 is sliced off in loss() before BN)
        self.BN = nn.BatchNorm1d(6)
def init_weights(self):
nn.init.normal_(self.fc_cls.weight, 0, 0.01)
nn.init.constant_(self.fc_cls.bias, 0)
def forward(self, x):
if self.dropout_before_pool and self.dropout_ratio > 0:
x = self.dropout(x)
x = self.temporal_pool(x)
x = self.spatial_pool(x)
if not self.dropout_before_pool and self.dropout_ratio > 0:
x = self.dropout(x)
x = x.view(x.size(0), -1)
cls_score = self.fc_cls(x)
# We do not predict bbox, so return None
return cls_score, None
def get_targets(self, sampling_results, gt_bboxes, gt_labels,
rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
neg_proposals = [res.neg_bboxes for res in sampling_results]
pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
cls_reg_targets = bbox_target(pos_proposals, neg_proposals,
pos_gt_labels, rcnn_train_cfg)
return cls_reg_targets
def recall_prec(self, pred_vec, target_vec):
"""
Args:
pred_vec (tensor[N x C]): each element is either 0 or 1
target_vec (tensor[N x C]): each element is either 0 or 1
"""
correct = pred_vec & target_vec
        # torch 1.5 seems to lack automatic type promotion here,
        # so cast to float explicitly before dividing
        recall = correct.sum(1) / (target_vec.sum(1).float() + 1e-6)
        prec = correct.sum(1) / (pred_vec.sum(1).float() + 1e-6)
return recall.mean(), prec.mean()
def multilabel_accuracy(self, pred, target, thr=0.5):
pred = pred.sigmoid()
pred_vec = pred > thr
# Target is 0 or 1, so using 0.5 as the borderline is OK
target_vec = target > 0.5
recall_thr, prec_thr = self.recall_prec(pred_vec, target_vec)
recalls, precs = [], []
for k in self.topk:
_, pred_label = pred.topk(k, 1, True, True)
pred_vec = pred.new_full(pred.size(), 0, dtype=torch.bool)
num_sample = pred.shape[0]
for i in range(num_sample):
                pred_vec[i, pred_label[i]] = True
recall_k, prec_k = self.recall_prec(pred_vec, target_vec)
recalls.append(recall_k)
precs.append(prec_k)
return recall_thr, prec_thr, recalls, precs
def loss(self,
cls_score,
bbox_pred,
rois,
labels,
label_weights,
bbox_targets=None,
bbox_weights=None,
reduce=True):
losses = dict()
if cls_score is not None:
# Only use the cls_score
            # Earlier variant that kept only positive proposals:
            # pos_inds = torch.sum(labels, dim=-1) > 0
            # cls_score = cls_score[pos_inds, 1:]
            # labels = labels[pos_inds]
            # Drop class 0 (the reserved pos/neg class) before the loss
            labels = labels[:, 1:]
            cls_score = cls_score[:, 1:]
            # BatchNorm1d(6) expects exactly 6 channels here (num_classes == 7)
            cls_score = self.BN(cls_score)
            # f_bce_loss = self.f_bce
            # losses['loss_action_cls'] = f_bce_loss(cls_score, labels)
            losses['loss_action_cls'] = self.dice_loss(cls_score, labels)
            # Original BCE baseline, replaced by the dice loss above:
            # bce_loss = F.binary_cross_entropy_with_logits
            # losses['loss_action_cls'] = bce_loss(cls_score, labels)
recall_thr, prec_thr, recall_k, prec_k = self.multilabel_accuracy(
cls_score, labels, thr=0.5)
losses['recall@thr=0.5'] = recall_thr
losses['prec@thr=0.5'] = prec_thr
for i, k in enumerate(self.topk):
losses[f'recall@top{k}'] = recall_k[i]
losses[f'prec@top{k}'] = prec_k[i]
return losses
def get_det_bboxes(self,
rois,
cls_score,
img_shape,
flip=False,
crop_quadruple=None,
                       cfg=None):
        # may be used when testing with augmentation
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
assert self.multilabel
scores = cls_score.sigmoid() if cls_score is not None else None
bboxes = rois[:, 1:]
assert bboxes.shape[-1] == 4
# First reverse the flip
img_h, img_w = img_shape
if flip:
bboxes_ = bboxes.clone()
bboxes_[:, 0] = img_w - 1 - bboxes[:, 2]
bboxes_[:, 2] = img_w - 1 - bboxes[:, 0]
bboxes = bboxes_
# Then normalize the bbox to [0, 1]
bboxes[:, 0::2] /= img_w
bboxes[:, 1::2] /= img_h
def _bbox_crop_undo(bboxes, crop_quadruple):
decropped = bboxes.clone()
if crop_quadruple is not None:
x1, y1, tw, th = crop_quadruple
decropped[:, 0::2] = bboxes[..., 0::2] * tw + x1
decropped[:, 1::2] = bboxes[..., 1::2] * th + y1
return decropped
bboxes = _bbox_crop_undo(bboxes, crop_quadruple)
return bboxes, scores
if mmdet_imported:
MMDET_HEADS.register_module()(BBoxHeadAVA)
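# Minimal smoke test (a sketch; the shapes are assumptions, and mmaction must
# be installed for the bbox_target import above): feed random RoI features
# through the head and check the output shapes.
if __name__ == '__main__':
    head = BBoxHeadAVA(num_classes=81, dropout_ratio=0.5)
    head.init_weights()
    feats = torch.randn(2, 2048, 4, 8, 8)  # [num_rois, C, T, H, W]
    cls_score, bbox_pred = head(feats)
    assert cls_score.shape == (2, 81) and bbox_pred is None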