【Leetcode】A Summary of Common CV Operation Implementations

This post collects implementations of common computer-vision operations in deep learning, covering the attention mechanism, NMS (non-maximum suppression), IoU (intersection over union), convolution (Conv), batch normalization (BatchNorm) and its backward pass, max pooling (Maxpooling), PixelShuffle, top-k selection, accuracy (acc), true positive rate (TPR), false positive rate (FPR), the AUC metric, k-means clustering, and Masked Convolution, and closes with mean average precision (mAP).


Attention Mechanism

import math

import torch
import torch.nn as nn
from torch.nn import functional as F

class CausalSelfAttention(nn.Module):
    """
    A vanilla multi-head masked self-attention layer with a projection at the end.
    It is possible to use torch.nn.MultiheadAttention here but I am including an
    explicit implementation here to show that there is nothing too scary here.
    """

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads
        self.key = nn.Linear(config.n_embd, config.n_embd)
        self.query = nn.Linear(config.n_embd, config.n_embd)
        self.value = nn.Linear(config.n_embd, config.n_embd)
        # regularization
        self.attn_drop = nn.Dropout(config.attn_pdrop)
        self.resid_drop = nn.Dropout(config.resid_pdrop)
        # output projection
        self.proj = nn.Linear(config.n_embd, config.n_embd)
        # causal mask to ensure that attention is only applied to the left in the input sequence
        self.register_buffer("mask", torch.tril(torch.ones(config.block_size, config.block_size))
                                     .view(1, 1, config.block_size, config.block_size))
        self.n_head = config.n_head

    def forward(self, x, layer_past=None):
        B, T, C = x.size()

        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)

        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
        att = F.softmax(att, dim=-1)
        att = self.attn_drop(att)
        y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side

        # output projection
        y = self.resid_drop(self.proj(y))
        return y
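A minimal usage sketch for the layer above. The config fields mirror exactly the attributes the class reads; the concrete values (n_embd=64, n_head=4, block_size=16, dropout 0.1) are illustrative assumptions, not from the original post.

from types import SimpleNamespace

# Hypothetical config; any object exposing these attributes works.
config = SimpleNamespace(n_embd=64, n_head=4, block_size=16,
                         attn_pdrop=0.1, resid_pdrop=0.1)
attn = CausalSelfAttention(config)
x = torch.randn(2, 16, 64)   # (B, T, C); T must not exceed block_size
y = attn(x)
print(y.shape)               # torch.Size([2, 16, 64])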

NMS

import numpy as np
def nms(dets, thresh):
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # indices sorted by descending score
    keep = []
    while order.size > 0:  # boxes remain to be processed
        i = order[0]
        keep.append(i)
        # intersection of the current highest-scoring box with the rest
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        # intersection area (clamped at zero for non-overlapping boxes)
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        # IoU = intersection / (area1 + area2 - intersection)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # keep only boxes whose overlap with box i is at most the threshold
        left_index = np.where(iou <= thresh)[0]
        # left_index is relative to order[1:], so add 1 to map it back into order
        order = order[left_index + 1]
    return keep
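A quick check with made-up boxes in (x1, y1, x2, y2, score) format; the values below are only an example:

dets = np.array([
    [10, 10, 50, 50, 0.9],
    [12, 12, 52, 52, 0.8],     # overlaps the first box heavily (IoU ~ 0.83)
    [100, 100, 150, 150, 0.7],
])
print(nms(dets, thresh=0.5))   # kept box indices: 0 and 2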

IoU

import numpy as np
def IoU(bbox, gt):
    # pairwise IoU between n predicted boxes and m ground-truth boxes,
    # both given as (x1, y1, x2, y2) rows
    lt = np.maximum(bbox[:, None, :2], gt[:, :2])  # left-top corners, shape (n, m, 2)
    rb = np.minimum(bbox[:, None, 2:], gt[:, 2:])  # right-bottom corners, shape (n, m, 2)
    wh = np.maximum(rb - lt + 1, 0)                # intersection width/height, clamped at 0
    inter_areas = wh[:, :, 0] * wh[:, :, 1]        # shape: (n, m)
    box_areas = (bbox[:, 2] - bbox[:, 0] + 1) * (bbox[:, 3] - bbox[:, 1] + 1)
    gt_areas = (gt[:, 2] - gt[:, 0] + 1) * (gt[:, 3] - gt[:, 1] + 1)
    # union = area1 + area2 - intersection, broadcast to (n, m)
    union = box_areas[:, None] + gt_areas - inter_areas
    return inter_areas / union
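A small usage sketch with illustrative boxes (the values are assumptions for demonstration):

bbox = np.array([[10, 10, 50, 50],
                 [30, 30, 70, 70]])    # n = 2 predicted boxes
gt = np.array([[12, 12, 52, 52],
               [100, 100, 150, 150]])  # m = 2 ground-truth boxes
print(IoU(bbox, gt))                   # (2, 2) matrix of pairwise IoU values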