NMS (non_maximum_suppression): Non-Maximum Suppression for Object Detection

Preface

In deep-learning-based object detection, NMS is an unavoidable step: whether the method is anchor-based or center-point-based, the predicted detection boxes ultimately have to be deduplicated with NMS.

NMS Principle and Implementation Steps

  1. Sort the detection results by score, from high to low.
  2. Take the box with the highest score, add it to the final result, and compute the IoU between this box and every remaining box (a small IoU example follows this list).
  3. Discard the boxes whose IoU from the previous step exceeds the threshold; the remaining boxes form the new candidate queue.
  4. From the candidates, take the box with the highest score and again compute its IoU with the other boxes.
  5. Repeat until no candidate boxes are left.
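
The IoU (Intersection over Union) used in steps 2-4 is the overlap area of two boxes divided by the area of their union: IoU = inter / (area_A + area_B - inter). As a minimal illustration (not from the original post; the two boxes below are made up), the following NumPy snippet computes the IoU of two boxes using the same "+1" pixel convention as the code further down:

import numpy as np

# two hypothetical boxes in (x1, y1, x2, y2) form
a = np.array([0, 0, 10, 10], dtype=np.float32)
b = np.array([5, 5, 15, 15], dtype=np.float32)

# intersection rectangle, clamped to zero width/height if the boxes do not overlap
w = max(0.0, min(a[2], b[2]) - max(a[0], b[0]) + 1)
h = max(0.0, min(a[3], b[3]) - max(a[1], b[1]) + 1)
inter = w * h

area_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1)
area_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1)
print(inter / (area_a + area_b - inter))  # 36 / (121 + 121 - 36) ≈ 0.175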

Python code

"""
boxes: shape(N,5) N个box结果,包括x1,y1,x2,y2,score
threadshold: IOU筛选的阈值
"""
def non_maximum_suppression(boxes,threashold):
    select_index = [] # 最终的结果列表
    scores = boxes[:,4] # 取出score
    x1 = boxes[:,0]
    y1 = boxes[:,1]
    x2 = boxes[:,2]
    y2 = boxes[:,3]
    area = (x2-x1+1) * (y2-y1+1) # 计算每个box的面积
    index = scores.argsort[::-1]#得到的是排序后的索引
    while index.size > 0: # 循环直到候选框中没有box
        choose = index[0] # 选出分数最大的那个index
        select_boxes.append(choose)
        # 计算取出来的那个和候选框中其他box的iou
        x11 = np.maximum(x1[choose],x1[index[1:]])
        y11 = np.maximum(y1[choose],y1[index[1:]])
        x22 = np.minimum(x2[choose],x2[index[1:]])
        y22 = np.minimum(y2[choose],y2[index[1:]])
        w = np.maximum(0,x22-x11)
        h = np.maximum(0,y22-y11)
        iou = w*h/(area[choose]+area[index[1:]] - w*h)
        idx = iou.where(iou<=threashold)[0]
        
        index = index[idx+1] # 将阈值小于的box加到候选框队列中
    return select_boxes
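
A quick usage check (the boxes and the 0.5 threshold below are made-up values for illustration):

import numpy as np

boxes = np.array([
    [100, 100, 210, 210, 0.72],
    [105, 105, 215, 215, 0.80],   # heavily overlaps the box above
    [300, 300, 400, 400, 0.92],
    [305, 305, 395, 395, 0.60],   # heavily overlaps the box above
], dtype=np.float32)

keep = non_maximum_suppression(boxes, 0.5)
print(keep)         # indices 2 and 1: the highest-scoring box of each overlapping pair
print(boxes[keep])  # the corresponding rows of the input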

C++ code

#include <iostream>
#include <vector>
#include <algorithm>

// box definition
struct box {
    float x1, y1, x2, y2, score;
};

// IoU of two boxes
float iou(const box& box1, const box& box2) {
    float x11 = std::max(box1.x1, box2.x1);
    float y11 = std::max(box1.y1, box2.y1);
    float x22 = std::min(box1.x2, box2.x2);
    float y22 = std::min(box1.y2, box2.y2);
    float w = std::max(0.f, x22 - x11 + 1);
    float h = std::max(0.f, y22 - y11 + 1);
    float inter = w * h;
    float area1 = (box1.x2 - box1.x1 + 1) * (box1.y2 - box1.y1 + 1);
    float area2 = (box2.x2 - box2.x1 + 1) * (box2.y2 - box2.y1 + 1);
    return inter / (area1 + area2 - inter);
}

std::vector<box> nonMaximumSuppression(const std::vector<box>& boxes, float threshold) {
    std::vector<box> keep_boxes;              // boxes that survive NMS
    std::vector<box> sortedboxes = boxes;     // candidate queue, sorted by score
    std::sort(sortedboxes.begin(), sortedboxes.end(), [](const box& a, const box& b) {
        return a.score > b.score;
    });
    // while candidates remain
    while (!sortedboxes.empty()) {
        // take the highest-scoring candidate (copy it before erasing)
        box curr = sortedboxes.front();
        keep_boxes.push_back(curr);
        // remove it from the candidate queue
        sortedboxes.erase(sortedboxes.begin());
        // erase every remaining candidate whose IoU with curr reaches the threshold
        std::vector<box>::iterator it = sortedboxes.begin();
        while (it != sortedboxes.end()) {
            if (iou(curr, *it) >= threshold) {
                it = sortedboxes.erase(it);
            } else {
                ++it;
            }
        }
    }
    return keep_boxes;
}

CUDA code

The CUDA version is longer, so it is split into several parts; the code follows the classic Fast R-CNN NMS implementation (as circulated on CSDN and elsewhere). The idea is: on the GPU, every box gets a row of 64-bit words in which bit j is set when its IoU with box j exceeds the threshold; the host then walks the boxes in score order and ORs the rows of the kept boxes into a running suppression mask.

  • Host-side main function
#include <vector>
#include <iostream>
// unsigned long long is 64 bits: sizeof(unsigned long long) == 8
// number of threads per block: 64 (one bit of a 64-bit mask word per thread)
int const threadsPerBlock = sizeof(unsigned long long) * 8;

// keep_box and keep_num are outputs: the kept box indices and how many were kept
void _nms(int* keep_box, int& keep_num, const float* boxes_input, int boxes_num, float threshold){
	// 1. Allocate the GPU memory we need
	float* boxes_gpu = nullptr;
	unsigned long long* mask_gpu = nullptr;
	int block_num = (boxes_num + threadsPerBlock - 1) / threadsPerBlock;
	CUDA_CHECK(cudaMalloc(&boxes_gpu, boxes_num * 5 * sizeof(float)));
	// stores, for every box, the IoU bitmask against all other boxes
	CUDA_CHECK(cudaMalloc(&mask_gpu, boxes_num * block_num * sizeof(unsigned long long)));
	// 2. Copy the boxes from the CPU to the GPU
	CUDA_CHECK(cudaMemcpy(boxes_gpu, boxes_input, boxes_num * 5 * sizeof(float), cudaMemcpyDefault));
	// 3. Compute the IoU between boxes
	dim3 blocks(block_num, block_num); // 2-D grid of blocks
	dim3 threads(threadsPerBlock);     // 64
	// the kernel computes pairwise IoUs and writes the bitmasks into mask_gpu
	nms_kernel<<<blocks, threads>>>(boxes_num, threshold, boxes_gpu, mask_gpu);
	// 4. Copy the IoU bitmasks from the GPU back to the CPU
	std::vector<unsigned long long> mask_cpu(boxes_num * block_num);
	CUDA_CHECK(cudaMemcpy(mask_cpu.data(), mask_gpu, boxes_num * block_num * sizeof(unsigned long long), cudaMemcpyDefault));
	// 5. Perform NMS on the CPU using the bitmasks
	std::vector<unsigned long long> remv(block_num, 0); // running mask of suppressed boxes
	keep_num = 0; // number of boxes kept so far
	// the boxes are assumed to be already sorted by score; walk them in order
	for(int i = 0; i < boxes_num; i++){
		// which mask word and which bit correspond to box i
		int box_blockIdx = i / threadsPerBlock;
		int box_threadIdx = i % threadsPerBlock;
		// keep box i only if it has not been suppressed by an earlier box
		if(!(remv[box_blockIdx] & (1ULL << box_threadIdx))){
			keep_box[keep_num++] = i; // record the index of the kept box
			// row of mask words holding the IoU results of box i against the other boxes
			unsigned long long* p = mask_cpu.data() + i * block_num;
			// start at box_blockIdx: earlier words only cover higher-scoring boxes
			for(int idx = box_blockIdx; idx < block_num; idx++){
				remv[idx] |= p[idx];
			}
		}
	}
	// 6. Free the GPU memory
	CUDA_CHECK(cudaFree(boxes_gpu));
	CUDA_CHECK(cudaFree(mask_gpu));
}
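
To make the bitmask bookkeeping in step 5 easier to follow, here is a small pure-Python sketch of the same reduction (my own illustration, not part of the referenced implementation; it assumes mask_cpu is laid out as boxes_num rows of block_num 64-bit words, with bit j of row i set when IoU(box i, box j) exceeds the threshold):

def reduce_mask(mask_cpu, boxes_num, block_num):
    """CPU-side reduction mirroring step 5 of _nms (illustrative sketch)."""
    remv = [0] * block_num            # running OR of the masks of every kept box
    keep = []
    for i in range(boxes_num):        # boxes are assumed sorted by score
        blk, bit = divmod(i, 64)      # which 64-bit word / which bit holds box i
        if not ((remv[blk] >> bit) & 1):   # box i has not been suppressed yet
            keep.append(i)
            row = mask_cpu[i * block_num:(i + 1) * block_num]
            # suppress every box that overlaps box i above the threshold
            for idx in range(blk, block_num):
                remv[idx] |= row[idx]
    return keep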

  • IoU kernel
/*
boxes_num: number of input boxes
threshold: NMS IoU threshold
boxes_gpu: input box data (x1, y1, x2, y2, score per box)
mask_gpu: output IoU bitmasks
*/
__global__ void nms_kernel(const int boxes_num, float threshold, const float* boxes_gpu, unsigned long long* mask_gpu){
	int blkidx_x = blockIdx.x; // block of "current" boxes handled by this thread block
	int blkidx_y = blockIdx.y; // block of boxes they are compared against
	// actual number of boxes covered by each block (the last block may be partial)
	int blkx_size = min(boxes_num - blkidx_x * threadsPerBlock, threadsPerBlock);
	int blky_size = min(boxes_num - blkidx_y * threadsPerBlock, threadsPerBlock);
	// copy the boxes of block blkidx_y into shared memory
	__shared__ float block_boxes[threadsPerBlock * 5];
	if(threadIdx.x < blky_size){
		block_boxes[threadIdx.x * 5 + 0] = boxes_gpu[(threadsPerBlock * blkidx_y + threadIdx.x) * 5 + 0];
		block_boxes[threadIdx.x * 5 + 1] = boxes_gpu[(threadsPerBlock * blkidx_y + threadIdx.x) * 5 + 1];
		block_boxes[threadIdx.x * 5 + 2] = boxes_gpu[(threadsPerBlock * blkidx_y + threadIdx.x) * 5 + 2];
		block_boxes[threadIdx.x * 5 + 3] = boxes_gpu[(threadsPerBlock * blkidx_y + threadIdx.x) * 5 + 3];
		block_boxes[threadIdx.x * 5 + 4] = boxes_gpu[(threadsPerBlock * blkidx_y + threadIdx.x) * 5 + 4];
	}
	__syncthreads(); // wait until the shared memory is fully populated
	if(threadIdx.x < blkx_size) {
		// index of the box handled by this thread
		int box_idx = threadsPerBlock * blkidx_x + threadIdx.x;
		const float* box_cur = boxes_gpu + box_idx * 5;
		unsigned long long mask_temp = 0; // IoU bitmask of this box against block blkidx_y
		int start = 0;
		// skip duplicate comparisons when the two blocks are the same
		if(blkidx_x == blkidx_y){
			start = threadIdx.x + 1;
		}
		// compare this box against every box of block blkidx_y held in shared memory
		for(int i = start; i < blky_size; i++){
			if(IOU(box_cur, block_boxes + i * 5) > threshold){
				mask_temp |= 1ULL << i;
			}
		}
		int block_num = (boxes_num + threadsPerBlock - 1) / threadsPerBlock;
		// store the bitmask: row box_idx, column blkidx_y of the mask matrix
		mask_gpu[block_num * box_idx + blkidx_y] = mask_temp;
	}
}
  • IoU device function and the CUDA_CHECK macro (in an actual source file these must appear before the code above)
#define CUDA_CHECK(condition) \
  /* Code block avoids redefinition of cudaError_t error */ \
  do { \
    cudaError_t error = condition; \
    if (error != cudaSuccess) { \
      std::cout << cudaGetErrorString(error) << std::endl; \
    } \
  } while (0)

__device__ inline float 
    IOU(float const * const a, float const * const b) {
  float left = max(a[0], b[0]), right = min(a[2], b[2]);
  float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
  float interS = width * height;
  float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}