A Simple Mask-Based Implementation of a Surveillance Warning Zone (with Code)


Video: https://www.ixigua.com/6870543177913205251/

This code implements a video-surveillance warning-zone feature using an image mask: whenever a person enters or leaves the warning zone, a snapshot is automatically saved to disk. A walkthrough of the result is in the video linked above.

The code breaks down into three main parts:

1. Dynamic screen capture

2. YOLOv5 object detection

3. Mask generation and checking whether detected objects enter or leave the zone (see the sketch after this list)
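To make the mask idea in part 3 concrete before the full script, here is a minimal, self-contained sketch: the warning-zone polygon is rasterized into a blank image with cv2.fillPoly, and a detection counts as "inside the zone" when the pixel under its bounding-box center carries the fill color. The polygon vertices match the ones in the full script below; the two boxes in the last lines are made up purely for illustration.

import numpy as np
import cv2

frame_h, frame_w = 480, 800

# Rasterize the warning-zone polygon into an otherwise black mask image.
zone = [(413, 179), (275, 391), (632, 381), (571, 204)]
frame_mask = np.zeros((frame_h, frame_w, 3), dtype=np.uint8)
cv2.fillPoly(frame_mask, [np.array(zone)], (0, 0, 255))  # zone pixels become (0, 0, 255)

def center_in_zone(xyxy, mask):
    # True if the center of an (x1, y1, x2, y2) box lies on a filled (in-zone) pixel.
    x1, y1, x2, y2 = map(int, xyxy)
    cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
    return (mask[cy, cx] == (0, 0, 255)).all()  # the mask is indexed as (row, col) = (y, x)

print(center_in_zone((400, 250, 480, 350), frame_mask))  # True: this example box's center falls inside the polygon
print(center_in_zone((10, 10, 60, 60), frame_mask))      # False: center is well outside the zone

An equivalent alternative would be cv2.pointPolygonTest on the vertex list, but the pixel-lookup approach keeps the zone available as an image, which the full script blends into the live frame with cv2.addWeighted so the zone stays visible on screen.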

Complete code:

# Dynamic screen capture, object detection, and logging of objects entering/leaving the warning zone
import numpy as np
from numpy import random
from PIL import ImageGrab
import cv2
import time
import win32api
import torch
import torch.backends.cudnn as cudnn
from models.experimental import attempt_load
from utils.general import (check_img_size, non_max_suppression, scale_coords, plot_one_box)
from utils.torch_utils import select_device, load_classifier

print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))

# Initialize
device = select_device()
frame_h = 480
frame_w = 800
obj_count = 0        # objects counted inside the warning zone this frame
obj_count_old = 0    # smoothed count from the previous iteration
take_photo_num = 0   # number of snapshots taken
# Not every frame yields a detection, so keep a 10-value buffer and average it:
# this prevents a single dropped detection from making the count jump and triggering a spurious snapshot.
obj_count_buf = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])  # 10 values

# Load model
model = attempt_load('weights/yolov5s.pt', map_location=device)  # load FP32 model (CUDA if available)

# Get names and colors
names = model.module.names if hasattr(model, 'module') else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
# imgsz = check_img_size(486, s=model.stride.max())  # check img_size

frame_mask = np.zeros((frame_h, frame_w, 3), dtype=np.uint8)  # mask image with the same size/format as the frame
postion = [(413, 179), (275, 391), (632, 381), (571, 204)]    # warning-zone polygon vertices
cv2.fillPoly(frame_mask, [np.array(postion)], (0, 0, 255))    # fill the zone with (0, 0, 255) to form the mask

def process_img(original_image):
    # Frame preprocessing: swap RGB/BGR channel order and resize to the working resolution
    processed_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    processed_img = cv2.resize(processed_img, (frame_w, frame_h))
    return processed_img

def MouseEvent(event, x, y, flags, param):
    # Mouse callback: print the coordinates of left-button clicks (handy for picking zone vertices)
    if event == 1:  # cv2.EVENT_LBUTTONDOWN
        print(x, y)

cv2.namedWindow('frame')
cv2.setMouseCallback('frame', MouseEvent)  # bind the callback to the window

while(1):
    # get a frame
    frame = np.array(ImageGrab.grab(bbox=(0, 100, 800, 600)))  # screen capture
    if np.shape(frame):  # only continue if the frame has data
        # processing
        frame = process_img(frame)              # preprocess the frame
        img = frame.copy()                      # work on a copy
        img = np.transpose(img, (2, 0, 1))      # HWC (480, 800, 3) -> CHW (3, 480, 800)
        img = torch.from_numpy(img).to(device)  # once on the GPU, the array can no longer be read the usual way
        img = img.float()   # uint8 to fp32
        img /= 255.0        # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add a batch dimension at position 0
        pred = model(img)[0]
        # Apply NMS (non-maximum suppression): keep detections above 0.5 confidence / 0.5 IoU
        pred = non_max_suppression(pred, 0.5, 0.5)

        # Drawing
        for i, det in enumerate(pred):
            if det is None or not len(det):
                continue
            # Rescale boxes from img_size to the display frame size
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], frame.shape).round()
            # Write results
            for *xyxy, conf, cls in reversed(det):
                if cls == 0:  # only handle class 0 (person)
                    label = '%s %.2f' % (names[int(cls)], conf)
                    plot_one_box(xyxy, frame, label=label, color=colors[int(cls)], line_thickness=1)  # box-drawing helper from utils.general
                    xy = torch.tensor(xyxy).tolist()  # tensor to plain list
                    x, y, x1, y1 = int(xy[0]), int(xy[1]), int(xy[2]), int(xy[3])  # top-left / bottom-right corners
                    center_xy = (int(np.average([x, x1])), int(np.average([y, y1])))  # box center
                    if (frame_mask[(center_xy[1], center_xy[0])] == [0, 0, 255]).all():  # center is inside the warning zone
                        obj_color = (255, 0, 0)  # mark in-zone centers with a different color
                        obj_count += 1
                    else:
                        obj_color = (255, 255, 0)
                    cv2.circle(frame, center_xy, 10, obj_color, 4)  # draw the center point

        obj_count_buf = np.append(obj_count_buf[1:], obj_count)  # keep the 10-value buffer rolling
        cbr = int(np.around(np.average(obj_count_buf)))          # smoothed in-zone count
        cv2.putText(frame, 'obj_count :%s obj take_photo: %s' % (cbr, take_photo_num), (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)  # on-screen status text
        frame = cv2.addWeighted(frame, 1.0, frame_mask, 0.1, 0.0)  # blend the mask into the live frame
        if obj_count_old != cbr:
            take_photo_num += 1
            cv2.imwrite("./photo/%s.jpg" % take_photo_num, frame, [int(cv2.IMWRITE_JPEG_QUALITY), 50])  # save a snapshot
            print('take photo number :%s' % take_photo_num)  # report how many snapshots have been taken
            cv2.putText(frame, 'take photo', (100, 300), cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 3)
        obj_count_old = cbr  # remember the smoothed count for the next iteration
        obj_count = 0        # reset the per-frame count for the next detection pass

        # show a frame
        # cv2.imshow("capture", frame[:, :, ::-1])
        cv2.imshow("frame", frame)
        cv2.imshow("frame_mask", frame_mask[:, :, ::-1])

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
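One detail worth highlighting in the script above is the 10-element obj_count_buf. A single missed detection would otherwise make the in-zone count drop for one frame and trigger a spurious snapshot, so a photo is saved only when the rounded average over the last ten frames changes. A stripped-down sketch of that debounce logic, with made-up per-frame counts standing in for real detections:

import numpy as np

obj_count_buf = np.zeros(10, dtype=int)  # rolling window of the last 10 per-frame in-zone counts
obj_count_old = 0                        # smoothed count from the previous frame

def push_count(per_frame_count):
    # Push the newest count; return the smoothed value and whether it changed (i.e. take a snapshot).
    global obj_count_buf, obj_count_old
    obj_count_buf = np.append(obj_count_buf[1:], per_frame_count)
    smoothed = int(np.around(np.average(obj_count_buf)))
    changed = smoothed != obj_count_old
    obj_count_old = smoothed
    return smoothed, changed

# Simulated frames: a person enters the zone at frame 5; frame 12 is a dropped detection.
for i, c in enumerate([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1]):
    smoothed, take_photo = push_count(c)
    if take_photo:
        print('frame %d: smoothed count -> %d, snapshot would be saved' % (i, smoothed))

With the buffer in place, the dropped detection at frame 12 never shows up in the smoothed count, so only the genuine entry produces a snapshot (registered a few frames late, at frame 10); the trade-off is that latency before a change is recorded.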