yolov8 区域多类别计数

1. 基础

本项目是在 Windows+YOLOV8环境配置 的基础上实现的,环境配置方法可见上边文章

2. 计数功能

2.1 计数模块

在指定区域内计数模块
region_points为指定区域,可设置为任意形状

# Count detections whose box center falls inside the polygonal region.
# region_points: polygon vertices (any shape); boxes: per-detection rows
# whose first four values are x1, y1, x2, y2 and whose last value is the
# predicted class id (COCO: 0 = person, 2 = car).
region_points = [(20, 20), (20, 600), (1200, 600), (1200, 20)]
count_class1 = 0  # persons (class id 0)
count_class2 = 0  # cars (class id 2)
for box in boxes:  # the enumerate index was unused, so iterate directly
    x1, y1, x2, y2 = box[:4]
    # Use the box center as the representative point for the region test.
    center_point = (int((x1 + x2) / 2), int((y1 + y2) / 2))
    if is_inside_region(center_point, region_points):
        if box[-1] == 0:  # class id 0: person
            count_class1 += 1
        elif box[-1] == 2:  # class id 2: car
            count_class2 += 1

2.2 判断模块

def is_inside_region(point, region_points):
    """Return True if *point* lies inside or on the border of the polygon.

    Args:
        point: (x, y) integer pixel coordinates of the test point.
        region_points: sequence of (x, y) polygon vertices.

    Returns:
        bool: True when the point is inside or exactly on the polygon edge
        (pointPolygonTest returns +1 inside, 0 on the edge, -1 outside).
    """
    # dtype=np.int32: OpenCV contours must be CV_32S or CV_32F; a bare
    # np.array() of Python ints is int64 on 64-bit Linux and makes
    # cv2.pointPolygonTest raise an error.
    contour = np.array(region_points, dtype=np.int32)
    return cv2.pointPolygonTest(contour, point, False) >= 0

3. 初始代码


import cv2
import numpy as np
from ultralytics import YOLO


def is_inside_region(point, region_points):
    """Return True if *point* lies inside or on the border of the polygon.

    Args:
        point: (x, y) integer pixel coordinates of the test point.
        region_points: sequence of (x, y) polygon vertices.

    Returns:
        bool: True when the point is inside or exactly on the polygon edge
        (pointPolygonTest returns +1 inside, 0 on the edge, -1 outside).
    """
    # dtype=np.int32: OpenCV contours must be CV_32S or CV_32F; a bare
    # np.array() of Python ints is int64 on 64-bit Linux and makes
    # cv2.pointPolygonTest raise an error.
    contour = np.array(region_points, dtype=np.int32)
    return cv2.pointPolygonTest(contour, point, False) >= 0

def detect():
    """Track objects in a video with YOLOv8 and count people/cars in a region.

    For every frame: run the tracker, count detections whose box center lies
    inside ``region_points`` (class 0 = person, class 2 = car), draw the
    region outline, print the per-frame counts, and show/save the annotated
    frame. Press 'q' to stop early.
    """
    model = YOLO("yolov8n.pt")

    window_name = "region counter"  # fixed typo: was "region couter"
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(window_name, 1280, 360)  # display size only

    cap = cv2.VideoCapture('ultralytics/assets/a3.mp4')
    assert cap.isOpened(), "Error reading video file"

    # Derive writer geometry/fps from the source video instead of the
    # hard-coded (2560, 720)/30: a frame-size mismatch silently produces
    # an unplayable output file.
    frame_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 30  # fall back if the container lacks fps
    out_video = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'XVID'),
                                fps, (frame_w, frame_h))

    region_points = [(20, 20), (20, 600), (1200, 600), (1200, 20)]
    # OpenCV drawing/geometry functions need int32 contours; a bare
    # np.array() of Python ints can be int64 on 64-bit platforms.
    region_points_np = np.array(region_points, dtype=np.int32)

    try:
        while cap.isOpened():
            success, im0 = cap.read()
            if not success:
                print("Video frame is empty or video processing has been successfully completed.")
                break
            tracks = model.track(im0, persist=True)
            annotated_frame = tracks[0].plot()
            boxes = tracks[0].boxes.data

            count_class1 = 0  # persons (class id 0)
            count_class2 = 0  # cars (class id 2)
            for box in boxes:
                x1, y1, x2, y2 = box[:4]
                # Use the box center as the representative point.
                center_point = (int((x1 + x2) / 2), int((y1 + y2) / 2))
                if is_inside_region(center_point, region_points):
                    if box[-1] == 0:  # class id 0: person
                        count_class1 += 1
                    elif box[-1] == 2:  # class id 2: car
                        count_class2 += 1

            cv2.polylines(annotated_frame, [region_points_np], isClosed=True,
                          color=(255, 0, 0), thickness=2)
            print("Number of objects in class 1:", count_class1)
            print("Number of objects in class 2:", count_class2)

            cv2.imshow(window_name, annotated_frame)
            out_video.write(annotated_frame)
            if cv2.waitKey(1) == ord('q'):
                break
    finally:
        # Always release resources, even if tracking raises mid-loop.
        out_video.release()
        cap.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    detect()

4. 实验结果

本实验可以计数多种类别
(1)区域计数实验
在这里插入图片描述
(2)如需把区域背景换成填充色,可将上边代码里的

cv2.polylines(annotated_frame, [region_points_np], isClosed=True, color=(255, 0, 0), thickness=2)

替换为

region_mask = np.zeros_like(annotated_frame)
cv2.fillPoly(region_mask, [region_points_np], color=(255, 0, 0))
# 使用透明度将填充后的区域与原始帧混合
alpha = 0.2  # 调整透明度
annotated_frame = cv2.addWeighted(annotated_frame, 1 - alpha, region_mask, alpha, 0)

请添加图片描述
(3)计数在图上显示
在 annotated_frame = cv2.addWeighted(annotated_frame, 1 - alpha, region_mask, alpha, 0) 这一行之后添加

text1 = "count_class1:%d" % count_class1
text2 = "count_class2:%d" % count_class2
cv2.putText(annotated_frame, text1, (20, 30), cv2.FONT_ITALIC, 1.0, (0, 255, 255), 2)
cv2.putText(annotated_frame, text2, (20, 60), cv2.FONT_ITALIC, 1.0, (0, 255, 255), 2)

在这里插入图片描述

5. 完整代码

import cv2
import numpy as np
from ultralytics import YOLO


def is_inside_region(point, region_points):
    """Return True if *point* lies inside or on the border of the polygon.

    Args:
        point: (x, y) integer pixel coordinates of the test point.
        region_points: sequence of (x, y) polygon vertices.

    Returns:
        bool: True when the point is inside or exactly on the polygon edge
        (pointPolygonTest returns +1 inside, 0 on the edge, -1 outside).
    """
    # dtype=np.int32: OpenCV contours must be CV_32S or CV_32F; a bare
    # np.array() of Python ints is int64 on 64-bit Linux and makes
    # cv2.pointPolygonTest raise an error.
    contour = np.array(region_points, dtype=np.int32)
    return cv2.pointPolygonTest(contour, point, False) >= 0

def detect():
    """Track objects in a video with YOLOv8 and count people/cars in a region.

    For every frame: run the tracker, count detections whose box center lies
    inside ``region_points`` (class 0 = person, class 2 = car), shade the
    region with a translucent overlay, draw the counts onto the frame, print
    them, and show/save the annotated frame. Press 'q' to stop early.
    """
    model = YOLO("yolov8n.pt")

    window_name = "region counter"  # fixed typo: was "region couter"
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(window_name, 1280, 360)  # display size only

    cap = cv2.VideoCapture('ultralytics/assets/a3.mp4')
    assert cap.isOpened(), "Error reading video file"

    # Derive writer geometry/fps from the source video instead of the
    # hard-coded (2560, 720)/30: a frame-size mismatch silently produces
    # an unplayable output file.
    frame_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 30  # fall back if the container lacks fps
    out_video = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'XVID'),
                                fps, (frame_w, frame_h))

    region_points = [(20, 20), (20, 600), (1200, 600), (1200, 20)]
    # OpenCV drawing/geometry functions need int32 contours; a bare
    # np.array() of Python ints can be int64 on 64-bit platforms.
    region_points_np = np.array(region_points, dtype=np.int32)
    alpha = 0.2  # overlay transparency, loop-invariant so hoisted out of the loop

    try:
        while cap.isOpened():
            success, im0 = cap.read()
            if not success:
                print("Video frame is empty or video processing has been successfully completed.")
                break
            tracks = model.track(im0, persist=True)
            annotated_frame = tracks[0].plot()
            boxes = tracks[0].boxes.data

            count_class1 = 0  # persons (class id 0)
            count_class2 = 0  # cars (class id 2)
            for box in boxes:
                x1, y1, x2, y2 = box[:4]
                # Use the box center as the representative point.
                center_point = (int((x1 + x2) / 2), int((y1 + y2) / 2))
                if is_inside_region(center_point, region_points):
                    if box[-1] == 0:  # class id 0: person
                        count_class1 += 1
                    elif box[-1] == 2:  # class id 2: car
                        count_class2 += 1

            # Blend a filled polygon over the frame so the counting region
            # shows as a translucent tint rather than just an outline.
            region_mask = np.zeros_like(annotated_frame)
            cv2.fillPoly(region_mask, [region_points_np], color=(255, 0, 0))
            annotated_frame = cv2.addWeighted(annotated_frame, 1 - alpha,
                                              region_mask, alpha, 0)

            text1 = "count_class1:%d" % count_class1
            text2 = "count_class2:%d" % count_class2
            cv2.putText(annotated_frame, text1, (20, 30), cv2.FONT_ITALIC, 1.0, (0, 255, 255), 2)
            cv2.putText(annotated_frame, text2, (20, 60), cv2.FONT_ITALIC, 1.0, (0, 255, 255), 2)
            print("Number of objects in class 1:", count_class1)
            print("Number of objects in class 2:", count_class2)

            cv2.imshow(window_name, annotated_frame)
            out_video.write(annotated_frame)
            if cv2.waitKey(1) == ord('q'):
                break
    finally:
        # Always release resources, even if tracking raises mid-loop.
        out_video.release()
        cap.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    detect()

6. 源码

可以去 Windows+YOLOV8环境配置 下载源码,然后把上边主代码贴进去运行即可

智慧交通基于YOLOv8的行人车辆检测计数系统源码(部署教程+训练好的模型+各项评估指标曲线).zip 平均准确率:0.91 类别:person、truck、car、bus、traffic light 【资源介绍】 1、ultralytics-main ultralytics-main为YOLOv8源代码,里面涵盖基于yolov8分类、目标检测额、姿态估计、图像分割四部分代码,我们使用的是detect部分,也就是目标检测代码 2、搭建环境 安装anaconda 和 pycharm windows系统、mac系统、Linux系统都适配 在anaconda中新建一个新的envs虚拟空间(可以参考博客来),命令窗口执行:conda create -n YOLOv8 python==3.8 创建完YOLOv8-GUI虚拟空间后,命令窗口执行:source activate YOLOv8 激活虚拟空间 然后就在YOLOv8虚拟空间内安装requirements.txt中的所有安装包,命令窗口执行:pip install -r requirements.txt 使用清华源安装更快 3、训练模型过程 进入到\ultralytics-main\ultralytics\yolo\v8\detect\文件夹下,datasets即为我们需要准备好的数据集,训练其他模型同理。 data文件夹下的bicycle.yaml文件为数据集配置文件,该文件为本人训练自行车检测模型时创建,训练其他模型,可自行创建。博文有介绍https://blog.csdn.net/DeepLearning_?spm=1011.2415.3001.5343 train.py中238行,修改为data = cfg.data or './bicycle.yaml' # or yolo.ClassificationDataset("mnist") 237行修改自己使用的预训练模型 若自己有显卡,修改239行,如我有四张显卡,即改成args = dict(model=model, data=data, device=”0,1,2,3“) 以上配置完成后运行train.py开始训练模型,训练完毕后会在runs/detect/文件夹下生成train*文件夹,里面包含模型和评估指标等 4、推理测试 训练好模型,打开predict.py,修改87行,model = cfg.model or 'yolov8n.pt',把yolov8n.pt换成我们刚才训练完生成的模型路径(在\ultralytics-main\ultralytics\yolo\v8\detect\runs\detect文件夹下),待测试的图片或者视频存放于ultralytics\ultralytics\assets文件夹, 运行predict.py即可,检测结果会在runs/detect/train文件夹下生成。
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

积极向上的mr.d

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值