YOLOv8 Region-Based Multi-Class Counting

1. Background

This project builds on the Windows + YOLOv8 environment setup; see that article for the configuration details.

2. Counting Functionality

2.1 Counting Module

This module counts objects whose box centers fall inside a specified region.
region_points defines the region and can be an arbitrary polygon.

region_points = [(20, 20), (20, 600), (1200, 600), (1200, 20)]
count_class1 = 0
count_class2 = 0
for box in boxes:
    x1, y1, x2, y2 = box[:4]
    x_center = (x1 + x2) / 2
    y_center = (y1 + y2) / 2
    center_point = (int(x_center), int(y_center))
    if is_inside_region(center_point, region_points):
        if box[-1] == 0:  # COCO class 0: person
            count_class1 += 1
        elif box[-1] == 2:  # COCO class 2: car
            count_class2 += 1
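
To sanity-check this counting logic without running a detector, here is a minimal self-contained sketch with made-up boxes (the coordinates and class indices are illustrative only; it inlines the is_inside_region helper defined in section 2.2 below):

import cv2
import numpy as np

def is_inside_region(point, region_points):
    """Check whether a point lies inside the given polygon."""
    return cv2.pointPolygonTest(np.array(region_points, dtype=np.int32), point, False) >= 0

region_points = [(20, 20), (20, 600), (1200, 600), (1200, 20)]

# Made-up boxes in (x1, y1, x2, y2, cls) form.
test_boxes = [
    (100, 100, 200, 300, 0),    # person; center (150, 200) is inside
    (1150, 550, 1400, 700, 2),  # car; center (1275, 625) is outside
    (600, 200, 700, 400, 2),    # car; center (650, 300) is inside
]

count_class1 = 0
count_class2 = 0
for box in test_boxes:
    x1, y1, x2, y2 = box[:4]
    center_point = (int((x1 + x2) / 2), int((y1 + y2) / 2))
    if is_inside_region(center_point, region_points):
        if box[-1] == 0:
            count_class1 += 1
        elif box[-1] == 2:
            count_class2 += 1

print(count_class1, count_class2)  # expected output: 1 1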

2.2 Point-in-Region Test

def is_inside_region(point, region_points):
    """
    Check whether a point lies inside the specified region.
    """
    return cv2.pointPolygonTest(np.array(region_points, dtype=np.int32), point, False) >= 0
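
cv2.pointPolygonTest returns a positive value when the point is inside the contour, 0 when it lies exactly on the edge, and a negative value outside; passing True as the last argument returns the signed distance to the nearest edge instead. A quick check with arbitrary test points:

import cv2
import numpy as np

region = np.array([(20, 20), (20, 600), (1200, 600), (1200, 20)], dtype=np.int32)

print(cv2.pointPolygonTest(region, (600, 300), False))  #  1.0  -> inside
print(cv2.pointPolygonTest(region, (20, 300), False))   #  0.0  -> on the edge
print(cv2.pointPolygonTest(region, (0, 0), False))      # -1.0  -> outside
print(cv2.pointPolygonTest(region, (600, 300), True))   # 280.0 -> distance to the nearest edge (y = 20)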

3. Initial Code


import cv2
import numpy as np
from ultralytics import YOLO


def is_inside_region(point, region_points):
    """
    Check whether a point lies inside the specified region.
    """
    return cv2.pointPolygonTest(np.array(region_points, dtype=np.int32), point, False) >= 0

def detect():
    model = YOLO("yolov8n.pt")
    cv2.namedWindow('region counter', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('region counter', 1280, 360)  # set the display window size
    cap = cv2.VideoCapture('ultralytics/assets/a3.mp4')
    # The writer's frame size must match the frames passed to write(); derive it from the source video.
    w, h = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    out_video = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'XVID'), fps, (w, h))

    region_points = [(20, 20), (20, 600), (1200, 600), (1200, 20)]
    region_points_np = np.array(region_points, dtype=np.int32)

    assert cap.isOpened(), "Error reading video file"
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            print("Video frame is empty or video processing has been successfully completed.")
            break
        tracks = model.track(im0, persist=True)
        annotated_frame = tracks[0].plot()
        boxes = tracks[0].boxes.data
        count_class1 = 0
        count_class2 = 0
        for box in boxes:
            x1, y1, x2, y2 = box[:4]
            x_center = (x1 + x2) / 2
            y_center = (y1 + y2) / 2
            center_point = (int(x_center), int(y_center))
            if is_inside_region(center_point, region_points):
                if box[-1] == 0:  # COCO class 0: person
                    count_class1 += 1
                elif box[-1] == 2:  # COCO class 2: car
                    count_class2 += 1
        cv2.polylines(annotated_frame, [region_points_np], isClosed=True, color=(255, 0, 0), thickness=2)
        print("Number of objects in class 1:", count_class1)
        print("Number of objects in class 2:", count_class2)

        cv2.imshow("region counter", annotated_frame)
        out_video.write(annotated_frame)
        if cv2.waitKey(1) == ord('q'):
            break
    out_video.release()
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    detect()
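
Hard-coding one counter per class does not scale to many classes. As a sketch of a more general variant (assuming the standard Ultralytics results API, where each result carries boxes.data with the class index in the last column and a names dict mapping indices to labels), the tallies can go into a collections.Counter:

from collections import Counter

def count_in_region(result, region_points):
    """Tally tracked boxes whose centers fall inside region_points, keyed by class name."""
    counts = Counter()
    for box in result.boxes.data:
        x1, y1, x2, y2 = box[:4]
        center_point = (int((x1 + x2) / 2), int((y1 + y2) / 2))
        if is_inside_region(center_point, region_points):
            counts[result.names[int(box[-1])]] += 1
    return counts

# Usage inside the while loop:
# counts = count_in_region(tracks[0], region_points)
# print(dict(counts))  # e.g. {'person': 3, 'car': 2}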

4. Experimental Results

This setup can count multiple classes at the same time.
(1) Region counting experiment
[Figure: region counting result]
(2) To render the region as a translucent filled area instead of an outline, replace this line in the code above:

cv2.polylines(annotated_frame, [region_points_np], isClosed=True, color=(255, 0, 0), thickness=2)

with:

region_mask = np.zeros_like(annotated_frame)
cv2.fillPoly(region_mask, [region_points_np], color=(255, 0, 0))
# Blend the filled region with the original frame using transparency
alpha = 0.2  # adjust the transparency
annotated_frame = cv2.addWeighted(annotated_frame, 1 - alpha, region_mask, alpha, 0)
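
cv2.addWeighted computes dst = src1*alpha1 + src2*alpha2 + gamma per pixel, so with alpha = 0.2 each region pixel keeps 80% of the frame and takes 20% of the fill color. A one-pixel sanity check (the pixel values are chosen arbitrarily):

import cv2
import numpy as np

frame_px = np.full((1, 1, 3), 200, dtype=np.uint8)  # a gray frame pixel
fill_px = np.full((1, 1, 3), 255, dtype=np.uint8)   # the fill color

blended = cv2.addWeighted(frame_px, 0.8, fill_px, 0.2, 0)
print(blended)  # [[[211 211 211]]] -> 0.8*200 + 0.2*255 = 211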

[Figure: region rendered with a translucent fill]
(3) Displaying the counts on the frame
After the annotated_frame = cv2.addWeighted(annotated_frame, 1 - alpha, region_mask, alpha, 0) line, add:

text1 = "count_class1:%d" % count_class1
text2 = "count_class2:%d" % count_class2
cv2.putText(annotated_frame, text1, (20, 30), cv2.FONT_ITALIC, 1.0, (0, 255, 255), 2)
cv2.putText(annotated_frame, text2, (20, 60), cv2.FONT_ITALIC, 1.0, (0, 255, 255), 2)

[Figure: counts overlaid on the frame]

5. Complete Code

import cv2
import numpy as np
from ultralytics import YOLO


def is_inside_region(point, region_points):
    """
    Check whether a point lies inside the specified region.
    """
    return cv2.pointPolygonTest(np.array(region_points, dtype=np.int32), point, False) >= 0

def detect():
    model = YOLO("yolov8n.pt")
    cv2.namedWindow('region counter', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('region counter', 1280, 360)  # set the display window size
    cap = cv2.VideoCapture('ultralytics/assets/a3.mp4')
    # The writer's frame size must match the frames passed to write(); derive it from the source video.
    w, h = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    out_video = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'XVID'), fps, (w, h))

    region_points = [(20, 20), (20, 600), (1200, 600), (1200, 20)]
    region_points_np = np.array(region_points, dtype=np.int32)

    assert cap.isOpened(), "Error reading video file"
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            print("Video frame is empty or video processing has been successfully completed.")
            break
        tracks = model.track(im0, persist=True)
        annotated_frame = tracks[0].plot()
        boxes = tracks[0].boxes.data
        count_class1 = 0
        count_class2 = 0
        for box in boxes:
            x1, y1, x2, y2 = box[:4]
            x_center = (x1 + x2) / 2
            y_center = (y1 + y2) / 2
            center_point = (int(x_center), int(y_center))
            if is_inside_region(center_point, region_points):
                if box[-1] == 0:  # COCO class 0: person
                    count_class1 += 1
                elif box[-1] == 2:  # COCO class 2: car
                    count_class2 += 1
        # cv2.polylines(annotated_frame, [region_points_np], isClosed=True, color=(255, 0, 0), thickness=2)
        region_mask = np.zeros_like(annotated_frame)
        cv2.fillPoly(region_mask, [region_points_np], color=(255, 0, 0))
        # Blend the filled region with the original frame using transparency
        alpha = 0.2  # adjust the transparency
        annotated_frame = cv2.addWeighted(annotated_frame, 1 - alpha, region_mask, alpha, 0)

        text1 = "count_class1:%d" % count_class1
        text2 = "count_class2:%d" % count_class2
        cv2.putText(annotated_frame, text1, (20, 30), cv2.FONT_ITALIC, 1.0, (0, 255, 255), 2)
        cv2.putText(annotated_frame, text2, (20, 60), cv2.FONT_ITALIC, 1.0, (0, 255, 255), 2)
        print("Number of objects in class 1:", count_class1)
        print("Number of objects in class 2:", count_class2)

        cv2.imshow("region counter", annotated_frame)
        out_video.write(annotated_frame)
        if cv2.waitKey(1) == ord('q'):
            break
    out_video.release()
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    detect()

6. Source Code

You can download the source code from the Windows + YOLOv8 environment setup article, paste the main code above into it, and run it (the only dependencies are the ultralytics package and OpenCV, e.g. pip install ultralytics opencv-python).
