A full pipeline for mask detection with YOLOv5

Building the dataset

Collect images:
You can scrape them from Baidu Images with a crawler. The crawler code is as follows:

# -*- coding: UTF-8 -*-"""
import requests
import tqdm


def configs(search, page, number):
    """Build the request URL and query parameters for Baidu image search.

    :param search: search keyword
    :param page: result page index (60 results per page)
    :param number: number of images requested per page
    :return: (url, params) tuple for requests.get
    """
    url = 'https://image.baidu.com/search/acjson'
    params = {
        "tn": "resultjson_com",
        "logid": "11555092689241190059",
        "ipn": "rj",
        "ct": "201326592",
        "is": "",
        "fp": "result",
        "queryWord": search,
        "cl": "2",
        "lm": "-1",
        "ie": "utf-8",
        "oe": "utf-8",
        "adpicid": "",
        "st": "-1",
        "z": "",
        "ic": "0",
        "hd": "",
        "latest": "",
        "copyright": "",
        "word": search,
        "s": "",
        "se": "",
        "tab": "",
        "width": "",
        "height": "",
        "face": "0",
        "istype": "2",
        "qc": "",
        "nc": "1",
        "fr": "",
        "expermode": "",
        "force": "",
        "pn": str(60 * page),
        "rn": number,
        "gsm": "1e",
        "1617626956685": ""
    }
    return url, params


def loadpic(number, page):
    """Download `number` images, starting from page `page`.

    :param number: total number of images to download
    :param page: starting page index
    :return: None
    """
    while True:
        if number == 0:
            break
        url, params = configs(search, page, number)
        result = requests.get(url, headers=header, params=params).json()
        url_list = []
        for data in result['data'][:-1]:  # the last entry is an empty placeholder, skip it
            url_list.append(data['thumbURL'])
        for i in range(len(url_list)):
            getImg(url_list[i], 60 * page + i, path)
            bar.update(1)
            number -= 1
            if number == 0:
                break
        page += 1
    print("\nfinish!")


def getImg(url, idx, path):
    """Download a single image and save it under `path`.

    :param url: image URL
    :param idx: index used in the output file name
    :param path: output directory (with trailing slash)
    :return: None
    """
    img = requests.get(url, headers=header)
    with open(path + 'maintenanceWorker21_' + str(idx + 1) + '.jpg', 'wb') as file:
        file.write(img.content)


if __name__ == '__main__':
    search = input("请输入搜索内容:")
    number = int(input("请输入需求数量:"))
    path = './car'
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'}

    bar = tqdm.tqdm(total=number)
    page = 0
    loadpic(number, page)

Installing the annotation tool labelme

pip install labelme

Run the labelme command in cmd to launch the tool.

Annotation tips

Common shortcuts: A (open the previous image), D (open the next image), Ctrl+Z (undo the last point).
It is best to keep the images and the generated JSON label files in the same folder.
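
Before converting, it is worth checking that every image actually has a matching JSON file. A minimal sketch, assuming the .jpg images and labelme JSON files sit together in a hypothetical data_dir:

import os

data_dir = './data'  # hypothetical folder holding the images and labelme JSONs
for name in os.listdir(data_dir):
    if name.lower().endswith('.jpg'):
        json_name = os.path.splitext(name)[0] + '.json'
        if not os.path.exists(os.path.join(data_dir, json_name)):
            print('missing annotation for', name)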

Converting the format

Install the labelme2yolo tool: pip install labelme2yolo
Run labelme2yolo --help to see its usage.

Run the following command in cmd to perform the conversion:

labelme2yolo --json_dir="path/to/json/files" --val_size=0.2 --test_size=0 --output_format="bbox"

When the conversion finishes, a pre-split dataset named YOLODataset is generated under the JSON directory.
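
labelme2yolo also writes a dataset.yaml inside YOLODataset; this is the file you later pass to --data when training. Its contents look roughly like the sketch below (the exact paths and class names depend on your data):

train: /path/to/YOLODataset/images/train
val: /path/to/YOLODataset/images/val
nc: 1
names: ['kouzhao']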

Downloading YOLOv5

  • Note: the Python version must be 3.7, 3.8, or 3.9; any other version will raise errors.
    Download YOLOv5 from GitHub.
    Install the required libraries:
    Before installing: if torch is already present in your environment, comment out the torch entries in requirements.txt; if torch is not installed yet, do not install it through a domestic mirror either, since those mirrors serve the CPU-only build.
pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple

Below, training is driven entirely from the command line, so there is no need to edit file paths inside the configs.

  • Train the model with YOLOv5's train.py script:
python3 train.py --img 640 --batch 16 --epochs 50 --data data.yaml --weights yolov5s.pt --device 0

--img: input image size (640x640 here).
--batch: batch size (16 here).
--epochs: number of training epochs (50 here).
--data: path to the dataset config file (data.yaml here).
--weights: pretrained weights file (yolov5s.pt here).
--device: device id (0 means the first GPU).
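
  • If training is interrupted, train.py can resume from the most recent checkpoint under runs/train/ via the --resume flag:
python3 train.py --resume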

  • Validate model performance with YOLOv5's val.py script:
python3 val.py --img 640 --batch 16 --data data.yaml --weights runs/train/exp/weights/best.pt --device 0

--weights: path to the trained weights file (runs/train/exp/weights/best.pt here).

  • Run object detection with YOLOv5's detect.py script:
python3 detect.py --source ../coco/val2017 --weights runs/train/exp/weights/best.pt --img 640 --conf 0.25 --device 0

--source: path to the images or video to detect (../coco/val2017 here).
--conf: confidence threshold (0.25 here).

Converting the trained YOLOv5 model to ONNX

Make sure the required dependencies are installed, in particular onnx and onnx-simplifier:

pip install onnx onnx-simplifier
  • Export the ONNX model with the export.py script.
    Run the following command in the YOLOv5 directory to convert the trained model to ONNX:
python3 export.py --weights runs/train/exp/weights/best.pt --img 640 --batch 1 --device 0 --simplify

--weights: path to the trained weights file (runs/train/exp/weights/best.pt here).
--img: input image size (640x640 here).
--batch: batch size (usually 1).
--device: device id (0 means the first GPU).
--simplify: simplify the exported model with ONNX Simplifier.
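
After exporting, it is worth sanity-checking the ONNX file before writing any inference code. A minimal sketch using the onnx package installed above and onnxruntime (also needed for the inference code below); export.py writes best.onnx next to best.pt:

import onnx
import onnxruntime as ort

onnx_path = 'runs/train/exp/weights/best.onnx'

model = onnx.load(onnx_path)
onnx.checker.check_model(model)  # raises an exception if the graph is malformed

session = ort.InferenceSession(onnx_path)
for inp in session.get_inputs():
    print('input :', inp.name, inp.shape)   # expect something like: images [1, 3, 640, 640]
for out in session.get_outputs():
    print('output:', out.name, out.shape)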

Loading the .onnx model

import onnxruntime as ort
import numpy as np
import cv2

def preprocess(image_path, input_size):
    # Load the image
    img = cv2.imread(image_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    
    # Resize the image
    img = cv2.resize(img, (input_size, input_size))
    
    # Normalize to [0, 1]
    img = img / 255.0
    
    # HWC -> CHW, float32
    img = np.transpose(img, (2, 0, 1)).astype(np.float32)
    
    # Add the batch dimension (NCHW)
    img = np.expand_dims(img, axis=0)
    return img

def postprocess(output, conf_threshold=0.5, iou_threshold=0.4):
    # Assumes output of shape (1, num_boxes, 6), where 6 = (x, y, w, h, conf, class_id)
    output = output[0]  # take the first batch
    boxes = output[:, :4]
    scores = output[:, 4]
    class_ids = output[:, 5].astype(np.int32)

    # Filter out low-confidence detections
    mask = scores > conf_threshold
    boxes = boxes[mask]
    scores = scores[mask]
    class_ids = class_ids[mask]

    # Non-maximum suppression (cv2.dnn.NMSBoxes expects (x, y, w, h) boxes)
    indices = cv2.dnn.NMSBoxes(boxes.tolist(), scores.tolist(), conf_threshold, iou_threshold)
    boxes = [boxes[i] for i in indices]
    scores = [scores[i] for i in indices]
    class_ids = [class_ids[i] for i in indices]

    return boxes, scores, class_ids

def infer(image_path, model_path, input_size=640):
    # Load the ONNX model
    session = ort.InferenceSession(model_path)

    # Get the model's input name
    input_name = session.get_inputs()[0].name

    # Preprocess the image
    img = preprocess(image_path, input_size)

    # Run inference
    outputs = session.run(None, {input_name: img})

    # Print the output shapes
    for i, output in enumerate(outputs):
        print(f"Output {i}: {output.shape}")

    # Postprocess
    boxes, scores, class_ids = postprocess(outputs[0])

    return boxes, scores, class_ids

# Inference example
image_path = './image.jpg'
model_path = './model.onnx'

boxes, scores, class_ids = infer(image_path, model_path)

# Print the detection results
for box, score, class_id in zip(boxes, scores, class_ids):
    print(f"Class ID: {class_id}, Score: {score}, Box: {box}")


Detailed explanation
  • Class ID: the category id of the detected object. In this example every detection has class id 0; use the class list from training (the names field in data.yaml) to map ids to actual object names.
  • Score: the confidence score, i.e. the probability the model assigns to this box containing an object. In this example the scores are 0.838, 0.784, 0.700 and 0.534.
  • Box: the box position and size, in the format [center x, center y, width, height].
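
One caveat: the preprocess above uses a plain cv2.resize, which distorts the aspect ratio, whereas YOLOv5's own pipeline uses a letterbox resize (scale with the aspect ratio preserved, then pad to a square). A minimal sketch of that idea, as an illustration rather than YOLOv5's exact implementation:

import cv2
import numpy as np

def letterbox(img, new_size=640, color=(114, 114, 114)):
    h, w = img.shape[:2]
    scale = min(new_size / h, new_size / w)          # keep the aspect ratio
    nh, nw = int(round(h * scale)), int(round(w * scale))
    resized = cv2.resize(img, (nw, nh))
    canvas = np.full((new_size, new_size, 3), color, dtype=np.uint8)
    top, left = (new_size - nh) // 2, (new_size - nw) // 2
    canvas[top:top + nh, left:left + nw] = resized   # paste the image centered on the gray canvas
    return canvas, scale, (left, top)                # scale and offsets let you map boxes back

The returned scale and offsets are what you would use to map the detected boxes back onto the original image.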

Real-time image inference with ONNX:

import os
import cv2
import numpy as np
import onnxruntime

# Class names: the custom mask class 'kouzhao' followed by the 80 COCO classes
CLASSES = ['kouzhao','person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
           'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
           'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
           'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
           'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
           'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
           'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
           'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
           'hair drier', 'toothbrush']


class YOLOV5():
    def __init__(self, onnxpath):
        self.onnx_session = onnxruntime.InferenceSession(onnxpath)
        self.input_name = self.get_input_name()
        self.output_name = self.get_output_name()

    # Get the input/output names
    def get_input_name(self):
        input_name = []
        for node in self.onnx_session.get_inputs():
            input_name.append(node.name)
        return input_name

    def get_output_name(self):
        output_name = []
        for node in self.onnx_session.get_outputs():
            output_name.append(node.name)
        return output_name

    # Build the input feed
    def get_input_feed(self, img_tensor):
        input_feed = {}
        for name in self.input_name:
            input_feed[name] = img_tensor
        return input_feed

    # Preprocess the image and run inference
    def inference(self, img_path):
        img = cv2.imread(img_path)
        or_img = cv2.resize(img, (640, 640))
        img = or_img[:, :, ::-1].transpose(2, 0, 1)  # BGR -> RGB and HWC -> CHW
        img = img.astype(dtype=np.float32)
        img /= 255.0
        img = np.expand_dims(img, axis=0)
        input_feed = self.get_input_feed(img)
        pred = self.onnx_session.run(None, input_feed)[0]
        return pred, or_img


# Non-maximum suppression (NMS)
def nms(dets, thresh):
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    areas = (y2 - y1 + 1) * (x2 - x1 + 1)
    scores = dets[:, 4]
    keep = []
    index = scores.argsort()[::-1]

    while index.size > 0:
        i = index[0]
        keep.append(i)
        x11 = np.maximum(x1[i], x1[index[1:]])
        y11 = np.maximum(y1[i], y1[index[1:]])
        x22 = np.minimum(x2[i], x2[index[1:]])
        y22 = np.minimum(y2[i], y2[index[1:]])

        w = np.maximum(0, x22 - x11 + 1)
        h = np.maximum(0, y22 - y11 + 1)

        overlaps = w * h
        ious = overlaps / (areas[i] + areas[index[1:]] - overlaps)
        idx = np.where(ious <= thresh)[0]
        index = index[idx + 1]
    return keep


def xywh2xyxy(x):
    y = np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2
    y[:, 1] = x[:, 1] - x[:, 3] / 2
    y[:, 2] = x[:, 0] + x[:, 2] / 2
    y[:, 3] = x[:, 1] + x[:, 3] / 2
    return y


def filter_box(org_box, conf_thres, iou_thres):  # drop low-quality boxes
    org_box = np.squeeze(org_box)
    conf = org_box[..., 4] > conf_thres
    box = org_box[conf]
    cls_cinf = box[..., 5:]
    cls = []
    for i in range(len(cls_cinf)):
        cls.append(int(np.argmax(cls_cinf[i])))
    all_cls = list(set(cls))
    output = []
    for i in range(len(all_cls)):
        curr_cls = all_cls[i]
        curr_cls_box = []
        curr_out_box = []
        for j in range(len(cls)):
            if cls[j] == curr_cls:
                box[j][5] = curr_cls
                curr_cls_box.append(box[j][:6])
        curr_cls_box = np.array(curr_cls_box)
        curr_cls_box = xywh2xyxy(curr_cls_box)
        curr_out_box = nms(curr_cls_box, iou_thres)
        for k in curr_out_box:
            output.append(curr_cls_box[k])
    output = np.array(output)
    return output


def draw(image, box_data):
    if len(box_data) == 0:  # nothing detected
        return

    boxes = box_data[..., :4].astype(np.int32)
    scores = box_data[..., 4]
    classes = box_data[..., 5].astype(np.int32)

    for box, score, cl in zip(boxes, scores, classes):
        left, top, right, bottom = box  # box is in (x1, y1, x2, y2) format
        print('class: {}, score: {}'.format(CLASSES[cl], score))
        print('box coordinate left,top,right,bottom: [{}, {}, {}, {}]'.format(left, top, right, bottom))

        cv2.rectangle(image, (left, top), (right, bottom), (255, 0, 0), 2)
        cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
                    (left, top),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.6, (0, 0, 255), 2)


if __name__ == "__main__":
    onnx_path = 'test.onnx'  # replace with the path to your ONNX model
    model = YOLOV5(onnx_path)
    output, or_img = model.inference('test.jpg')  # replace with the path to your test image
    outbox = filter_box(output, 0.5, 0.5)
    draw(or_img, outbox)
    cv2.imwrite('res.jpg', or_img)

Real-time video inference

import os
import cv2
import numpy as np
import onnxruntime

# Class names: the custom mask class 'kouzhao' followed by part of the COCO list
CLASSES = ['kouzhao', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
           'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
           'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
           'skis']

class YOLOV5():
    def __init__(self, onnxpath):
        self.onnx_session = onnxruntime.InferenceSession(onnxpath)
        self.input_name = self.get_input_name()
        self.output_name = self.get_output_name()

    # Get the input/output names
    def get_input_name(self):
        input_name = []
        for node in self.onnx_session.get_inputs():
            input_name.append(node.name)
        return input_name

    def get_output_name(self):
        output_name = []
        for node in self.onnx_session.get_outputs():
            output_name.append(node.name)
        return output_name

    # Build the input feed
    def get_input_feed(self, img_tensor):
        input_feed = {}
        for name in self.input_name:
            input_feed[name] = img_tensor
        return input_feed

    # Preprocess the image and run inference
    def inference(self, img):
        or_img = cv2.resize(img, (640, 640))
        img = or_img[:, :, ::-1].transpose(2, 0, 1)  # BGR -> RGB and HWC -> CHW
        img = img.astype(dtype=np.float32)
        img /= 255.0
        img = np.expand_dims(img, axis=0)
        input_feed = self.get_input_feed(img)
        pred = self.onnx_session.run(None, input_feed)[0]
        return pred, or_img

# Non-maximum suppression (NMS)
def nms(dets, thresh):
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    areas = (y2 - y1 + 1) * (x2 - x1 + 1)
    scores = dets[:, 4]
    keep = []
    index = scores.argsort()[::-1]

    while index.size > 0:
        i = index[0]
        keep.append(i)
        x11 = np.maximum(x1[i], x1[index[1:]])
        y11 = np.maximum(y1[i], y1[index[1:]])
        x22 = np.minimum(x2[i], x2[index[1:]])
        y22 = np.minimum(y2[i], y2[index[1:]])

        w = np.maximum(0, x22 - x11 + 1)
        h = np.maximum(0, y22 - y11 + 1)

        overlaps = w * h
        ious = overlaps / (areas[i] + areas[index[1:]] - overlaps)
        idx = np.where(ious <= thresh)[0]
        index = index[idx + 1]
    return keep

def xywh2xyxy(x):
    y = np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2
    y[:, 1] = x[:, 1] - x[:, 3] / 2
    y[:, 2] = x[:, 0] + x[:, 2] / 2
    y[:, 3] = x[:, 1] + x[:, 3] / 2
    return y

def filter_box(org_box, conf_thres, iou_thres):  # drop low-quality boxes
    org_box = np.squeeze(org_box)
    conf = org_box[..., 4] > conf_thres
    box = org_box[conf]
    cls_cinf = box[..., 5:]
    cls = []
    for i in range(len(cls_cinf)):
        cls.append(int(np.argmax(cls_cinf[i])))
    all_cls = list(set(cls))
    output = []
    for i in range(len(all_cls)):
        curr_cls = all_cls[i]
        curr_cls_box = []
        curr_out_box = []
        for j in range(len(cls)):
            if cls[j] == curr_cls:
                box[j][5] = curr_cls
                curr_cls_box.append(box[j][:6])
        curr_cls_box = np.array(curr_cls_box)
        curr_cls_box = xywh2xyxy(curr_cls_box)
        curr_out_box = nms(curr_cls_box, iou_thres)
        for k in curr_out_box:
            output.append(curr_cls_box[k])
    output = np.array(output)
    return output

def draw(image, box_data):
    if len(box_data) == 0:
        return

    boxes = box_data[..., :4].astype(np.int32)
    scores = box_data[..., 4]
    classes = box_data[..., 5].astype(np.int32)

    for box, score, cl in zip(boxes, scores, classes):
        left, top, right, bottom = box  # box is in (x1, y1, x2, y2) format
        print('class: {}, score: {}'.format(CLASSES[cl], score))
        print('box coordinate left,top,right,bottom: [{}, {}, {}, {}]'.format(left, top, right, bottom))

        cv2.rectangle(image, (left, top), (right, bottom), (255, 0, 0), 2)
        cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
                    (left, top),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.6, (0, 0, 255), 2)

def process_video(onnx_path, input_video_path, output_video_path):
    model = YOLOV5(onnx_path)

    cap = cv2.VideoCapture(input_video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)

    # inference() returns the 640x640 resized frame, so the writer size must match;
    # using the source's original (width, height) here would silently produce a broken file
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_video_path, fourcc, fps, (640, 640))

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        output, processed_frame = model.inference(frame)
        outbox = filter_box(output, 0.5, 0.5)
        draw(processed_frame, outbox)

        out.write(processed_frame)

        cv2.imshow('YOLOv5 Inference', processed_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    out.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    onnx_path = 'test.onnx'  # replace with the path to your ONNX model
    input_video_path = 'test.mp4'  # replace with the path to your input video
    output_video_path = 'output.mp4'  # replace with the path for the output video

    process_video(onnx_path, input_video_path, output_video_path)
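
To run on a webcam instead of a video file, pass a device index to cv2.VideoCapture inside process_video; everything else stays the same:

cap = cv2.VideoCapture(0)  # 0 = the default camera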
