Worker Safety Protective Equipment Detection with YOLO and PyQt

1. The GUI provides the functions shown below.

2. Implementation code.

```python
import sys
import os
import cv2
import winsound
from datetime import datetime, timedelta
from ultralytics import YOLO
import shutil
import re
import time
import threading
from wxpy import Bot
from ultralytics.utils import LOGGER, colorstr
from ultralytics.utils.plotting import Annotator, colors
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QButtonGroup, QSlider, QRadioButton, QApplication, QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QFormLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QFileDialog, QSpinBox, QDoubleSpinBox, QComboBox


# Send one WeChat alert to `my_friend`, repeated `emphasis_frequency` times;
# the lock serializes sends across worker threads.
def message(my_friend, string, wechat_lock, emphasis_frequency=1, delay=True):
    with wechat_lock:
        for j in range(1, emphasis_frequency + 1):
            try:
                my_friend.send_msg(str(j) + '-' + string)
            except Exception:
                LOGGER.info(f"{colorstr('red', '⚠ERROR: Failed to send WeChat message.')}")
                break
            if delay:
                time.sleep(3)


# Throttles WeChat notifications: each track ID gets at most `message_send_count`
# messages, spaced at least `message_interval_time` seconds apart.
class wechatTracker:
    def __init__(self, message_interval_time, message_send_count):
        self.send_history = {}
        self.message_interval_time = message_interval_time
        self.message_send_count = message_send_count

    def update_tracking(self, track_id, frame_time):
        if track_id not in self.send_history:
            self.send_history[track_id] = {'last_send': frame_time, 'send_count': 1}
            return True
        else:
            if self.message_interval_time <= (frame_time - self.send_history[track_id]['last_send']).total_seconds() and self.send_history[track_id]['send_count'] < self.message_send_count:
                self.send_history[track_id]['last_send'] = frame_time
                self.send_history[track_id]['send_count'] += 1
                return True
        return False


# Throttles image saving: each track ID has its crop saved at most `number_of_saves`
# times, spaced at least `save_interval_time` seconds apart.
class ViolationTracker:
    def __init__(self, save_interval_time, number_of_saves):
        self.track_history = {}
        self.last_alarm_time = datetime(1970, 1, 1, 0, 0, 0)
        self.last_print_time = self.last_alarm_time
        self.save_interval_time = save_interval_time
        self.number_of_saves = number_of_saves

    def update_tracking(self, track_id, frame_time):
        if track_id not in self.track_history:
            self.track_history[track_id] = {'last_saved': frame_time, 'save_count': 1}
            return True
        else:
            if self.save_interval_time <= (frame_time - self.track_history[track_id]['last_saved']).total_seconds() and self.track_history[track_id]['save_count'] < self.number_of_saves:
                self.track_history[track_id]['last_saved'] = frame_time
                self.track_history[track_id]['save_count'] += 1
                return True
        return False


# Core detection loop: handles image-folder, local-video, and live-camera modes,
# saving crops of violations and raising optional sound/WeChat alerts.
def process(config, model, output_folders):
    open_wechat = config["open_wechat"]
    monitor_model = config["monitor_model"]
    image_model = config["image_model"]
    location_info = config["location_info"]
    friend_name = config["friend_name"]
    input_image_path = config["input_image_path"]
    if open_wechat:
        message_threads = []
        wechat_lock = threading.Lock()
        LOGGER.info(f"{colorstr('yellow', '请扫二维码登陆微信.')}")
        bot = Bot()
        my_friend = bot.friends().search(friend_name)[0]
        LOGGER.info(f"{colorstr('green', '微信登陆成功,将给微信好友')}{colorstr('red', friend_name)}{colorstr('green', '发送违规情况.')}")
        if not image_model:
            wechater = wechatTracker(config["message_interval_time"], config["message_send_count"])
    if config["show_live"]:
        frame_w = int(1706 * config["zoom_ratio"])
        frame_h = int(1066 * config["zoom_ratio"])
        window_name = 'Safety Protective Equipment Detection(Powered By Ultralytics And Anperlanch)'
    if image_model:
        image_files = sorted(f for f in os.listdir(input_image_path) if f.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp')))
        num_images = len(image_files)
        for i, filename in enumerate(image_files, 1):
            img = cv2.imread(os.path.join(input_image_path, filename))
            if img is None:  # skip files OpenCV cannot decode
                continue
            annotator = Annotator(img, line_width=1)
            base_name = os.path.splitext(filename)[0]
            results = model.predict(img, classes=[1, 3] if config["detect_helmet_and_reflective_vest"] else [1], verbose=False, conf=config["conf_threshold"], iou=config["iou_threshold"])
            for result in results:
                cls_tensor = result.boxes.cls
                total_class1 = (cls_tensor == 1).sum().item()
                total_class3 = (cls_tensor == 3).sum().item()
                helmet_index = 0
                reflective_index = 0
                for box in result.boxes:
                    cls_id = int(box.cls[0])
                    x1, y1, x2, y2 = map(int, box.xyxy[0])
                    annotator.box_label((x1, y1, x2, y2), label=config["new_names"][cls_id], color=colors(cls_id, True))
                    if (not config["open_restricted_cropping"]) or (config["min_and_max_size"][0] <= (y2 - y1) <= config["min_and_max_size"][1] and config["min_and_max_size"][0] <= (x2 - x1) <= config["min_and_max_size"][1]):
                        if cls_id == 1:
                            save_folder = output_folders['noHelmet']
                            helmet_index += 1
                            save_name = f"{base_name} Type='NoHelmet' Index={helmet_index:02}({total_class1:02}) Time={config['image_time']} Region={location_info}.jpg"
                        else:
                            save_folder = output_folders['noReflectiveVest']
                            reflective_index += 1
                            save_name = f"{base_name} Type='NoReflectiveVest' Index={reflective_index:02}({total_class3:02}) Time={config['image_time']} Region={location_info}.jpg"
                        cv2.imwrite(os.path.join(save_folder, save_name), img[y1:y2, x1:x2])
                warning_msg = ""
                if total_class1 and total_class3:
                    warning_msg = f"NoHelmet({total_class1}) And NoReflectiveVest({total_class3})"
                    sound_type = 1
                elif total_class1:
                    warning_msg = f"NoHelmet({total_class1})"
                    sound_type = 2
                elif total_class3:
                    warning_msg = f"NoReflectiveVest({total_class3})"
                    sound_type = 3
                LOGGER.info(f"{base_name}({i}/{num_images}). " + colorstr('red', 'Detection time: ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S')) + '. ' + colorstr('blue', warning_msg + '.' if warning_msg else 'No violations.'))
                if warning_msg:
                    if config["open_sound"]:
                        LOGGER.info(colorstr('red', '⚠⚠⚠警告⚠⚠⚠:检测图片时若播放报警声将大幅减慢检测速度!'))
                        winsound.Beep(1000 if sound_type == 1 else (2000 if sound_type == 2 else 3000), config["alarm_duration"])
                    if open_wechat:
                        t = threading.Thread(target=message, args=(my_friend, f"{base_name}: {warning_msg}.", wechat_lock))
                        t.start()
                        message_threads.append(t)
                cv2.imwrite(os.path.join(output_folders['result'], f"{base_name} {warning_msg if warning_msg else 'NoViolations'}.jpg"), annotator.im)
            if config["show_live"]:
                cv2.imshow(window_name, cv2.resize(annotator.im, (frame_w, frame_h)))
                if cv2.waitKey(1) & 0xFF == 27:
                    break
    else:
        tracker = ViolationTracker(config["save_interval_time"], config["number_of_saves"])
        if monitor_model:
            cap = cv2.VideoCapture(0)
            config["time_coordinates_and_line_thickness"][0] = 10
            config["time_coordinates_and_line_thickness"][1] = 20
        else:
            cap = cv2.VideoCapture(config["input_video_path"])
            start_time = datetime(*map(int, re.split(r'[-:\s]+', config["video_start_time"])))
            total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        video_writer = cv2.VideoWriter(os.path.join(output_folders['result'], 'output_video.mp4'), cv2.VideoWriter_fourcc(*'mp4v'), cap.get(cv2.CAP_PROP_FPS), (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
        while cap.isOpened():
            success, frame = cap.read()
            if not success:
                break
            if monitor_model:
                current_time = datetime.now()
                frame_time_str = f"Current time: {current_time.strftime('%Y-%m-%d %H:%M:%S')}"
            else:
                current_time = start_time + timedelta(milliseconds=cap.get(cv2.CAP_PROP_POS_MSEC))
                frame_time_str = f"Frame time: {current_time.strftime('%Y-%m-%d %H:%M:%S')} {int(cap.get(1))}/{total_frame}"
            results = model.track(frame, persist=True, tracker="botsort.yaml", classes=[1, 3] if config["detect_helmet_and_reflective_vest"] else [1], verbose=False, conf=config["conf_threshold"], iou=config["iou_threshold"])
            # NOTE: `new_names` is not an argument of the stock Ultralytics Results.plot();
            # this call assumes a locally modified plot()/Annotator that accepts it.
            annotated_frame = results[0].plot(conf=False, line_width=1, new_names=config["new_names"])
            cv2.putText(annotated_frame, frame_time_str, (config["time_coordinates_and_line_thickness"][0], config["time_coordinates_and_line_thickness"][1]), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), config["time_coordinates_and_line_thickness"][2])
            if results[0].boxes.id is not None:
                for box, track_id in zip(results[0].boxes, results[0].boxes.id):
                    track_id = int(track_id)
                    cls_id = int(box.cls[0])
                    x1, y1, x2, y2 = map(int, box.xyxy[0])
                    if config["open_sound"] and (current_time - tracker.last_alarm_time).total_seconds() >= config["alarm_interval_time"]:
                        threading.Thread(target=winsound.Beep, args=(1000 if cls_id == 1 else 2000, config["alarm_duration"])).start()
                        tracker.last_alarm_time = current_time
                    if open_wechat and wechater.update_tracking(track_id, current_time):
                        t = threading.Thread(target=message, args=(my_friend, f"Type={'NoHelmet' if cls_id == 1 else 'NoReflectiveVest'} ID={track_id:04} Time={current_time.strftime('%Y-%m-%d %H:%M:%S')} Count={wechater.send_history[track_id]['send_count']} Region={location_info}", wechat_lock))
                        t.start()
                        message_threads.append(t)
                    if (not config["open_restricted_cropping"]) or (config["min_and_max_size"][0] <= (y2 - y1) <= config["min_and_max_size"][1] and config["min_and_max_size"][0] <= (x2 - x1) <= config["min_and_max_size"][1]):
                        should_save = tracker.update_tracking(track_id, current_time)
                    else:
                        should_save = False
                    if should_save:
                        save_folder = output_folders['noHelmet'] if cls_id == 1 else output_folders['noReflectiveVest']
                        filename = f"Type={'NoHelmet' if cls_id == 1 else 'NoReflectiveVest'} ID={track_id:04} Time={current_time.strftime('%Y-%m-%d %H-%M-%S')} Count={tracker.track_history[track_id]['save_count']} Region={location_info}.jpg"
                        cv2.imwrite(os.path.join(save_folder, filename), frame[y1:y2, x1:x2])
                if (current_time - tracker.last_print_time).total_seconds() >= config["print_interval_time"]:
                    tracker.last_print_time = current_time
                    cls_tensor = results[0].boxes.cls
                    total_class1 = (cls_tensor == 1).sum().item()
                    total_class3 = (cls_tensor == 3).sum().item()
                    warning_msg = ""
                    if total_class1 and total_class3:
                        warning_msg = f"{frame_time_str}. NoHelmet({total_class1}) And NoReflectiveVest({total_class3})."
                    elif total_class1:
                        warning_msg = f"{frame_time_str}. NoHelmet({total_class1})."
                    elif total_class3:
                        warning_msg = f"{frame_time_str}. NoReflectiveVest({total_class3})."
                    if warning_msg:
                        LOGGER.info(f"{colorstr('red', warning_msg)}")
            video_writer.write(annotated_frame)
            if config["show_live"]:
                cv2.imshow(window_name, cv2.resize(annotated_frame, (frame_w, frame_h)))
                if cv2.waitKey(1) & 0xFF == 27:
                    break
        cap.release()
        video_writer.release()
    if config["show_live"]:
        cv2.destroyAllWindows()
    if open_wechat:
        for t in message_threads:
            t.join()
        bot.logout()
        LOGGER.info(f"{colorstr('blue', '微信消息全部发送完毕.')}")
    LOGGER.info(f"{colorstr('green', '图片检测结束.' if image_model else '视频检测结束.')}")


# Recreate the output folder tree, load the selected YOLO weights, and run detection.
def run_detection(config):
    if os.path.exists(config["output_folder"]):
        shutil.rmtree(config["output_folder"])
    output_folders = {
        'result': os.path.join(config["output_folder"], 'result'),
        'noHelmet': os.path.join(config["output_folder"], 'noHelmet'),
        'noReflectiveVest': os.path.join(config["output_folder"], 'noReflectiveVest')
    }
    for folder in output_folders.values():
        os.makedirs(folder, exist_ok=True)
    model = YOLO(config["model_path"] if config["detect_helmet_and_reflective_vest"] else config["helmet_model_path"])
    model.to('cuda' if config["device"] == 'GPU(Faster)' else 'cpu')
    process(config, model, output_folders)


class MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("工人安全防护装备检测操作界面(Powered By Ultralytics And Anperlanch)")
        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        layout = QVBoxLayout()
        form_layout = QFormLayout()

        self.monitor_mode_radio = QRadioButton("实时监控")
        self.local_video_mode_radio = QRadioButton("本地视频")
        self.local_video_mode_radio.setChecked(True)
        self.image_detection_mode_radio = QRadioButton("图片检测")
        h_mode_layout = QHBoxLayout()
        h_mode_layout.addWidget(self.monitor_mode_radio)
        h_mode_layout.addWidget(self.local_video_mode_radio)
        h_mode_layout.addWidget(self.image_detection_mode_radio)
        form_layout.addRow("检测模式:", h_mode_layout)
        mode_group = QButtonGroup(self)
        mode_group.addButton(self.monitor_mode_radio)
        mode_group.addButton(self.local_video_mode_radio)
        mode_group.addButton(self.image_detection_mode_radio)

        self.detect_helmet_and_reflective_vest_radio = QRadioButton("安全帽和反光衣")
        self.detect_helmet_and_reflective_vest_radio.setChecked(True)
        self.detect_helmet_radio = QRadioButton("安全帽")
        h_detect_layout = QHBoxLayout()
        h_detect_layout.addWidget(self.detect_helmet_and_reflective_vest_radio)
        h_detect_layout.addWidget(self.detect_helmet_radio)
        form_layout.addRow("检测类别:", h_detect_layout)
        detect_group = QButtonGroup(self)
        detect_group.addButton(self.detect_helmet_and_reflective_vest_radio)
        detect_group.addButton(self.detect_helmet_radio)

        self.input_image_path_lineedit = QLineEdit()
        self.input_image_path_lineedit.setText("C:/Users/15231/Desktop/images")
        self.input_image_path_button = QPushButton("浏览")
        self.input_image_path_button.clicked.connect(self.browse_image)
        h_image = QHBoxLayout()
        h_image.addWidget(self.input_image_path_lineedit)
        h_image.addWidget(self.input_image_path_button)
        form_layout.addRow("本地图片文件夹路径:", h_image)

        self.image_time_info_lineedit = QLineEdit()
        self.image_time_info_lineedit.setText("2025-03-12")
        form_layout.addRow("图片时间信息:", self.image_time_info_lineedit)

        self.input_video_path_lineedit = QLineEdit()
        self.input_video_path_lineedit.setText("C:/Users/15231/Desktop/video.mp4")
        self.input_video_path_button = QPushButton("浏览")
        self.input_video_path_button.clicked.connect(self.browse_video)
        h_video = QHBoxLayout()
        h_video.addWidget(self.input_video_path_lineedit)
        h_video.addWidget(self.input_video_path_button)
        form_layout.addRow("本地视频文件路径:", h_video)

        self.video_start_time_lineedit = QLineEdit()
        self.video_start_time_lineedit.setText("2023-01-04 16:07:45")
        form_layout.addRow("本地视频开始时间(YYYY-MM-DD HH:MM:SS):", self.video_start_time_lineedit)

        self.location_info_lineedit = QLineEdit()
        self.location_info_lineedit.setText("Gate1Monitoring")
        form_layout.addRow("图片或视频位置信息:", self.location_info_lineedit)

        self.coord_x_spinbox = QSpinBox()
        self.coord_x_spinbox.setRange(10, 1000)
        self.coord_x_spinbox.setValue(430)
        self.coord_y_spinbox = QSpinBox()
        self.coord_y_spinbox.setRange(10, 1000)
        self.coord_y_spinbox.setValue(30)
        self.thickness_spinbox = QSpinBox()
        self.thickness_spinbox.setRange(1, 5)
        self.thickness_spinbox.setValue(2)
        h_coord = QHBoxLayout()
        h_coord.addWidget(QLabel("X轴:"))
        h_coord.addWidget(self.coord_x_spinbox)
        h_coord.addWidget(QLabel("Y轴:"))
        h_coord.addWidget(self.coord_y_spinbox)
        h_coord.addWidget(QLabel("线宽:"))
        h_coord.addWidget(self.thickness_spinbox)
        form_layout.addRow("本地视频时间信息位置和线宽:", h_coord)

        self.model_path_lineedit = QLineEdit()
        self.model_path_lineedit.setText("../../runs/detect/train154/weights/best.pt")
        self.model_path_button = QPushButton("浏览")
        self.model_path_button.clicked.connect(self.browse_model_path)
        h_model = QHBoxLayout()
        h_model.addWidget(self.model_path_lineedit)
        h_model.addWidget(self.model_path_button)
        form_layout.addRow("YOLO模型文件路径(安全帽与反光衣):", h_model)

        self.helmet_model_path_lineedit = QLineEdit()
        self.helmet_model_path_lineedit.setText("D:/personalFiles/objectDetection/comparativeExperiment_smallPaperOne/yolov8Andrtdetr/ultralytics/models/yolo/detect/runs/detect/train58/weights/best.pt")
        self.helmet_model_path_button = QPushButton("浏览")
        self.helmet_model_path_button.clicked.connect(self.browse_helmet_model_path)
        h_helmet_model = QHBoxLayout()
        h_helmet_model.addWidget(self.helmet_model_path_lineedit)
        h_helmet_model.addWidget(self.helmet_model_path_button)
        form_layout.addRow("YOLO模型文件路径(安全帽):", h_helmet_model)

        self.output_folder_lineedit = QLineEdit()
        self.output_folder_lineedit.setText("C:/Users/15231/Desktop/detection_results")
        self.output_folder_button = QPushButton("浏览")
        self.output_folder_button.clicked.connect(self.browse_output_folder)
        h_output = QHBoxLayout()
        h_output.addWidget(self.output_folder_lineedit)
        h_output.addWidget(self.output_folder_button)
        form_layout.addRow("检测结果输出路径:", h_output)

        self.zoom_ratio_dspinbox = QDoubleSpinBox()
        self.zoom_ratio_dspinbox.setRange(0.1, 1.0)
        self.zoom_ratio_dspinbox.setSingleStep(0.1)
        self.zoom_ratio_dspinbox.setValue(0.5)
        form_layout.addRow("显示检测画面缩放比:", self.zoom_ratio_dspinbox)

        self.number_of_saves_spinbox = QSpinBox()
        self.number_of_saves_spinbox.setRange(1, 10)
        self.number_of_saves_spinbox.setValue(3)
        form_layout.addRow("违规图片保存次数:", self.number_of_saves_spinbox)

        self.save_interval_time_spinbox = QSpinBox()
        self.save_interval_time_spinbox.setRange(1, 60)
        self.save_interval_time_spinbox.setValue(10)
        form_layout.addRow("违规图片保存间隔时间:", self.save_interval_time_spinbox)

        self.print_interval_time_spinbox = QSpinBox()
        self.print_interval_time_spinbox.setRange(1, 60)
        self.print_interval_time_spinbox.setValue(1)
        form_layout.addRow("违规行为详情显示间隔时间:", self.print_interval_time_spinbox)

        self.open_sound_checkbox = QCheckBox()
        self.open_sound_checkbox.setChecked(True)
        form_layout.addRow("声音报警:", self.open_sound_checkbox)

        self.alarm_duration_spinbox = QSpinBox()
        self.alarm_duration_spinbox.setRange(500, 2000)
        self.alarm_duration_spinbox.setSingleStep(100)
        self.alarm_duration_spinbox.setValue(1000)
        form_layout.addRow("报警声持续时间:", self.alarm_duration_spinbox)

        self.alarm_interval_time_spinbox = QSpinBox()
        self.alarm_interval_time_spinbox.setRange(1, 60)
        self.alarm_interval_time_spinbox.setValue(5)
        form_layout.addRow("报警声间隔时间:", self.alarm_interval_time_spinbox)

        self.open_wechat_checkbox = QCheckBox()
        self.open_wechat_checkbox.setChecked(False)
        form_layout.addRow("微信报警信息推送:", self.open_wechat_checkbox)

        self.friend_name_lineedit = QLineEdit()
        self.friend_name_lineedit.setText("又几个黄昏")
        # self.friend_name_lineedit.setText("寥落半伤感")
        form_layout.addRow("微信消息接收者昵称:", self.friend_name_lineedit)

        self.message_send_count_spinbox = QSpinBox()
        self.message_send_count_spinbox.setRange(1, 10)
        self.message_send_count_spinbox.setValue(1)
        form_layout.addRow("微信消息发送次数:", self.message_send_count_spinbox)

        self.message_interval_spinbox = QSpinBox()
        self.message_interval_spinbox.setRange(1, 60)
        self.message_interval_spinbox.setValue(10)
        form_layout.addRow("发送微信消息间隔时间:", self.message_interval_spinbox)

        self.show_live_checkbox = QCheckBox()
        self.show_live_checkbox.setChecked(True)
        form_layout.addRow("显示实时检测画面:", self.show_live_checkbox)

        self.device_combobox = QComboBox()
        self.device_combobox.addItems(["GPU(Faster)", "CPU(Slower)"])
        form_layout.addRow("检测运行设备:", self.device_combobox)

        self.new_names_lineedit = QLineEdit()
        self.new_names_lineedit.setText("h,nh,r,nr")
        form_layout.addRow("自定义目标名称:", self.new_names_lineedit)

        self.iou_slider = QSlider()
        self.iou_slider.setOrientation(Qt.Horizontal)
        self.iou_slider.setRange(0, 100)
        self.iou_slider.setValue(45)
        self.iou_slider.setTickInterval(1)
        self.iou_slider.setTickPosition(QSlider.TicksBelow)
        self.iou_slider.valueChanged.connect(self.update_iou_threshold)
        self.iou_label = QLabel("0.45")
        h_layout_iou = QHBoxLayout()
        h_layout_iou.addWidget(self.iou_slider)
        h_layout_iou.addWidget(self.iou_label)
        form_layout.addRow("IoU阈值调节:", h_layout_iou)
        self.update_iou_threshold()

        self.conf_slider = QSlider()
        self.conf_slider.setOrientation(Qt.Horizontal)
        self.conf_slider.setRange(0, 100)
        self.conf_slider.setValue(50)
        self.conf_slider.setTickInterval(1)
        self.conf_slider.setTickPosition(QSlider.TicksBelow)
        self.conf_slider.valueChanged.connect(self.update_conf_threshold)
        self.conf_label = QLabel("0.50")
        h_layout_conf = QHBoxLayout()
        h_layout_conf.addWidget(self.conf_slider)
        h_layout_conf.addWidget(self.conf_label)
        form_layout.addRow("Conf阈值调节:", h_layout_conf)
        self.update_conf_threshold()

        self.open_restricted_cropping_checkbox = QCheckBox()
        self.open_restricted_cropping_checkbox.setChecked(False)
        form_layout.addRow("限制截图尺寸:", self.open_restricted_cropping_checkbox)

        self.min_size_spinbox = QSpinBox()
        self.min_size_spinbox.setRange(1, 4000)
        self.min_size_spinbox.setValue(50)
        self.max_size_spinbox = QSpinBox()
        self.max_size_spinbox.setRange(1, 4000)
        self.max_size_spinbox.setValue(100)
        h_size = QHBoxLayout()
        h_size.addWidget(QLabel("最小:"))
        h_size.addWidget(self.min_size_spinbox)
        h_size.addWidget(QLabel("最大:"))
        h_size.addWidget(self.max_size_spinbox)
        form_layout.addRow("截图尺寸范围:", h_size)

        layout.addLayout(form_layout)
        self.start_button = QPushButton("检测 启动")
        self.start_button.clicked.connect(self.start_detection)
        layout.addWidget(self.start_button)
        central_widget.setLayout(layout)
        self.resize(870, 1000)

    def update_iou_threshold(self):
        self.iou_threshold = self.iou_slider.value() / 100.0
        self.iou_label.setText("{:.2f}".format(self.iou_threshold))

    def update_conf_threshold(self):
        self.conf_threshold = self.conf_slider.value() / 100.0
        self.conf_label.setText("{:.2f}".format(self.conf_threshold))

    def browse_image(self):
        folder = QFileDialog.getExistingDirectory(self, "选择图片检测输入文件夹")
        if folder:
            self.input_image_path_lineedit.setText(folder)

    def browse_video(self):
        filename, _ = QFileDialog.getOpenFileName(self, "选择输入视频文件", "", "Video Files (*.mp4 *.avi *.mkv)")
        if filename:
            self.input_video_path_lineedit.setText(filename)

    def browse_output_folder(self):
        folder = QFileDialog.getExistingDirectory(self, "选择检测结果输出文件夹")
        if folder:
            self.output_folder_lineedit.setText(folder)

    def browse_model_path(self):
        filename, _ = QFileDialog.getOpenFileName(self, "选择YOLO模型文件(安全帽与反光衣)", "", "Model Files (*.pt)")
        if filename:
            self.model_path_lineedit.setText(filename)

    def browse_helmet_model_path(self):
        filename, _ = QFileDialog.getOpenFileName(self, "选择YOLO模型文件(安全帽)", "", "Model Files (*.pt)")
        if filename:
            self.helmet_model_path_lineedit.setText(filename)

    def start_detection(self):
        LOGGER.info(colorstr('blue', '开始检测') + colorstr('red', ('图片.' if self.image_detection_mode_radio.isChecked() else (('监控' if self.monitor_mode_radio.isChecked() else '本地') + '视频.'))))
        config = {
            "monitor_model": self.monitor_mode_radio.isChecked(),
            "video_mode": self.local_video_mode_radio.isChecked(),
            "image_model": self.image_detection_mode_radio.isChecked(),
            "detect_helmet_and_reflective_vest": self.detect_helmet_and_reflective_vest_radio.isChecked(),
            "detect_helmet": self.detect_helmet_radio.isChecked(),
            "input_image_path": self.input_image_path_lineedit.text(),
            "image_time": self.image_time_info_lineedit.text(),
            "input_video_path": self.input_video_path_lineedit.text(),
            "video_start_time": self.video_start_time_lineedit.text(),
            "location_info": self.location_info_lineedit.text(),
            "time_coordinates_and_line_thickness": [self.coord_x_spinbox.value(), self.coord_y_spinbox.value(), self.thickness_spinbox.value()],
            "model_path": self.model_path_lineedit.text(),
            "helmet_model_path": self.helmet_model_path_lineedit.text(),
            "output_folder": self.output_folder_lineedit.text(),
            "zoom_ratio": self.zoom_ratio_dspinbox.value(),
            "number_of_saves": self.number_of_saves_spinbox.value(),
            "save_interval_time": self.save_interval_time_spinbox.value(),
            "print_interval_time": self.print_interval_time_spinbox.value(),
            "open_sound": self.open_sound_checkbox.isChecked(),
            "alarm_duration": self.alarm_duration_spinbox.value(),
            "alarm_interval_time": self.alarm_interval_time_spinbox.value(),
            "open_wechat": self.open_wechat_checkbox.isChecked(),
            "friend_name": self.friend_name_lineedit.text(),
            "message_send_count": self.message_send_count_spinbox.value(),
            "message_interval_time": self.message_interval_spinbox.value(),
            "show_live": self.show_live_checkbox.isChecked(),
            "device": self.device_combobox.currentText(),
            "new_names": [name.strip() for name in self.new_names_lineedit.text().split(',')],
            "iou_threshold": self.iou_threshold,
            "conf_threshold": self.conf_threshold,
            "open_restricted_cropping": self.open_restricted_cropping_checkbox.isChecked(),
            "min_and_max_size": (self.min_size_spinbox.value(), self.max_size_spinbox.value()),
        }
        detection_thread = threading.Thread(target=run_detection, args=(config,))
        detection_thread.start()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
```

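A note on the call `results[0].plot(conf=False, line_width=1, new_names=config["new_names"])` above: the stock Ultralytics `Results.plot()` does not take a `new_names` argument, so the program assumes a locally patched copy of the library. If only the displayed label text needs to change, a rough alternative with an unmodified Ultralytics install is to remap the result's `names` dictionary before plotting. `plot_with_custom_names` below is a hypothetical helper written for illustration, not part of the original program.

```python
# Sketch only: rename the labels drawn by Ultralytics without patching the library.
# Assumes `result` is a single ultralytics Results object (e.g. results[0] from
# model.track(...)) and `new_names` lists one display name per class index.
def plot_with_custom_names(result, new_names, line_width=1):
    # Results.plot() looks up label text in result.names, so overriding that
    # mapping changes what is written next to each box.
    result.names = {i: name for i, name in enumerate(new_names)}
    return result.plot(conf=False, line_width=line_width)
```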
### Object Detection of Hazardous Worker Behavior in Factory Settings and How to Implement It

#### Background
With the advance of industrial automation, workplace safety in factory environments has become a key concern. To protect workers and improve productivity, it is increasingly necessary to use modern computer-vision techniques to monitor how workers operate on site [^1].

#### Choosing and Optimizing a YOLO Model
The YOLO family is widely used in object detection because of its efficiency and accuracy. For detecting hazardous worker behavior in factory scenes, a suitable YOLO version can be customized for the task. YOLOv5, YOLOv7, and YOLOv8, for example, all offer high inference speed and accuracy, so the architecture can be chosen according to the specific requirements [^2][^3].

#### Data Preparation
Before detection can be implemented, a large dataset reflecting real working scenes must be collected and annotated. The data should cover at least the following categories:

- **Missing protective equipment** (e.g., no helmet, gloves, or goggles)
- **Improper operation of machinery**
- **Entering restricted or other high-risk areas**

Raw material can be gathered by photographing the site or recording video clips, and a labeling tool is then used to mark the class of each object in the images. The bounding-box coordinates and class labels are typically stored per image in Pascal VOC (XML) or COCO (JSON) format. (A sketch converting such annotations to YOLO text labels is given after the deployment example at the end of this section.)

#### Training Workflow
The core steps in building a hazardous-behavior detection system based on an improved YOLO model are:

##### 1. Model initialization
Load pretrained weights to speed up convergence and improve generalization. If an official base network is used, the corresponding checkpoint can be downloaded directly from the framework; if the backbone has been customized, the parameters should be fine-tuned via transfer learning.

##### 2. Hyperparameter tuning
Set the batch size, learning rate, and other hyperparameters according to the available hardware. Because lighting in industrial environments can vary sharply, data augmentation is recommended to increase sample diversity and improve robustness.

##### 3. Evaluation
Once training reaches the expected level after a number of epochs, validate overall performance on an independent test set using metrics such as mean average precision (mAP@IoU=0.5:0.95). Precision-recall (PR) curves can also be plotted to visualize the trade-off between recall and precision at different thresholds. (A minimal training and validation sketch also follows at the end of this section.)

#### Deployment
Once the stages above are complete, the model can be applied in the real world. Two common routes are:

- Build a desktop application with the Python programming language and the PyQt GUI library;
- Use OpenCV's image-processing functions to capture and render real-time video streams while recording the time and location of abnormal events for later review.

```python
import cv2
from ultralytics import YOLO

model = YOLO('best.pt')  # load the best model weights

def detect_dangerous_behavior(frame):
    # Run inference on a single frame and draw red boxes around detections.
    results = model.predict(source=frame, conf=0.65)
    for result in results:
        boxes = result.boxes.cpu().numpy()
        for box in boxes:
            r = box.xyxy[0].astype(int)
            cv2.rectangle(frame, (r[0], r[1]), (r[2], r[3]), color=(0, 0, 255), thickness=2)

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    detect_dangerous_behavior(frame)
    cv2.imshow("Danger Detection", frame)
    key = cv2.waitKey(1)
    if key & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
```
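As a companion to the data-preparation step above, here is a minimal sketch of converting Pascal VOC XML annotations into the normalized `class x_center y_center width height` text labels that YOLO trainers expect. The class list, function name, and paths are placeholders for illustration, not values taken from this article.

```python
import xml.etree.ElementTree as ET
from pathlib import Path

# Sketch: convert Pascal VOC XML annotations to YOLO txt labels.
# CLASSES and the folder paths are placeholders for a real dataset.
CLASSES = ['no_helmet', 'unsafe_operation', 'restricted_area']

def voc_to_yolo(xml_path: Path, out_dir: Path):
    root = ET.parse(xml_path).getroot()
    w = float(root.find('size/width').text)
    h = float(root.find('size/height').text)
    lines = []
    for obj in root.iter('object'):
        cls = obj.find('name').text
        if cls not in CLASSES:
            continue
        b = obj.find('bndbox')
        xmin, ymin = float(b.find('xmin').text), float(b.find('ymin').text)
        xmax, ymax = float(b.find('xmax').text), float(b.find('ymax').text)
        # YOLO format: class index, then box center and size normalized to [0, 1].
        xc, yc = (xmin + xmax) / 2 / w, (ymin + ymax) / 2 / h
        bw, bh = (xmax - xmin) / w, (ymax - ymin) / h
        lines.append(f"{CLASSES.index(cls)} {xc:.6f} {yc:.6f} {bw:.6f} {bh:.6f}")
    (out_dir / (xml_path.stem + '.txt')).write_text('\n'.join(lines))
```

Each XML file yields one `.txt` file with the same stem, which matches the label layout the Ultralytics dataset loader expects.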
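Similarly, for the training and evaluation steps described above, the following is a minimal sketch of fine-tuning a pretrained Ultralytics model with some augmentation and then reading mAP from the validation metrics. `yolov8n.pt`, `worker_behavior.yaml`, and the hyperparameter values are placeholders chosen for illustration, not settings from this article.

```python
from ultralytics import YOLO

# Minimal sketch: fine-tune a pretrained YOLO model and evaluate mAP.
model = YOLO('yolov8n.pt')            # start from official pretrained weights

model.train(
    data='worker_behavior.yaml',      # dataset config: train/val paths and class names
    epochs=100,
    batch=16,
    lr0=0.01,                         # initial learning rate
    hsv_h=0.015, hsv_v=0.4,           # color-jitter augmentation for lighting changes
    fliplr=0.5,                       # horizontal-flip augmentation
)

metrics = model.val()                 # validate on the val split defined in the YAML
print(metrics.box.map)                # mAP@IoU=0.5:0.95
print(metrics.box.map50)              # mAP@IoU=0.5
```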