Camera object detection with PySide6 and YOLOv5

Functionality: load a YOLOv5 model via PyTorch Hub, open the camera device with OpenCV, and run the capture-and-inference loop in a worker thread.
Reference:
[1] https://learnopencv.com/object-detection-using-yolov5-and-opencv-dnn-in-c-and-python/
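
Before the full GUI script below, here is a minimal sketch of the core idea with no Qt code at all: load a pretrained model from PyTorch Hub, grab one frame from the camera with OpenCV, and read detections from `results.xyxyn` (the same attribute the script uses). The model name `yolov5s` and camera index 0 are assumptions for illustration only.

```python
# Minimal sketch (assumes internet access for torch.hub and a webcam at index 0).
import cv2
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # small pretrained model
model.to('cpu')

cap = cv2.VideoCapture(0)   # open the default camera
ret, frame = cap.read()     # grab a single BGR frame
cap.release()

if ret:
    results = model([frame], size=640)      # inference, NMS included (frame passed BGR, matching the script below)
    det = results.xyxyn[0].cpu().numpy()    # rows: x1, y1, x2, y2, conf, class (normalized)
    for x1, y1, x2, y2, conf, cls in det:
        print(f'{model.names[int(cls)]}: {conf:.2f}')
```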

```python
# -*- coding: utf-8 -*-
"""
Created on Wed Jul  6 10:05:38 2022

@author: wenqingzhou@gmail.com
"""

import uuid
import cv2
import sys
from PySide6.QtCore import Qt, QThread, Slot, Signal, QRunnable, QThreadPool, QObject
from PySide6.QtWidgets import QApplication, QLabel, QMainWindow, QStatusBar
from PySide6.QtGui import QPixmap, QImage, QIcon
import torch
from time import time
import numpy as np
# Load YOLOv5 either from PyTorch Hub (remote) or from a local clone of the repo.
# model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
model = torch.hub.load(r"D:\Github\yolov5", "yolov5m6", source='local', pretrained=True)
model.to('cpu')  # run inference on the CPU

# img = cv2.imread(PATH_TO_IMAGE)
classes = model.names  # class-index -> class-name mapping
# results = model(imgs, size=640)  # includes NMS
def plot_boxes(results, frame):
    """Draw bounding boxes and class labels for detections with confidence >= 0.2."""
    labels, cord = results
    n = len(labels)
    x_shape, y_shape = frame.shape[1], frame.shape[0]
    for i in range(n):
        row = cord[i]
        if row[4] >= 0.2:  # confidence threshold
            # Coordinates are normalized, so scale them back to pixel values.
            x1, y1, x2, y2 = int(row[0]*x_shape), int(row[1]*y_shape), int(row[2]*x_shape), int(row[3]*y_shape)
            bgr = (0, 255, 0)
            cv2.rectangle(frame, (x1, y1), (x2, y2), bgr, 2)
            cv2.putText(frame, classes[int(labels[i])], (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.9, bgr, 2)

    return frame

def score_frame(frame):
    """
    Run the model on a single frame and return the class labels and the
    normalized box coordinates (with confidence in the last column).
    """
    frame = [frame]
    results = model(frame, size=640)  # includes NMS
    labels, cord = results.xyxyn[0][:, -1].cpu().numpy(), results.xyxyn[0][:, :-1].cpu().numpy()
    return labels, cord

class Thread(QThread):
    """QThread variant: grab frames, run detection, and emit the rendered QImage."""
    changePixmap = Signal(QImage)

    def run(self):
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if ret:
                # https://stackoverflow.com/a/55468544/6622587
                results = score_frame(frame)  # includes NMS
                frame = plot_boxes(results, frame)

                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                self.changePixmap.emit(p)

class WorkerSignal(QObject):
    """Signal container: QRunnable is not a QObject, so the signals live on this helper."""
    data = Signal(QImage)
    process_time = Signal(str)

class Worker(QRunnable):
    """QRunnable variant: capture, detect, and emit frames plus the measured FPS."""
    def __init__(self):
        super().__init__()
        self.job_id = uuid.uuid4().hex  # unique id for this worker
        self.signal = WorkerSignal()
        
    def run(self):
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if ret:
                # https://stackoverflow.com/a/55468544/6622587
                start_time = time()
                # Run inference and draw the detections on the frame
                results = score_frame(frame)  # includes NMS
                frame = plot_boxes(results, frame)

                rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgbImage.shape
                bytesPerLine = ch * w
                convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
                p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
                end_time = time()
                fps = 1/np.round(end_time - start_time, 3)
                print(f"Frames Per Second : {fps:.2f}")
                self.signal.data.emit(p)
                self.signal.process_time.emit(f'{fps:.2f}')

class App(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowIcon(QIcon(r"E:\smile.ico"))
        self.initUI()

    def initUI(self):
        self.setWindowTitle('App')
        self.resize(640, 480)
        self.label = QLabel(self)
        self.label.resize(640, 480)
        
        self.statusbar = self.statusBar()
        # self.statusbar = QStatusBar()
        self.statusbar.showMessage('Ready')

        # QThread approach (commented out; the thread pool approach below is used instead)
        # self.th = Thread(self)
        # self.th.changePixmap.connect(self.setImage)  # signal/slot connection
        # self.th.start()

        # QThreadPool + QRunnable approach
        self.thread_pool = QThreadPool()
        self.worker = Worker()
        self.worker.signal.data.connect(self.setImage)
        self.worker.signal.process_time.connect(self.showFPS)
        self.thread_pool.start(self.worker)

        self.show()
    
    @Slot(QImage)
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    @Slot(str)
    def showFPS(self, fps):
        self.statusbar.showMessage(fps)


if __name__ == '__main__':
    # Create the Qt application, reusing an existing instance if one is already
    # running (useful when re-running the script in an interactive environment).
    # app = QApplication(sys.argv)
    if not QApplication.instance():
        app = QApplication(sys.argv)
    else:
        app = QApplication.instance()
    # app.setQuitOnLastWindowClosed(False)
    win = App()
    # win.show()
    sys.exit(app.exec())
```
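
One thing the script never does is stop the capture loop or release the camera when the window closes. A possible fix, sketched below as an addition of my own (the names `StoppableWorker`, `running`, and `stop()` are illustrative and not part of the original code), is a cooperative stop flag that the window's `closeEvent` sets before waiting for the thread pool. The same idea works for the QThread variant.

```python
# Sketch only: a stoppable variant of Worker, assuming it is dropped into the script above
# (it reuses cv2, QImage, Qt, QRunnable, WorkerSignal, score_frame and plot_boxes from there).
class StoppableWorker(QRunnable):
    def __init__(self):
        super().__init__()
        self.signal = WorkerSignal()
        self.running = True                 # polled by the capture loop

    def stop(self):
        self.running = False

    def run(self):
        cap = cv2.VideoCapture(0)
        while self.running:                 # instead of `while True`
            ret, frame = cap.read()
            if ret:
                frame = plot_boxes(score_frame(frame), frame)
                rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                h, w, ch = rgb.shape
                image = QImage(rgb.data, w, h, ch * w, QImage.Format_RGB888)
                self.signal.data.emit(image.scaled(640, 480, Qt.KeepAspectRatio))
        cap.release()                       # free the camera once asked to stop

# In App, stop the worker before the window goes away:
#     def closeEvent(self, event):
#         self.worker.stop()
#         self.thread_pool.waitForDone()
#         super().closeEvent(event)
```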