pyqt5+yolov5的AI功能实现……

项目场景:

基于 yolov5 封装在 UI 上的功能实现。此次项目目标:实现一个以"手机"为检测 label 的监测 UI。


配置:

windows 7 64位
Jetson NX (不做处理的话默认达到10帧。勉强能跑)
gtx 1080Ti
yolov5 4.0 + pytorch1.6 + opencv4.5.x +cuda 10.2
Anaconda -------pyqt5
海康网络监控摄像头——rtsp
Jetson Xavier NX ——英伟达家的开发板

实现效果:

1.图档侦测
2.视频侦测
3.USB物体侦测
4.rtsp物体侦测
5.截图、存图、LOG档
6.最大化、最小化、固定窗
7.实时滚动图——侦测Label 显示最近

后续补充优化:
1、GPIO输出语音提示模块
2、工业PLC 网络模块交握
3、侦测计数统计图 total
在这里插入图片描述

# 移植yolov5 detect.py 模型初始化:
import os, threading
import sys
import cv2
import argparse
import random
import torch
import numpy as np
import torch.backends.cudnn as cudnn
import time

from PyQt5 import QtCore, QtGui, QtWidgets

from utils.torch_utils import select_device
from models.experimental import attempt_load
from utils.general import check_img_size, non_max_suppression, scale_coords
from utils.datasets import letterbox
from utils.plots import plot_one_box
from PyQt5.QtWidgets import *

# Module-level toggle state shared by the open/close button handlers:
# `flag` tracks the video-file playback toggle (button_video_open),
# `flag2` tracks the camera/RTSP toggle (button_opencameras_click*).
flag = False
flag2= False

class Ui_MainWindow(QtWidgets.QMainWindow):
    """Main detection window.

    Runs YOLOv5 inference on still images, video files, a USB camera or an
    RTSP stream, draws the detection boxes and renders the annotated frames
    into the central QLabel.  Also supports saving a snapshot of the current
    frame together with the detected label names.
    """

    def __init__(self, parent=None):
        super(Ui_MainWindow, self).__init__(parent)
        # Two timers: timer_video drives video-file playback,
        # timer_video_1 drives live camera / RTSP capture.
        self.timer_video = QtCore.QTimer()
        self.timer_video_1 = QtCore.QTimer()
        self.setupUi(self)
        self.init_slots()
        self.cap = cv2.VideoCapture()
        # Last rendered frame and last detected label names; initialised here
        # so Openvideo_click() cannot hit an AttributeError before the first
        # detection has run.
        self.result = None
        self.reclabel = []

        # ---- model initialisation (mirrors yolov5 detect.py) ----
        parser = argparse.ArgumentParser()
        parser.add_argument('--weights', nargs='+', type=str, default='weights/yolov5s.pt', help='model.pt path(s)')
        parser.add_argument('--source', type=str, default='data/images', help='source')  # file/folder, 0 for webcam
        parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
        parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
        parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
        parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
        parser.add_argument('--view-img', action='store_true', help='display results')
        parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
        parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
        parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
        parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
        parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
        parser.add_argument('--augment', action='store_true', help='augmented inference')
        parser.add_argument('--update', action='store_true', help='update all models')
        parser.add_argument('--project', default='runs/detect', help='save results to project/name')
        parser.add_argument('--name', default='exp', help='save results to project/name')
        parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
        self.opt = parser.parse_args()
        print(self.opt)

        weights, imgsz = self.opt.weights, self.opt.img_size

        self.device = select_device(self.opt.device)
        self.half = self.device.type != 'cpu'  # half precision only supported on CUDA

        cudnn.benchmark = True  # fixed input size -> let cuDNN pick the fastest kernels

        # Load model
        self.model = attempt_load(weights, map_location=self.device)  # load FP32 model
        stride = int(self.model.stride.max())  # model stride
        self.imgsz = check_img_size(imgsz, s=stride)  # check img_size
        if self.half:
            self.model.half()  # to FP16

        # Class names and one random colour per class for box drawing.
        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
        self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in self.names]

    # ---- UI layout (generated with Qt Designer, then hand-tuned) ----
    def setupUi(self, MainWindow):
        """Build all widgets; geometry/stylesheets come from Qt Designer."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1270, 855)
        MainWindow.setBaseSize(QtCore.QSize(5, 0))
        MainWindow.setFixedSize(self.width(), self.height())  # lock the window size
        MainWindow.setStyleSheet("\n" "#MainWindow {background-image: url(C:/Users/Administrator/Anaconda3/envs/pytorch1.6/yolov5/img/pyqt5 backup.png);}")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.frame = QtWidgets.QFrame(self.centralwidget)
        self.frame.setGeometry(QtCore.QRect(9, 10, 1251, 741))
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.frame.setObjectName("frame")
        # Main display label: annotated frames are painted here.
        self.label = QtWidgets.QLabel(self.frame)
        self.label.setGeometry(QtCore.QRect(7, 14, 1231, 721))
        self.label.setTextFormat(QtCore.Qt.AutoText)
        self.label.setObjectName("label")
        self.pushButton = QtWidgets.QPushButton(self.frame)
        self.pushButton.setGeometry(QtCore.QRect(1220, 717, 21, 21))
        self.pushButton.setStyleSheet("")
        self.pushButton.setText("")
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("C:/Users/Administrator/Anaconda3/envs/pytorch1.6/yolov5/img/mm.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton.setIcon(icon)
        self.pushButton.setIconSize(QtCore.QSize(18, 18))
        self.pushButton.setObjectName("pushButton")
        # "train" button: triggers the snapshot (Openvideo_click).
        self.train = QtWidgets.QPushButton(self.frame)
        self.train.setGeometry(QtCore.QRect(1190, 717, 21, 21))
        self.train.setStyleSheet("")
        self.train.setText("")
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap("C:/Users/Administrator/Anaconda3/envs/pytorch1.6/yolov5/img/ppn.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.train.setIcon(icon1)
        self.train.setIconSize(QtCore.QSize(20, 20))
        self.train.setObjectName("train")
        self.pushButton_3 = QtWidgets.QPushButton(self.frame)
        self.pushButton_3.setGeometry(QtCore.QRect(1160, 717, 21, 21))
        self.pushButton_3.setStyleSheet("")
        self.pushButton_3.setText("")
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap("C:/Users/Administrator/Anaconda3/envs/pytorch1.6/yolov5/img/min.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_3.setIcon(icon2)
        self.pushButton_3.setIconSize(QtCore.QSize(18, 18))
        self.pushButton_3.setObjectName("pushButton_3")
        self.label_2 = QtWidgets.QLabel(self.frame)
        self.label_2.setGeometry(QtCore.QRect(430, 720, 511, 411))
        self.label_2.setObjectName("label_2")
        self.btn_opencam_img = QtWidgets.QPushButton(self.centralwidget)
        self.btn_opencam_img.setGeometry(QtCore.QRect(100, 760, 131, 61))
        self.btn_opencam_img.setStyleSheet("color: rgb(255, 255, 255);\n"
"\n"
"font: 75 9pt \"Aharoni\";\n"
"color: rgb(0, 0, 0);\n"
"font: 75 9pt \"微软雅黑\";")
        self.btn_opencam_img.setObjectName("btn_opencam_img")
        self.btn_opencam_video = QtWidgets.QPushButton(self.centralwidget)
        self.btn_opencam_video.setGeometry(QtCore.QRect(310, 760, 131, 61))
        self.btn_opencam_video.setStyleSheet("color: rgb(255, 255, 255);\n"
"font: 75 9pt \"微软雅黑\";\n"
"color: rgb(2, 2, 2);")
        self.btn_opencam_video.setObjectName("btn_opencam_video")
        self.btn_opencam = QtWidgets.QPushButton(self.centralwidget)
        self.btn_opencam.setGeometry(QtCore.QRect(540, 760, 131, 61))
        self.btn_opencam.setStyleSheet("color: rgb(255, 255, 255);\n"
"font: 75 9pt \"微软雅黑\";\n"
"color: rgb(0, 0, 0);")
        self.btn_opencam.setObjectName("btn_opencam")
        self.frame_2 = QtWidgets.QFrame(self.centralwidget)
        self.frame_2.setGeometry(QtCore.QRect(9, 750, 1251, 81))
        self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.frame_2.setObjectName("frame_2")
        self.btn_opencam_2 = QtWidgets.QPushButton(self.frame_2)
        self.btn_opencam_2.setGeometry(QtCore.QRect(760, 10, 131, 61))
        self.btn_opencam_2.setStyleSheet("color: rgb(255, 255, 255);\n"
"font: 75 9pt \"微软雅黑\";\n"
"color: rgb(0, 0, 0);")
        self.btn_opencam_2.setObjectName("btn_opencam_2")
        self.iccon4 = QtWidgets.QLabel(self.frame_2)
        self.iccon4.setGeometry(QtCore.QRect(850, 40, 41, 31))
        self.iccon4.setStyleSheet("\n"
"image: url(C:/Users/Administrator/Anaconda3/envs/pytorch1.6/yolov5/img/0.png);")
        self.iccon4.setText("")
        self.iccon4.setObjectName("iccon4")
        self.btn_opencam_3 = QtWidgets.QPushButton(self.frame_2)
        self.btn_opencam_3.setGeometry(QtCore.QRect(990, 10, 141, 61))
        self.btn_opencam_3.setStyleSheet("color: rgb(255, 255, 255);\n"
"background-color: rgb(127, 127, 127);\n"
"font: 75 14pt \"Aharoni\";\n"
"color: rgb(255, 255, 255);")
        self.btn_opencam_3.setObjectName("btn_opencam_3")
        self.iccon5 = QtWidgets.QLabel(self.frame_2)
        self.iccon5.setGeometry(QtCore.QRect(1090, 40, 41, 31))
        self.iccon5.setText("")
        self.iccon5.setObjectName("iccon5")
        self.down = QtWidgets.QLabel(self.frame_2)
        self.down.setGeometry(QtCore.QRect(1150, -10, 101, 101))
        self.down.setStyleSheet("font: 75 18pt \"Aharoni\";\n"
"border-image: url(C:/Users/Administrator/Anaconda3/envs/pytorch1.6/yolov5/img/down.png);")
        self.down.setText("")
        self.down.setObjectName("down")
        self.iccon = QtWidgets.QLabel(self.centralwidget)
        self.iccon.setGeometry(QtCore.QRect(200, 790, 41, 31))
        self.iccon.setStyleSheet("\n"
"image: url(C:/Users/Administrator/Anaconda3/envs/pytorch1.6/yolov5/img/0.png);")
        self.iccon.setText("")
        self.iccon.setObjectName("iccon")
        self.iccon2 = QtWidgets.QLabel(self.centralwidget)
        self.iccon2.setGeometry(QtCore.QRect(400, 790, 41, 31))
        self.iccon2.setStyleSheet("\n"
"image: url(C:/Users/Administrator/Anaconda3/envs/pytorch1.6/yolov5/img/0.png);")
        self.iccon2.setText("")
        self.iccon2.setObjectName("iccon2")
        self.iccon3 = QtWidgets.QLabel(self.centralwidget)
        self.iccon3.setGeometry(QtCore.QRect(630, 790, 41, 31))
        self.iccon3.setStyleSheet("\n"
"image: url(C:/Users/Administrator/Anaconda3/envs/pytorch1.6/yolov5/img/0.png);")
        self.iccon3.setText("")
        self.iccon3.setObjectName("iccon3")
        self.frame_2.raise_()
        self.frame.raise_()
        self.btn_opencam_img.raise_()
        self.btn_opencam_video.raise_()
        self.btn_opencam.raise_()
        self.iccon.raise_()
        self.iccon2.raise_()
        self.iccon3.raise_()
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1270, 23))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set the user-visible texts (Designer-generated hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "实习工厂_SC"))
        self.label.setText(_translate("MainWindow", "显示区"))
        self.label_2.setText(_translate("MainWindow", "显示区"))
        self.btn_opencam_img.setText(_translate("MainWindow", "Images\n打开图档"))
        self.btn_opencam_video.setText(_translate("MainWindow", "Video\n视频选择"))
        # Fixed typo in the user-facing text: "Camear" -> "Camera".
        self.btn_opencam.setText(_translate("MainWindow", "Camera\nUSB摄像头"))
        self.btn_opencam_2.setText(_translate("MainWindow", "Rtsp\n网络摄像头"))
        self.btn_opencam_3.setText(_translate("MainWindow", "Action\n超级播放器"))

    # NOTE: this def was previously indented with 3 spaces, which made the
    # whole module fail to import with an IndentationError.
    def init_slots(self):
        """Wire buttons and timers to their handler slots."""
        self.btn_opencam_img.clicked.connect(self.button_image_open)
        self.btn_opencam_video.clicked.connect(self.button_video_open)
        self.timer_video.timeout.connect(self.show_video_frame)
        self.btn_opencam.clicked.connect(self.button_opencameras_click_0)
        self.btn_opencam_2.clicked.connect(self.button_opencameras_click)
        self.timer_video_1.timeout.connect(self.show_video_frame_1)
        self.train.clicked.connect(self.Openvideo_click)

    def init_logo(self):
        """Restore the placeholder text after a stream ends.

        Previously this method was called but never defined, so reaching the
        end of a video raised an AttributeError.
        """
        self.label.setText("显示区")

    # ---- per-frame detection + drawing ----
    def show_video_frame(self):
        """Timer slot for video-file playback: grab a frame, run YOLOv5,
        draw the boxes and display the result; stop cleanly at end of stream."""
        name_list = []

        ret, img = self.cap.read()  # local name: do not shadow the global `flag`
        if img is not None:
            showimg = img
            with torch.no_grad():
                img = letterbox(img, new_shape=self.opt.img_size)[0]
                # Convert BGR -> RGB and HWC -> CHW for the model.
                img = img[:, :, ::-1].transpose(2, 0, 1)
                img = np.ascontiguousarray(img)
                img = torch.from_numpy(img).to(self.device)
                img = img.half() if self.half else img.float()  # uint8 to fp16/32
                img /= 255.0  # 0 - 255 to 0.0 - 1.0
                if img.ndimension() == 3:
                    img = img.unsqueeze(0)
                # Inference
                pred = self.model(img, augment=self.opt.augment)[0]
                # Apply NMS
                pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres,
                                           classes=self.opt.classes, agnostic=self.opt.agnostic_nms)
                # Process detections
                for i, det in enumerate(pred):  # detections per image
                    if det is not None and len(det):
                        # Rescale boxes from inference size back to frame size.
                        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                        for *xyxy, conf, cls in reversed(det):
                            label = '%s %.2f' % (self.names[int(cls)], conf)
                            name_list.append(self.names[int(cls)])
                            print(label)
                            plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)

            show = cv2.resize(showimg, (1280, 720))
            self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
            # Pass bytesPerLine explicitly so QImage never assumes
            # 32-bit-aligned scanlines (frames would render skewed otherwise).
            showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
                                     self.result.shape[1] * 3, QtGui.QImage.Format_RGB888)
            self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
        else:
            # End of stream: stop the timer, free the capture, reset the view.
            self.timer_video.stop()
            self.cap.release()
            self.label.clear()
            self.init_logo()

    def show_video_frame_1(self):
        """Timer slot for live camera / RTSP capture.

        Same pipeline as show_video_frame(), but additionally records the
        detected label names in self.reclabel for the snapshot feature.
        """
        name_list = []

        ret, img = self.cap.read()
        if img is not None:
            showimg = img
            with torch.no_grad():
                img = letterbox(img, new_shape=self.opt.img_size)[0]
                # Convert BGR -> RGB and HWC -> CHW for the model.
                img = img[:, :, ::-1].transpose(2, 0, 1)
                img = np.ascontiguousarray(img)
                img = torch.from_numpy(img).to(self.device)
                img = img.half() if self.half else img.float()  # uint8 to fp16/32
                img /= 255.0  # 0 - 255 to 0.0 - 1.0
                if img.ndimension() == 3:
                    img = img.unsqueeze(0)
                # Inference
                pred = self.model(img, augment=self.opt.augment)[0]
                # Apply NMS
                pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres,
                                           classes=self.opt.classes, agnostic=self.opt.agnostic_nms)
                # Process detections
                for i, det in enumerate(pred):  # detections per image
                    if det is not None and len(det):
                        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                        for *xyxy, conf, cls in reversed(det):
                            label = '%s %.2f' % (self.names[int(cls)], conf)
                            name_list.append(self.names[int(cls)])
                            print(label)
                            plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
                # Remember the labels of the frame currently on screen
                # (was previously reassigned inside the per-box loop).
                if name_list:
                    self.reclabel = name_list

            show = cv2.resize(showimg, (1280, 720))
            self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
            showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
                                     self.result.shape[1] * 3, QtGui.QImage.Format_RGB888)
            self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
        else:
            self.timer_video_1.stop()
            self.cap.release()
            self.label.clear()
            self.init_logo()

    # ---- still-image detection ----
    def button_image_open(self):
        """Pick an image file, run one detection pass and display the result."""
        print('button_image_open')
        name_list = []

        img_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "打开图片", "", "*.jpg;;*.png;;All Files(*)")
        if not img_name:
            return  # dialog cancelled: previously this raised a spurious warning
        ok = self.cap.open(img_name)
        if ok == False:
            QtWidgets.QMessageBox.warning(self, u"Warning", u"打开图片失败", buttons=QtWidgets.QMessageBox.Ok,
                                          defaultButton=QtWidgets.QMessageBox.Ok)
        else:
            img = cv2.imread(img_name)
            print(img_name)
            showimg = img
            with torch.no_grad():
                img = letterbox(img, new_shape=self.opt.img_size)[0]
                # Convert BGR -> RGB and HWC -> CHW for the model.
                img = img[:, :, ::-1].transpose(2, 0, 1)
                img = np.ascontiguousarray(img)
                img = torch.from_numpy(img).to(self.device)
                img = img.half() if self.half else img.float()  # uint8 to fp16/32
                img /= 255.0  # 0 - 255 to 0.0 - 1.0
                if img.ndimension() == 3:
                    img = img.unsqueeze(0)
                # Inference
                pred = self.model(img, augment=self.opt.augment)[0]
                # Apply NMS
                pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres,
                                           classes=self.opt.classes, agnostic=self.opt.agnostic_nms)
                print(pred)
                # Process detections
                for i, det in enumerate(pred):
                    if det is not None and len(det):
                        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                        for *xyxy, conf, cls in reversed(det):
                            label = '%s %.2f' % (self.names[int(cls)], conf)
                            name_list.append(self.names[int(cls)])
                            plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)

            # Format_RGB32 expects 4 bytes/pixel in BGRA order (little-endian);
            # pass the row stride explicitly to avoid skew on odd widths.
            self.result = cv2.cvtColor(showimg, cv2.COLOR_BGR2BGRA)
            self.result = cv2.resize(self.result, (1280, 720), interpolation=cv2.INTER_AREA)
            self.QtImg = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
                                      self.result.shape[1] * 4, QtGui.QImage.Format_RGB32)
            self.label.setPixmap(QtGui.QPixmap.fromImage(self.QtImg))

    # ---- video-file toggle ----
    def button_video_open(self):
        """Toggle video-file playback: first click opens a file and starts the
        timer, second click stops and releases everything."""
        global flag
        if flag == False:
            flag = True
            video_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "打开视频", "", "*.mp4;;*.avi;;All Files(*)")
            if not video_name:
                flag = False  # dialog cancelled: stay in the "closed" state
                return
            flag = self.cap.open(video_name)
            if flag == False:
                QtWidgets.QMessageBox.warning(self, u"Warning", u"打开视频失败", buttons=QtWidgets.QMessageBox.Ok,
                                              defaultButton=QtWidgets.QMessageBox.Ok)
            else:
                self.timer_video.start(30)  # ~33 fps playback
                self.btn_opencam_video.setText(u'关闭识别')
        else:
            flag = False
            self.timer_video.stop()
            self.cap.release()
            self.label.clear()
            self.btn_opencam_video.setText(u'打开视频')

    # ---- RTSP webcam toggle ----
    def button_opencameras_click(self):
        """Toggle the RTSP network camera stream."""
        global flag2
        self.timer_video_1.stop()
        self.cap.release()
        if flag2 == False:
            flag2 = True
            # To use a different RTSP camera, change the URL below, e.g.
            # "rtsp://admin:test123456@192.168.10.65:554/MPEG-4/ch1/main/av_stream"
            ok = self.cap.open("rtsp://admin:yian1234@192.168.1.56:554/h264/1/main")
            if ok == False:
                QtWidgets.QMessageBox.warning(self, u"Warning", u"请检测相机与电脑是否连接正确",
                                              buttons=QtWidgets.QMessageBox.Ok,
                                              defaultButton=QtWidgets.QMessageBox.Ok)
                self.btn_opencam.setDisabled(True)
                self.btn_opencam_2.setText(u'重新启动')
            else:
                self.timer_video_1.start(0)  # fire as fast as the event loop allows
                self.btn_opencam_2.setText(u'关闭识别')
                self.btn_opencam.setDisabled(True)  # USB and RTSP are mutually exclusive
        else:
            flag2 = False
            self.timer_video_1.stop()
            self.cap.release()
            self.label.clear()
            self.btn_opencam_2.setText(u'Open Webcam')
            self.btn_opencam.setDisabled(False)

    # ---- USB camera toggle ----
    def button_opencameras_click_0(self):
        """Toggle the local USB camera (device index 0)."""
        global flag2
        self.timer_video_1.stop()
        self.cap.release()
        if flag2 == False:
            flag2 = True
            ok = self.cap.open(0)  # device 0 = default local webcam
            if ok == False:
                QtWidgets.QMessageBox.warning(self, u"Warning", u"请检测相机与电脑是否连接正确",
                                              buttons=QtWidgets.QMessageBox.Ok,
                                              defaultButton=QtWidgets.QMessageBox.Ok)
                self.btn_opencam_2.setDisabled(True)
                self.btn_opencam.setText(u'重新启动')
            else:
                self.timer_video_1.start(0)
                self.btn_opencam.setText(u'关闭识别')
                self.btn_opencam_2.setDisabled(True)  # USB and RTSP are mutually exclusive
        else:
            flag2 = False
            self.timer_video_1.stop()
            self.cap.release()
            self.label.clear()
            self.btn_opencam.setText(u'Open Camera')
            self.btn_opencam_2.setDisabled(False)

    # ---- snapshot ----
    def Openvideo_click(self):
        """Save the current annotated frame plus its label names under save/.

        Previously this crashed with AttributeError when clicked before any
        frame had been rendered, and assumed the save/ directory existed.
        """
        if self.result is None:
            return  # nothing rendered yet
        os.makedirs('save', exist_ok=True)
        stamp = str(time.time())
        frame = self.result
        if frame.shape[2] == 4:
            # Image path stores a BGRA frame; drop alpha before writing JPEG.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)
        else:
            # Video/camera paths store RGB; convert back to BGR for imwrite.
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        cv2.imwrite('save/' + stamp + '.jpg', frame)
        print(self.reclabel)
        # `with` guarantees the log file is closed even if a write fails.
        with open('save/' + stamp + '.txt', 'a') as fw:
            for name in self.reclabel:
                fw.write(name + '\n')







###---大屏显示
class B(QtWidgets.QMainWindow):
    """Full-screen (1920x1080) viewer: runs YOLOv5 on the RTSP stream and
    shows the annotated frames on one large label."""

    def __init__(self, parent=None):
        super(B, self).__init__(parent)
        self.setupUi(self)
        self.timer_video = QtCore.QTimer()

        self.init_slots()
        self.cap = cv2.VideoCapture()

        # ---- model initialisation (mirrors yolov5 detect.py) ----
        parser = argparse.ArgumentParser()
        parser.add_argument('--weights', nargs='+', type=str, default='weights/yolov5s.pt', help='model.pt path(s)')
        parser.add_argument('--source', type=str, default='data/images', help='source')  # file/folder, 0 for webcam
        parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
        parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
        parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
        parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
        parser.add_argument('--view-img', action='store_true', help='display results')
        parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
        parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
        parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
        parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
        parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
        parser.add_argument('--augment', action='store_true', help='augmented inference')
        parser.add_argument('--update', action='store_true', help='update all models')
        parser.add_argument('--project', default='runs/detect', help='save results to project/name')
        parser.add_argument('--name', default='exp', help='save results to project/name')
        parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
        self.opt = parser.parse_args()
        print(self.opt)

        weights, imgsz = self.opt.weights, self.opt.img_size

        self.device = select_device(self.opt.device)
        self.half = self.device.type != 'cpu'  # half precision only supported on CUDA

        cudnn.benchmark = True  # fixed input size -> let cuDNN pick the fastest kernels

        # Load model
        self.model = attempt_load(weights, map_location=self.device)  # load FP32 model
        stride = int(self.model.stride.max())  # model stride
        self.imgsz = check_img_size(imgsz, s=stride)  # check img_size
        if self.half:
            self.model.half()  # to FP16

        # Class names and one random colour per class for box drawing.
        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
        self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in self.names]

    def setupUi(self, MainWindow):
        """Build the minimal 1920x1080 layout: one label and one button."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1920, 1080)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(9, 9, 1920, 1080))
        self.label.setObjectName("label")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(1800, 910, 100, 50))
        self.pushButton.setObjectName("pushButton")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1491, 23))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set the user-visible texts (Designer-generated hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "TextLabel"))
        self.pushButton.setText(_translate("MainWindow", "点击3秒触发"))

    def init_slots(self):
        """Wire the frame timer and the start/stop button."""
        self.timer_video.timeout.connect(self.show_video_frame)
        self.pushButton.clicked.connect(self.button_opencameras_click)

    def init_logo(self):
        """Restore the placeholder text after the stream ends.

        Previously this method was called but never defined, so reaching the
        end of the stream raised an AttributeError.
        """
        self.label.setText("TextLabel")

    def show_video_frame(self):
        """Timer slot: grab a frame from the RTSP stream, run YOLOv5, draw
        boxes and display at 1920x1080; stop cleanly at end of stream."""
        name_list = []

        ret, img = self.cap.read()  # local name: do not shadow the global `flag`
        if img is not None:
            showimg = img
            with torch.no_grad():
                img = letterbox(img, new_shape=self.opt.img_size)[0]
                # Convert BGR -> RGB and HWC -> CHW for the model.
                img = img[:, :, ::-1].transpose(2, 0, 1)
                img = np.ascontiguousarray(img)
                img = torch.from_numpy(img).to(self.device)
                img = img.half() if self.half else img.float()  # uint8 to fp16/32
                img /= 255.0  # 0 - 255 to 0.0 - 1.0
                if img.ndimension() == 3:
                    img = img.unsqueeze(0)
                # Inference
                pred = self.model(img, augment=self.opt.augment)[0]
                # Apply NMS
                pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres,
                                           classes=self.opt.classes, agnostic=self.opt.agnostic_nms)
                # Process detections
                for i, det in enumerate(pred):  # detections per image
                    if det is not None and len(det):
                        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                        for *xyxy, conf, cls in reversed(det):
                            label = '%s %.2f' % (self.names[int(cls)], conf)
                            name_list.append(self.names[int(cls)])
                            print(label)
                            plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)

            show = cv2.resize(showimg, (1920, 1080))
            self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
            # Pass bytesPerLine explicitly so QImage never assumes
            # 32-bit-aligned scanlines.
            showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
                                     self.result.shape[1] * 3, QtGui.QImage.Format_RGB888)
            self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
        else:
            self.timer_video.stop()
            self.cap.release()
            self.label.clear()
            self.init_logo()

    def button_opencameras_click(self):
        """Toggle the RTSP stream for the big-screen view.

        NOTE(review): this shares the module-level `flag2` with the main
        window's camera toggles, so opening a camera there flips this
        button's state too — presumably intentional mutual exclusion, but
        worth confirming.
        """
        global flag2
        self.timer_video.stop()
        self.cap.release()
        if flag2 == False:
            flag2 = True
            # To use a different RTSP camera, change the URL below, e.g.
            # "rtsp://admin:test123456@192.168.10.65:554/MPEG-4/ch1/main/av_stream"
            ok = self.cap.open("rtsp://admin:yian1234@192.168.1.56:554/h264/1/main")
            if ok == False:
                QtWidgets.QMessageBox.warning(self, u"Warning", u"请检测相机与电脑是否连接正确",
                                              buttons=QtWidgets.QMessageBox.Ok,
                                              defaultButton=QtWidgets.QMessageBox.Ok)
                self.pushButton.setText(u'重新启动')
            else:
                self.timer_video.start(0)  # fire as fast as the event loop allows
                self.pushButton.setText(u'暂停')
        else:
            flag2 = False
            self.timer_video.stop()
            self.cap.release()
            self.pushButton.setText(u'Open Webcam')








#滚动窗入口
class ChildWindow(QMainWindow):
    """Rolling-gallery window.

    Shows the most recent capture from ./images as a large preview plus the
    five newest captures as a strip of thumbnails, refreshed once per second
    by a background thread.
    """

    def __init__(self):
        super().__init__()

        # One large preview label...
        self.label0 = QLabel(self)
        # ...and five thumbnail labels (newest on the left).
        self.label1 = QLabel(self)
        self.label2 = QLabel(self)
        self.label3 = QLabel(self)
        self.label4 = QLabel(self)
        self.label5 = QLabel(self)

        self.initUI(self)

    def initUI(self, MainWindow):
        """Size/skin the window and start the background refresh thread."""
        MainWindow.resize(1269, 888)
        MainWindow.setStyleSheet("background-image: url(C:/Users/Administrator/Anaconda3/envs/pytorch1.6/yolov5/img/pyqt5 backup.png);")
        self.setWindowTitle('AUO-滚动图系列')

        # daemon=True: previously a non-daemon thread kept the process alive
        # forever after the windows were closed.
        th = threading.Thread(target=self.show_images, daemon=True)
        th.start()

    def _show_on(self, label, img_path, x, y, w, h):
        """Load img_path, scale it to w x h and display it on label.

        Silently skips files OpenCV cannot read (e.g. partially written
        captures), which previously crashed the refresh loop.
        """
        src = cv2.imread(img_path)
        if src is None:
            return
        # Resize to exactly the label size; thumbnails 3-5 were previously
        # resized to 200x120 and clipped by their 160x120 labels.
        src = cv2.resize(src, (w, h))
        src = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
        img = QtGui.QImage(src.data, src.shape[1], src.shape[0],
                           src.shape[1] * 3, QtGui.QImage.Format_RGB888)
        label.setGeometry(QtCore.QRect(x, y, w, h))
        label.setPixmap(QtGui.QPixmap(img))

    def show_images(self):
        """Refresh loop: every second, display the newest images from ./images.

        NOTE(review): Qt widgets are updated here from a worker thread; this
        often works in practice with PyQt5 but is not guaranteed thread-safe —
        consider driving this from a QTimer on the GUI thread instead.
        """
        thumbs = [self.label1, self.label2, self.label3, self.label4, self.label5]
        while True:
            path = './images'
            try:
                filenames = os.listdir(path)
            except OSError:
                time.sleep(1)  # directory missing/unreadable: retry later
                continue
            filenames = [os.path.join(path, filename) for filename in filenames]
            # Newest capture first (by file creation time).
            filenames.sort(key=lambda fp: os.path.getctime(fp), reverse=True)
            newest = filenames[:5]

            # Large preview shows the newest file; previously fewer than five
            # files in the directory raised an IndexError here.
            if newest:
                self._show_on(self.label0, newest[0], 50, 50, 960, 600)
            for i, lbl in enumerate(thumbs):
                if i < len(newest):
                    self._show_on(lbl, newest[i], 50 + 200 * i, 720, 160, 120)

            time.sleep(1)












if __name__ == '__main__':
    # Build the Qt application and the three windows: the main detector UI,
    # the big-screen viewer (B) and the rolling-gallery window.
    application = QtWidgets.QApplication(sys.argv)
    main_window = Ui_MainWindow()
    big_screen = B()
    gallery = ChildWindow()
    main_window.show()
    # Secondary windows open on demand from the main window's buttons.
    main_window.pushButton.clicked.connect(big_screen.show)
    main_window.btn_opencam_3.clicked.connect(gallery.show)
    sys.exit(application.exec_())





# 总结
刚入门。很不错的学习例子。做个记录。第一个UI完成  噢耶~~~~  







  • 1
    点赞
  • 41
    收藏
    觉得还不错? 一键收藏
  • 5
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 5
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值