# Code listing 95: UI_interface4.py *********
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UI_interface4.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import cv2
import argparse
import random
import torch
import numpy as np
import torch.backends.cudnn as cudnn
from utils.torch_utils import select_device
from models.experimental import attempt_load
from utils.general import check_img_size, non_max_suppression, scale_coords
from utils.datasets import letterbox
from utils.plots import plot_one_box
from openpyxl import load_workbook
import xlrd
import xlwt
class Ui_MainWindow(QtWidgets.QMainWindow):
    """Main window for a YOLOv5 detection UI (image / video / camera / Excel export)."""

    def __init__(self, parent=None):
        """Build the UI, parse command-line options, and load the YOLOv5 model once.

        NOTE: this does heavy work in the constructor (disk I/O for the model,
        argparse on sys.argv) — acceptable for a single-window tool.
        """
        super(Ui_MainWindow, self).__init__(parent)
        self.timer_video = QtCore.QTimer()  # drives per-frame video/camera inference
        self.setupUi(self)
        self.init_logo()
        self.init_slots()
        self.cap = cv2.VideoCapture()  # opened later for a video file or a camera
        self.out = None  # cv2.VideoWriter, created when a stream is opened
        # self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'XVID'), 20.0, (640, 480))
        parser = argparse.ArgumentParser()
        parser.add_argument('--weights', nargs='+', type=str, default='E:/555/555/yolov5-master/runs/train/exp29/weights/best.pt', help='model.pt path(s)')
        parser.add_argument('--source', type=str, default='0', help='source')  # file/folder, 0 for webcam
        parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
        parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
        parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
        parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
        parser.add_argument('--view-img', action='store_true', help='display results')
        parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
        parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
        parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
        parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
        parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
        parser.add_argument('--augment', action='store_true', help='augmented inference')
        parser.add_argument('--update', action='store_true', help='update all models')
        parser.add_argument('--project', default='runs/detect', help='save results to project/name')
        parser.add_argument('--name', default='exp', help='save results to project/name')
        parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
        self.opt = parser.parse_args()
        print(self.opt)
        source, weights, view_img, save_txt, imgsz = (
            self.opt.source, self.opt.weights, self.opt.view_img,
            self.opt.save_txt, self.opt.img_size)
        self.device = select_device(self.opt.device)
        self.half = self.device.type != 'cpu'  # half precision only supported on CUDA
        cudnn.benchmark = True
        # Load model
        self.model = attempt_load(weights, map_location=self.device)  # load FP32 model
        stride = int(self.model.stride.max())  # model stride
        self.imgsz = check_img_size(imgsz, s=stride)  # check img_size
        if self.half:
            self.model.half()  # to FP16
        # Class names and one random BGR colour per class for box drawing
        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
        self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in self.names]
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1936, 1036)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget.setGeometry(QtCore.QRect(30, 40, 152, 822))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(80)
self.verticalLayout.setObjectName("verticalLayout")
self.pushButton_img = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_img.sizePolicy().hasHeightForWidth())
self.pushButton_img.setSizePolicy(sizePolicy)
self.pushButton_img.setMinimumSize(QtCore.QSize(150, 100))
self.pushButton_img.setMaximumSize(QtCore.QSize(150, 100))
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setPointSize(12)
self.pushButton_img.setFont(font)
self.pushButton_img.setObjectName("pushButton_img")
self.verticalLayout.addWidget(self.pushButton_img)
self.pushButton_camera = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_camera.sizePolicy().hasHeightForWidth())
self.pushButton_camera.setSizePolicy(sizePolicy)
self.pushButton_camera.setMinimumSize(QtCore.QSize(150, 100))
self.pushButton_camera.setMaximumSize(QtCore.QSize(150, 100))
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setPointSize(12)
self.pushButton_camera.setFont(font)
self.pushButton_camera.setObjectName("pushButton_camera")
self.verticalLayout.addWidget(self.pushButton_camera, 0, QtCore.Qt.AlignHCenter)
self.pushButton_video = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_video.sizePolicy().hasHeightForWidth())
self.pushButton_video.setSizePolicy(sizePolicy)
self.pushButton_video.setMinimumSize(QtCore.QSize(150, 100))
self.pushButton_video.setMaximumSize(QtCore.QSize(150, 100))
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setPointSize(12)
self.pushButton_video.setFont(font)
self.pushButton_video.setObjectName("pushButton_video")
self.verticalLayout.addWidget(self.pushButton_video, 0, QtCore.Qt.AlignHCenter)
self.pushButton_Transfer_data = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_Transfer_data.sizePolicy().hasHeightForWidth())
self.pushButton_Transfer_data.setSizePolicy(sizePolicy)
self.pushButton_Transfer_data.setMinimumSize(QtCore.QSize(150, 100))
self.pushButton_Transfer_data.setMaximumSize(QtCore.QSize(150, 100))
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setPointSize(12)
self.pushButton_Transfer_data.setFont(font)
self.pushButton_Transfer_data.setObjectName("pushButton_Transfer_data")
self.verticalLayout.addWidget(self.pushButton_Transfer_data)
self.pushButton_Transfer_data_2 = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_Transfer_data_2.sizePolicy().hasHeightForWidth())
self.pushButton_Transfer_data_2.setSizePolicy(sizePolicy)
self.pushButton_Transfer_data_2.setMinimumSize(QtCore.QSize(150, 100))
self.pushButton_Transfer_data_2.setMaximumSize(QtCore.QSize(150, 100))
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setPointSize(12)
self.pushButton_Transfer_data_2.setFont(font)
self.pushButton_Transfer_data_2.setObjectName("pushButton_Transfer_data_2")
self.verticalLayout.addWidget(self.pushButton_Transfer_data_2)
self.verticalLayout.setStretch(2, 1)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(280, 40, 791, 821))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(1110, 40, 721, 821))
self.label_2.setLineWidth(2)
self.label_2.setObjectName("label_2")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1936, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton_img.setText(_translate("MainWindow", "图片检测"))
self.pushButton_camera.setText(_translate("MainWindow", "摄像头检测"))
self.pushButton_video.setText(_translate("MainWindow", "视频检测"))
self.pushButton_Transfer_data.setText(_translate("MainWindow", "传递数据"))
self.pushButton_Transfer_data_2.setText(_translate("MainWindow", "帧检测"))
self.label.setText(_translate("MainWindow", "TextLabel"))
self.label_2.setText(_translate("MainWindow", "TextLabel"))
def init_slots(self):
self.pushButton_img.clicked.connect(self.button_image_open)
self.pushButton_video.clicked.connect(self.button_video_open)
self.pushButton_camera.clicked.connect(self.button_camera_open)
self.pushButton_Transfer_data.clicked.connect(self.pushButton_Transfer_data_open)
self.timer_video.timeout.connect(self.show_video_frame)
self.timer_video.timeout.connect(self.show_video_frame_2)
def init_logo(self):
pix = QtGui.QPixmap('wechat.jpg')
self.label.setScaledContents(True)
self.label_2.setScaledContents(True)
self.label.setPixmap(pix)
self.label_2.setPixmap(pix)
def button_image_open(self):
print('button_image_open')
name_list = []
img_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open image", "", "*.jpg;;*.png;;All Files(*)")
img = cv2.imread(img_name)
print(img_name)
showimg = img
with torch.no_grad():
img = letterbox(img, new_shape=self.opt.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
img = torch.from_numpy(img).to(self.device)
img = img.half() if self.half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
pred = self.model(img, augment=self.opt.augment)[0]
# Apply NMS
pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
agnostic=self.opt.agnostic_nms)
print(pred)
# Process detections
for i, det in enumerate(pred):
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
for *xyxy, conf, cls in reversed(det):
label = '%s %.2f' % (self.names[int(cls)], conf)
name_list.append(self.names[int(cls)])
plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
cv2.imwrite('prediction.jpg', showimg)
self.result = cv2.cvtColor(showimg, cv2.COLOR_BGR2BGRA)
self.result = cv2.resize(self.result, (640, 480), interpolation=cv2.INTER_AREA)
self.QtImg = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
QtGui.QImage.Format_RGB32)
self.label.setPixmap(QtGui.QPixmap.fromImage(self.QtImg))
self.label.setStyleSheet("border: 2px solid red") # 添加,设置窗口边界颜色
self.label_2.setPixmap(QtGui.QPixmap.fromImage(self.QtImg)) # 添加
self.label_2.setStyleSheet("border: 2px solid blue") # 添加,设置窗口边界颜色
def button_video_open(self):
video_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open video", "", "*.mp4;;*.avi;;All Files(*)")
flag = self.cap.open(video_name)
if flag == False:
QtWidgets.QMessageBox.warning(self, u"Warning", u"Video opening failed",
buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
else:
self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20,
(int(self.cap.get(3)), int(self.cap.get(4))))
self.timer_video.start(30)
self.pushButton_video.setDisabled(True)
self.pushButton_img.setDisabled(True)
self.pushButton_camera.setDisabled(True)
self.label.setStyleSheet("border: 2px solid red") # 添加,设置窗口边界颜色
# self.label_2.setPixmap(QtGui.QPixmap.fromImage(self.flag)) # 添加
self.label_2.setStyleSheet("border: 2px solid blue") # 添加,设置窗口边界颜色
QtWidgets.QMessageBox.information(self, u"Tips", u"Video detection in progress!",
buttons=QtWidgets.QMessageBox.Ok,
defaultButton=QtWidgets.QMessageBox.Ok)
def button_camera_open(self):
if not self.timer_video.isActive():
# 默认使用第一个本地camera
flag = self.cap.open(0)
if flag == False:
QtWidgets.QMessageBox.warning(self, u"Warning", u"Camera failed to open",
buttons=QtWidgets.QMessageBox.Ok,
defaultButton=QtWidgets.QMessageBox.Ok)
else:
self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20,
(int(self.cap.get(3)), int(self.cap.get(4))))
self.timer_video.start(30)
self.pushButton_video.setDisabled(True)
self.pushButton_img.setDisabled(True)
self.pushButton_camera.setText(u"Turn off the camera")
else:
self.timer_video.stop()
self.cap.release()
self.out.release()
self.label.clear()
self.init_logo()
self.pushButton_video.setDisabled(False)
self.pushButton_img.setDisabled(False)
self.pushButton_camera.setText(u"Camera detection")
def pushButton_Transfer_data_open(self):
flile_name = "E:/YoloV5/yolov5-master/runs/img/out/excel/excel_xls.xls"
# 读取源excel
xlsx = xlrd.open_workbook(flile_name)
# xlsx = xlrd.open_workbook("readexcel.xlsx")
# 获取sheet个数
sheets = len(xlsx.sheets())
# 准备写入
new_workbook = xlwt.Workbook()
for sheet in range(sheets):
table = xlsx.sheet_by_index(sheet)
rows = table.nrows
cols = table.ncols
worksheet = new_workbook.add_sheet("sheet" + str(sheet))
for i in range(0, rows):
for j in range(0, cols):
# print(i,j,table.cell_value(i, j))
worksheet.write(i, j, table.cell_value(i, j))
new_workbook.save('E:/YoloV5/yolov5-master/runs/pass_xls/Data.xls')
QtWidgets.QMessageBox.information(self, u"Tips", u"Data transfer is successful!",
buttons=QtWidgets.QMessageBox.Ok,
defaultButton=QtWidgets.QMessageBox.Ok)
def show_video_frame(self): # 左侧显示窗口显示
name_list = []
flag, img = self.cap.read()
if img is not None:
showimg = img
with torch.no_grad():
img = letterbox(img, new_shape=self.opt.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
img = torch.from_numpy(img).to(self.device)
img = img.half() if self.half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
pred = self.model(img, augment=self.opt.augment)[0]
# Apply NMS
pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
agnostic=self.opt.agnostic_nms)
# Process detections
for i, det in enumerate(pred): # detections per image
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
# Write results
for *xyxy, conf, cls in reversed(det):
label = '%s %.2f' % (self.names[int(cls)], conf)
name_list.append(self.names[int(cls)])
print(label)
plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
self.out.write(showimg)
show = cv2.resize(showimg, (640, 480))
self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
QtGui.QImage.Format_RGB888)
self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
else:
self.timer_video.stop()
self.cap.release()
self.out.release()
self.label.clear()
self.pushButton_video.setDisabled(False)
self.pushButton_img.setDisabled(False)
self.pushButton_camera.setDisabled(False)
self.init_logo()
def show_video_frame_2(self): # 右侧显示窗口显示
name_list = []
flag, img = self.cap.read()
if img is not None:
showimg = img
with torch.no_grad():
img = letterbox(img, new_shape=self.opt.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
img = torch.from_numpy(img).to(self.device)
img = img.half() if self.half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
pred = self.model(img, augment=self.opt.augment)[0]
# Apply NMS
pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
agnostic=self.opt.agnostic_nms)
# Process detections
for i, det in enumerate(pred): # detections per image
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
# Write results
for *xyxy, conf, cls in reversed(det):
label_2 = '%s %.2f' % (self.names[int(cls)], conf)
name_list.append(self.names[int(cls)])
print(label_2)
plot_one_box(xyxy, showimg, label=label_2, color=self.colors[int(cls)],
line_thickness=2)
self.out.write(showimg)
show = cv2.resize(showimg, (640, 480))
self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
QtGui.QImage.Format_RGB888)
self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
self.label_2.setPixmap(QtGui.QPixmap.fromImage(showImage)) # 添加,不可或缺,用来显示右侧标签的
else:
self.timer_video.stop()
self.cap.release()
self.out.release()
self.label.clear()
self.pushButton_video.setDisabled(False)
self.pushButton_img.setDisabled(False)
self.pushButton_camera.setDisabled(False)
self.init_logo()
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the window, run the loop.
    app = QtWidgets.QApplication(sys.argv)
    ui = Ui_MainWindow()
    ui.show()
    sys.exit(app.exec_())
# Code listing 96: webcam frame-capture + Excel export script
import time
import cv2
import numpy as np
from PIL import Image
import detect
import os
import xlwt
import xlrd  # fix: used below by open_workbook but missing from the original imports


def getline(filepath, xlspath):
    """Write each label .txt file under *filepath* into its own sheet of *xlspath*.

    Files are sorted numerically by their name stem ("12.txt" -> 12); each
    tab-separated field becomes one cell, newlines replaced by spaces.
    """
    file_names = os.listdir(filepath)
    file_names.sort(key=lambda x: int(x[:-4]))  # numeric sort on the stem
    file_ob_list = [filepath + "/" + file_name for file_name in file_names]
    print(file_ob_list)
    xls = xlwt.Workbook()
    for file_ob in file_ob_list:
        # One sheet per text file, named after the file
        sheet = xls.add_sheet(os.path.basename(file_ob), cell_overwrite_ok=True)
        with open(file_ob) as f:
            for x, line in enumerate(f):
                for i, data in enumerate(line.split('\t')):
                    sheet.write(x, i, str(data).replace('\n', ' '))
    xls.save(xlspath)


def transfer_workbook(src_xls, dst_xls):
    """Copy every sheet of *src_xls* cell by cell into a new workbook *dst_xls*."""
    xlsx = xlrd.open_workbook(src_xls)
    new_workbook = xlwt.Workbook()
    for sheet in range(len(xlsx.sheets())):
        table = xlsx.sheet_by_index(sheet)
        worksheet = new_workbook.add_sheet("sheet" + str(sheet))
        for i in range(table.nrows):
            for j in range(table.ncols):
                worksheet.write(i, j, table.cell_value(i, j))
    new_workbook.save(dst_xls)


if __name__ == "__main__":
    capture = cv2.VideoCapture(0)  # open the default camera
    # capture = cv2.VideoCapture("D:/1.mp4")
    ref = capture.isOpened()
    c = 2
    b = 2  # run YOLO detection on every b-th iteration
    while ref:
        # fix: re-read a frame every iteration (the original read only once)
        ref, frame = capture.read()
        if not ref:
            break
        if c % b == 0:
            # fix: str(c) was inside the string literal in the original
            detect.run(source="0", name='../img/out/photo' + str(c) + '.jpg',
                       save_excel=True)  # tune YOLOv5 detection parameters here
        else:
            # NOTE(review): original path embedded the literal "photo+str(c)2.jpg";
            # presumably it should interpolate c as below — confirm against detect.run output.
            filepath = "E:/YoloV5/yolov5-master/runs/img/out/photo" + str(c) + "2.jpg/labels"
            xlspath = "E:/YoloV5/yolov5-master/runs/img/out/excel/excel_xls.xls"
            getline(filepath, xlspath)
            # Forward the collected data to the hand-off workbook
            transfer_workbook(xlspath, 'E:/YoloV5/yolov5-master/runs/pass_xls/Data.xls')
        c += 1
        # fix: show the frame, not the VideoCapture object
        cv2.imshow("video", frame)
        k = cv2.waitKey(1)
        if k == ord('q'):  # quit on 'q'
            capture.release()
            break
        if k == 27:  # quit on ESC
            capture.release()
            break
# Code listing 96 (duplicate number in source, 2021-08-29): UI (detect63.py)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UI_interface4.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import cv2
import argparse
import random
import torch
import numpy as np
import torch.backends.cudnn as cudnn
from utils.torch_utils import select_device
from models.experimental import attempt_load
from utils.general import check_img_size, non_max_suppression, scale_coords
from utils.datasets import letterbox
from utils.plots import plot_one_box
from openpyxl import load_workbook
import xlrd
import xlwt
import time
import detect
import os
from PIL import Image
class Ui_MainWindow(QtWidgets.QMainWindow):
    """Main window for the YOLOv5 detection UI (image / video / camera / Excel export)."""

    def __init__(self, parent=None):
        """Build the UI, parse command-line options, and load the YOLOv5 model once."""
        super(Ui_MainWindow, self).__init__(parent)
        self.timer_video = QtCore.QTimer()  # drives per-frame video/camera inference
        self.setupUi(self)
        self.init_logo()
        self.init_slots()
        self.cap = cv2.VideoCapture()  # opened later for a video file or a camera
        self.out = None  # cv2.VideoWriter, created when a stream is opened
        # self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'XVID'), 20.0, (640, 480))
        parser = argparse.ArgumentParser()
        parser.add_argument('--weights', nargs='+', type=str, default='E:/555/555/yolov5-master/runs/train/exp29/weights/best.pt', help='model.pt path(s)')
        parser.add_argument('--source', type=str, default='0', help='source')  # file/folder, 0 for webcam
        parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
        parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
        parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
        parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
        parser.add_argument('--view-img', action='store_true', help='display results')
        parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
        parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
        parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
        parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
        parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
        parser.add_argument('--augment', action='store_true', help='augmented inference')
        parser.add_argument('--update', action='store_true', help='update all models')
        parser.add_argument('--project', default='runs/detect', help='save results to project/name')
        parser.add_argument('--name', default='exp', help='save results to project/name')
        parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
        self.opt = parser.parse_args()
        print(self.opt)
        source, weights, view_img, save_txt, imgsz = (
            self.opt.source, self.opt.weights, self.opt.view_img,
            self.opt.save_txt, self.opt.img_size)
        self.device = select_device(self.opt.device)
        self.half = self.device.type != 'cpu'  # half precision only supported on CUDA
        cudnn.benchmark = True
        # Load model
        self.model = attempt_load(weights, map_location=self.device)  # load FP32 model
        stride = int(self.model.stride.max())  # model stride
        self.imgsz = check_img_size(imgsz, s=stride)  # check img_size
        if self.half:
            self.model.half()  # to FP16
        # Class names and one random BGR colour per class for box drawing
        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
        self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in self.names]
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1936, 1036)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.layoutWidget = QtWidgets.QWidget(self.centralwidget)
self.layoutWidget.setGeometry(QtCore.QRect(30, 40, 152, 822))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(80)
self.verticalLayout.setObjectName("verticalLayout")
self.pushButton_img = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_img.sizePolicy().hasHeightForWidth())
self.pushButton_img.setSizePolicy(sizePolicy)
self.pushButton_img.setMinimumSize(QtCore.QSize(150, 100))
self.pushButton_img.setMaximumSize(QtCore.QSize(150, 100))
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setPointSize(12)
self.pushButton_img.setFont(font)
self.pushButton_img.setObjectName("pushButton_img")
self.verticalLayout.addWidget(self.pushButton_img)
self.pushButton_camera = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_camera.sizePolicy().hasHeightForWidth())
self.pushButton_camera.setSizePolicy(sizePolicy)
self.pushButton_camera.setMinimumSize(QtCore.QSize(150, 100))
self.pushButton_camera.setMaximumSize(QtCore.QSize(150, 100))
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setPointSize(12)
self.pushButton_camera.setFont(font)
self.pushButton_camera.setObjectName("pushButton_camera")
self.verticalLayout.addWidget(self.pushButton_camera, 0, QtCore.Qt.AlignHCenter)
self.pushButton_video = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_video.sizePolicy().hasHeightForWidth())
self.pushButton_video.setSizePolicy(sizePolicy)
self.pushButton_video.setMinimumSize(QtCore.QSize(150, 100))
self.pushButton_video.setMaximumSize(QtCore.QSize(150, 100))
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setPointSize(12)
self.pushButton_video.setFont(font)
self.pushButton_video.setObjectName("pushButton_video")
self.verticalLayout.addWidget(self.pushButton_video, 0, QtCore.Qt.AlignHCenter)
self.pushButton_Transfer_data = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_Transfer_data.sizePolicy().hasHeightForWidth())
self.pushButton_Transfer_data.setSizePolicy(sizePolicy)
self.pushButton_Transfer_data.setMinimumSize(QtCore.QSize(150, 100))
self.pushButton_Transfer_data.setMaximumSize(QtCore.QSize(150, 100))
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setPointSize(12)
self.pushButton_Transfer_data.setFont(font)
self.pushButton_Transfer_data.setObjectName("pushButton_Transfer_data")
self.verticalLayout.addWidget(self.pushButton_Transfer_data)
self.pushButton_Transfer_data_2 = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_Transfer_data_2.sizePolicy().hasHeightForWidth())
self.pushButton_Transfer_data_2.setSizePolicy(sizePolicy)
self.pushButton_Transfer_data_2.setMinimumSize(QtCore.QSize(150, 100))
self.pushButton_Transfer_data_2.setMaximumSize(QtCore.QSize(150, 100))
font = QtGui.QFont()
font.setFamily("Agency FB")
font.setPointSize(12)
self.pushButton_Transfer_data_2.setFont(font)
self.pushButton_Transfer_data_2.setObjectName("pushButton_Transfer_data_2")
self.verticalLayout.addWidget(self.pushButton_Transfer_data_2)
self.verticalLayout.setStretch(2, 1)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(280, 40, 791, 821))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(1110, 40, 721, 821))
self.label_2.setLineWidth(2)
self.label_2.setObjectName("label_2")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1936, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton_img.setText(_translate("MainWindow", "Image detection"))
self.pushButton_camera.setText(_translate("MainWindow", "Real-time detection"))
self.pushButton_video.setText(_translate("MainWindow", "Video detection"))
self.pushButton_Transfer_data.setText(_translate("MainWindow", "Data transmission"))
self.pushButton_Transfer_data_2.setText(_translate("MainWindow", "Frame detection"))
self.label.setText(_translate("MainWindow", "TextLabel"))
self.label_2.setText(_translate("MainWindow", "TextLabel"))
def init_slots(self):
self.pushButton_img.clicked.connect(self.button_image_open)
self.pushButton_video.clicked.connect(self.button_video_open)
self.pushButton_camera.clicked.connect(self.button_camera_open)
self.pushButton_Transfer_data.clicked.connect(self.pushButton_Transfer_data_open)
self.pushButton_Transfer_data_2.clicked.connect(self.pushButton_Transfer_data_open_2)
self.timer_video.timeout.connect(self.show_video_frame)
self.timer_video.timeout.connect(self.show_video_frame_2)
def init_logo(self):
pix = QtGui.QPixmap('wechat.jpg')
self.label.setScaledContents(True)
self.label_2.setScaledContents(True)
self.label.setPixmap(pix)
self.label_2.setPixmap(pix)
def button_image_open(self):#图片按钮操作
print('button_image_open')
name_list = []
img_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open image", "", "*.jpg;;*.png;;All Files(*)")
img = cv2.imread(img_name)
print(img_name)
showimg = img
with torch.no_grad():
img = letterbox(img, new_shape=self.opt.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
img = torch.from_numpy(img).to(self.device)
img = img.half() if self.half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
pred = self.model(img, augment=self.opt.augment)[0]
# Apply NMS
pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
agnostic=self.opt.agnostic_nms)
print(pred)
# Process detections
for i, det in enumerate(pred):
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
for *xyxy, conf, cls in reversed(det):
label = '%s %.2f' % (self.names[int(cls)], conf)
name_list.append(self.names[int(cls)])
plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
cv2.imwrite('prediction.jpg', showimg)
self.result = cv2.cvtColor(showimg, cv2.COLOR_BGR2BGRA)
self.result = cv2.resize(self.result, (640, 480), interpolation=cv2.INTER_AREA)
self.QtImg = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
QtGui.QImage.Format_RGB32)
self.label.setPixmap(QtGui.QPixmap.fromImage(self.QtImg))
self.label.setStyleSheet("border: 2px solid red") # 添加,设置窗口边界颜色
self.label_2.setPixmap(QtGui.QPixmap.fromImage(self.QtImg)) # 添加
self.label_2.setStyleSheet("border: 2px solid blue") # 添加,设置窗口边界颜色
def button_video_open(self):  # handler for the "open video" button
    """Ask the user for a video file, open it for capture, create a matching
    output writer, and start the frame timer that drives the show_video_frame
    slots.

    Fix: a cancelled dialog no longer triggers a spurious "Video opening
    failed" warning; ``flag == False`` replaced with idiomatic ``not flag``.
    """
    video_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open video", "", "*.mp4;;*.avi;;All Files(*)")
    if not video_name:  # dialog cancelled — nothing to open, no warning needed
        return
    if not self.cap.open(video_name):
        QtWidgets.QMessageBox.warning(self, u"Warning", u"Video opening failed",
                                      buttons=QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)
    else:
        # Writer matches the source resolution (capture props 3/4 = width/height)
        self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20,
                                   (int(self.cap.get(3)), int(self.cap.get(4))))
        self.timer_video.start(30)  # ~33 fps refresh
        self.label.setStyleSheet("border: 2px solid red")  # red border on the left panel
        self.label_2.setStyleSheet("border: 2px solid blue")  # blue border on the right panel
        QtWidgets.QMessageBox.information(self, u"Tips", u"Video detection in progress!",
                                          buttons=QtWidgets.QMessageBox.Ok,
                                          defaultButton=QtWidgets.QMessageBox.Ok)
def button_camera_open(self):  # handler for the camera toggle button
    """Toggle webcam detection: first press opens the default camera and
    starts the timer; second press stops the timer and releases resources.

    Fix: ``flag == False`` replaced with idiomatic ``not self.cap.open(0)``.
    """
    if not self.timer_video.isActive():
        # Default to the first local camera (index 0).
        if not self.cap.open(0):
            QtWidgets.QMessageBox.warning(self, u"Warning", u"Camera failed to open",
                                          buttons=QtWidgets.QMessageBox.Ok,
                                          defaultButton=QtWidgets.QMessageBox.Ok)
        else:
            self.out = cv2.VideoWriter('prediction.avi', cv2.VideoWriter_fourcc(*'MJPG'), 20,
                                       (int(self.cap.get(3)), int(self.cap.get(4))))
            self.timer_video.start(30)  # ~33 fps refresh
            # Block the other capture sources while the camera is live.
            self.pushButton_video.setDisabled(True)
            self.pushButton_img.setDisabled(True)
            self.pushButton_camera.setText(u"Turn off the camera")
    else:
        # Second press: stop the timer, release capture/writer, restore UI.
        self.timer_video.stop()
        self.cap.release()
        self.out.release()
        self.label.clear()
        self.init_logo()
        self.pushButton_video.setDisabled(False)
        self.pushButton_img.setDisabled(False)
        self.pushButton_camera.setText(u"Camera detection")
def pushButton_Transfer_data_open(self):  # handler for the data-transfer button
    """Copy every sheet of the detection-result workbook into a fresh
    hand-off workbook, cell by cell, then confirm with a message box.

    Fix: variable typo ``flile_name`` renamed; redundant ``range(0, n)``
    simplified.
    """
    # TODO(review): paths are hard-coded; consider making them configurable.
    src_path = "E:/YoloV5/yolov5-master/runs/img/out/excel/excel_xls.xls"
    src_book = xlrd.open_workbook(src_path)
    dst_book = xlwt.Workbook()
    # Copy each sheet cell by cell (xlwt cannot clone sheets wholesale).
    for idx in range(len(src_book.sheets())):
        table = src_book.sheet_by_index(idx)
        dst_sheet = dst_book.add_sheet("sheet" + str(idx))
        for i in range(table.nrows):
            for j in range(table.ncols):
                dst_sheet.write(i, j, table.cell_value(i, j))
    dst_book.save('E:/YoloV5/yolov5-master/runs/pass_xls/Data.xls')
    QtWidgets.QMessageBox.information(self, u"Tips", u"Data transfer is successful!",
                                      buttons=QtWidgets.QMessageBox.Ok,
                                      defaultButton=QtWidgets.QMessageBox.Ok)
def pushButton_Transfer_data_open_2(self):
    """Frame-capture handler: reads webcam frames in a blocking loop, every
    ``timeF``-th frame saves the image, runs detection on it, exports the
    label txt files to an Excel workbook and copies that workbook to the
    hand-off location.

    NOTE(review): this method references ``time``, ``os`` and ``detect``,
    none of which are imported at the top of this file — as written it
    raises NameError at runtime; confirm the missing imports.
    NOTE(review): the blocking ``while ref`` loop freezes the Qt event loop;
    the sibling handlers use ``self.timer_video`` instead — verify intent.
    NOTE(review): indentation was lost in the source this was pasted from;
    the nesting below is a best-effort reconstruction — confirm against the
    original file.
    """
    if not self.timer_video.isActive():
        # Default to the first local camera
        flag = self.cap.open(0)
        if flag == False:
            QtWidgets.QMessageBox.warning(self, u"Warning", u"Camera failed to open",
                                          buttons=QtWidgets.QMessageBox.Ok,
                                          defaultButton=QtWidgets.QMessageBox.Ok)
        else:
            ref,frame =self.cap.read()
            fps =0.0
            timeF = 50  # process every 50th frame (comment claims yolov5 ~140 FPS)
            c = 1  # frame counter
            while ref:
                t1 = time.time()
                # Read the next frame
                ref, frame = self.cap.read()
                # Frames saved here carry no detection result; used to collect
                # training data and to check the camera is clear and stable.
                if (c % timeF == 0):
                    fps = (fps + (1. / (time.time() - t1))) / 2
                    print("fps= %.2f" % (fps))
                    frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (0, 255, 0), 2)
                    # Save the sampled frame to /runs/img/in/
                    cv2.imwrite("./runs/img/in/" + str(c) + '.jpg', frame)
                    # Feed the saved image to detect; results go to /runs/img/out
                    detect.run(source="./runs/img/in/" + str(c) + '.jpg', name='../img/out/photo+str(c)' + '.jpg',
                               save_txt=True)
                    self.result = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
                    self.result = cv2.resize(self.result, (640, 480), interpolation=cv2.INTER_AREA)
                    self.QtImg = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
                                              QtGui.QImage.Format_RGB32)
                    self.label.setPixmap(QtGui.QPixmap.fromImage(self.QtImg))
                    self.label.setStyleSheet("border: 2px solid red")  # left panel border colour
                    self.label_2.setPixmap(QtGui.QPixmap.fromImage(self.QtImg))
                    self.label_2.setStyleSheet("border: 2px solid blue")  # right panel border colour
                    #self.pushButton_Transfer_data_open_2.setText(u"帧检测关闭")

                    # NOTE(review): defining a function and checking
                    # ``__name__`` per frame looks like script code pasted
                    # into this method — confirm against the original.
                    def getline(filepath, xlspath):
                        """Write each label txt under ``filepath`` into its own
                        sheet of a new xls workbook saved at ``xlspath``."""
                        # Read all label text files
                        file_names = os.listdir(filepath)
                        # Sort numerically by the name before the extension
                        file_names.sort(key=lambda x: int(x[:-4]))
                        file_ob_list = []
                        try:
                            # Build the list of full paths
                            for file_name in file_names:
                                file_ob = filepath + "/" + file_name
                                file_ob_list.append(file_ob)
                            print(file_ob_list)
                            # Create the workbook
                            xls = xlwt.Workbook()
                            # One sheet per text file, named after the file
                            for file_ob in file_ob_list:
                                # Basename only; empty if the path ends in '/' or '\'
                                sheet_name = os.path.basename(file_ob)
                                sheet = xls.add_sheet(sheet_name, cell_overwrite_ok=True)
                                # Copy the txt contents into the sheet
                                f = open(file_ob)
                                x = 0  # row index
                                # Read the text line by line
                                while True:
                                    line = f.readline()
                                    if not line:
                                        break
                                    for i in range(len(line.split('\t'))):
                                        data = line.split('\t')[i]
                                        data = str(data)  # stringify before cleaning
                                        data = data.replace('\n', ' ')  # newline -> space
                                        sheet.write(x, i, data)  # row, column, content
                                    x += 1
                                # Move on to the next text file
                                f.close()
                            xls.save(xlspath)
                        except:
                            raise
                    if __name__ == "__main__":
                        filepath = "E:/YoloV5/yolov5-master/runs/img/out/photo+str(c)2.jpg/labels"  # label directory
                        xlspath = "E:/YoloV5/yolov5-master/runs/img/out/excel/excel_xls.xls"  # absolute xls path
                        # Export the label txts to the xls workbook
                        getline(filepath, xlspath)
                        # Copy that workbook's contents into another workbook
                        flile_name = "E:/YoloV5/yolov5-master/runs/img/out/excel/excel_xls.xls"
                        # Open the source excel
                        xlsx = xlrd.open_workbook(flile_name)
                        # Number of sheets
                        sheets = len(xlsx.sheets())
                        # Prepare the destination workbook
                        new_workbook = xlwt.Workbook()
                        for sheet in range(sheets):
                            table = xlsx.sheet_by_index(sheet)
                            rows = table.nrows
                            cols = table.ncols
                            worksheet = new_workbook.add_sheet("sheet" + str(sheet))
                            for i in range(0, rows):
                                for j in range(0, cols):
                                    worksheet.write(i, j, table.cell_value(i, j))
                        new_workbook.save('E:/YoloV5/yolov5-master/runs/pass_xls/Data.xls')
                c += 1
    else:
        # Second press while active: stop and release everything.
        self.timer_video.stop()
        self.cap.release()
        self.out.release()
        self.label.clear()
        self.init_logo()
        #self.pushButton_video.setDisabled(False)
        #self.pushButton_img.setDisabled(False)
        self.pushButton_camera.setText(u"Camera detection")
def show_video_frame(self):  # left-hand display panel
    """Timer slot: read one frame from ``self.cap``, run YOLOv5 detection,
    draw the boxes, append the frame to ``self.out`` and show it in the
    left label; on end of stream, stop the timer and reset the UI.
    """
    name_list = []
    flag, img = self.cap.read()
    if img is not None:
        showimg = img  # original-size frame used for drawing and output
        with torch.no_grad():
            img = letterbox(img, new_shape=self.opt.img_size)[0]
            # Convert
            img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
            img = np.ascontiguousarray(img)
            img = torch.from_numpy(img).to(self.device)
            img = img.half() if self.half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)  # add batch dimension
            # Inference
            pred = self.model(img, augment=self.opt.augment)[0]
            # Apply NMS
            pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
                                       agnostic=self.opt.agnostic_nms)
            # Process detections
            for i, det in enumerate(pred):  # detections per image
                if det is not None and len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                    # Write results
                    for *xyxy, conf, cls in reversed(det):
                        label = '%s %.2f' % (self.names[int(cls)], conf)
                        name_list.append(self.names[int(cls)])
                        print(label)
                        plot_one_box(xyxy, showimg, label=label, color=self.colors[int(cls)], line_thickness=2)
        self.out.write(showimg)  # record the annotated frame
        show = cv2.resize(showimg, (640, 480))
        self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)  # RGB888 for QImage
        showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
                                 QtGui.QImage.Format_RGB888)
        self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
    else:
        # End of stream: release resources and restore the buttons.
        self.timer_video.stop()
        self.cap.release()
        self.out.release()
        self.label.clear()
        self.pushButton_video.setDisabled(False)
        self.pushButton_img.setDisabled(False)
        self.pushButton_camera.setDisabled(False)
        self.init_logo()
def show_video_frame_2(self):  # right-hand display panel
    """Timer slot mirroring show_video_frame, but the annotated frame is
    shown in BOTH labels (left is overwritten too — see the last setPixmap).
    """
    name_list = []
    flag, img = self.cap.read()
    if img is not None:
        showimg = img  # original-size frame used for drawing and output
        with torch.no_grad():
            img = letterbox(img, new_shape=self.opt.img_size)[0]
            # Convert
            img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
            img = np.ascontiguousarray(img)
            img = torch.from_numpy(img).to(self.device)
            img = img.half() if self.half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)  # add batch dimension
            # Inference
            pred = self.model(img, augment=self.opt.augment)[0]
            # Apply NMS
            pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres, classes=self.opt.classes,
                                       agnostic=self.opt.agnostic_nms)
            # Process detections
            for i, det in enumerate(pred):  # detections per image
                if det is not None and len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], showimg.shape).round()
                    # Write results
                    for *xyxy, conf, cls in reversed(det):
                        label_2 = '%s %.2f' % (self.names[int(cls)], conf)
                        name_list.append(self.names[int(cls)])
                        print(label_2)
                        plot_one_box(xyxy, showimg, label=label_2, color=self.colors[int(cls)],
                                     line_thickness=2)
        self.out.write(showimg)  # record the annotated frame
        show = cv2.resize(showimg, (640, 480))
        self.result = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)  # RGB888 for QImage
        showImage = QtGui.QImage(self.result.data, self.result.shape[1], self.result.shape[0],
                                 QtGui.QImage.Format_RGB888)
        self.label.setPixmap(QtGui.QPixmap.fromImage(showImage))
        self.label_2.setPixmap(QtGui.QPixmap.fromImage(showImage))  # essential: drives the right label
    else:
        # End of stream: release resources and restore the buttons.
        self.timer_video.stop()
        self.cap.release()
        self.out.release()
        self.label.clear()
        self.pushButton_video.setDisabled(False)
        self.pushButton_img.setDisabled(False)
        self.pushButton_camera.setDisabled(False)
        self.init_logo()
if __name__ == '__main__':
    # Script entry point: create the detection window and run the Qt event loop.
    app = QtWidgets.QApplication(sys.argv)
    ui = Ui_MainWindow()
    ui.show()
    sys.exit(app.exec_())
代码96:将多个txt文件合成1个txt文件
import os
import shutil  # copyfileobj does the chunked file-to-file copy

# Merge every per-frame label txt under ``txtpath`` into one file ``b.txt``.
txtpath = 'E:/YoloV5/yolov5-master/runs/img/out/photo+str(c)2.jpg/labels'
namelist = os.listdir(txtpath)  # no list comprehension needed for a plain copy
# Sort numerically by the stem so "10.txt" sorts after "9.txt".
namelist.sort(key=lambda x: int(x[:-4]))
print(len(namelist))  # e.g. 1674
outfilename = 'E:/YoloV5/yolov5-master/runs/img/out/photo+str(c)2.jpg/b.txt'
# 'a' creates the file if missing; NOTE: repeated runs keep appending.
with open(outfilename, 'a') as outfile:  # with-blocks close files even on error
    for name in namelist:  # iterate names directly instead of range(len(...))
        datapath = os.path.join(txtpath, name)
        print(datapath)
        with open(datapath, 'r') as infile:
            shutil.copyfileobj(infile, outfile)
代码97:
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
class Ui_MainWindow(object):
    """Single-button window: white edit background while enabled, green
    elsewhere; the button closes the window."""

    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        # Build the palette data-driven instead of repeating setBrush calls;
        # the resulting palette is identical to the generated code's.
        white = QtGui.QColor(255, 255, 255)
        green = QtGui.QColor(85, 255, 134)
        palette = QtGui.QPalette()
        group_base = ((QtGui.QPalette.Active, white),
                      (QtGui.QPalette.Inactive, white),
                      (QtGui.QPalette.Disabled, green))
        for group, base_color in group_base:
            for role, color in ((QtGui.QPalette.Base, base_color),
                                (QtGui.QPalette.Window, green)):
                brush = QtGui.QBrush(color)
                brush.setStyle(QtCore.Qt.SolidPattern)
                palette.setBrush(group, role, brush)
        MainWindow.setPalette(palette)
        MainWindow.setWindowOpacity(1.0)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(200, 210, 93, 28))
        self.pushButton.setObjectName("pushButton")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        # Clicking the button closes the window.
        self.pushButton.clicked.connect(MainWindow.close)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all translatable UI strings."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton.setText(_translate("MainWindow", "PushButton"))
if __name__ == '__main__':
    # Script entry point: build the window from the generated UI class
    # and run the Qt event loop.
    app =QtWidgets.QApplication(sys.argv)
    MainWindow =QtWidgets.QMainWindow()
    ui =Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())