起因
在使用PyQT为目标检测编写GUI界面时,通过一个子线程调用opencv获取摄像头画面并实时渲染,然后另外启动一个子线程对摄像头画面进行实时检测。
结果UI界面还是因为模型推理(通过CPU)耗时导致卡顿。后来发现是GIL的影响:受GIL限制,CPython的多线程无法在CPU密集型任务上真正并行,同一时刻只有一个线程执行字节码。于是转而采用多进程,在子进程中对摄像头画面进行推理。
代码
界面
class VideoProcessThread(QThread):
    """Worker thread that captures frames from a video source via OpenCV.

    Each frame is emitted twice: once as a raw RGB ndarray for object
    detection (``imageCaptured``) and once as a QPixmap for rendering on
    the UI (``frameCaptured``). ``videoError`` reports a failure to open
    the source.
    """

    # Signal used to render the frame on the UI
    frameCaptured = pyqtSignal(QPixmap)
    # Signal used to feed frames to object detection
    imageCaptured = pyqtSignal(object)
    # Emitted with a human-readable message when the source cannot be opened
    videoError = pyqtSignal(str)

    def __init__(self, url):
        super().__init__()
        # Anything cv2.VideoCapture accepts: file path, device index, stream URL
        self.url = url
        # Loop-control flag; cleared by stop()
        self.flag = True

    def stop(self):
        """Ask the capture loop to exit; run() releases the device itself."""
        self.flag = False

    def run(self):
        vid = cv2.VideoCapture(self.url)
        if not vid.isOpened():
            self.videoError.emit(f'打开“{self.url}”失败')
            return
        try:
            fps = vid.get(cv2.CAP_PROP_FPS)
            # Some devices/streams report fps as 0 (or NaN), which would make
            # 1000 / fps raise ZeroDivisionError — guard before dividing.
            # Per-frame duration in ms, clamped to at most 25 fps (>= 40 ms).
            frame_interval = 1000.0 / fps if fps and fps > 0 else 40.0
            single_frame_duration = int(max(40.0, frame_interval))
            while self.flag:
                ret, frame = vid.read()
                if not ret:
                    # End of file / stream lost: leave the loop normally
                    break
                # OpenCV delivers BGR; convert once for both consumers
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                self.imageCaptured.emit(frame)
                # NOTE(review): Qt documents QPixmap as GUI-thread-only;
                # consider emitting a QImage and converting in the slot —
                # confirm against the Qt threading documentation.
                pixmap = QPixmap(qimage2ndarray.array2qimage(frame))
                self.frameCaptured.emit(pixmap)
                # Throttle the capture rate
                self.msleep(single_frame_duration)
        finally:
            # Always release the capture device, even if read/convert raised
            vid.release()
class RealTimeDetectWidget(QWidget):
    """Real-time detection page.

    Lets the user pick a video source (file / camera / network stream),
    renders captured frames, and hands frames to a pool of worker
    processes for model inference so the GUI thread stays responsive.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.ui = Ui_RealTimeDetect()
        self.ui.setupUi(self)
        # Frame-capture thread; created lazily in play_video()
        self.video_capture_thread = None
        # Source-selection buttons, toggled as a group
        self.buttons = (self.ui.video_file, self.ui.camera, self.ui.online_video)
        self.ui.video_file.clicked.connect(self.open_video_file)
        self.ui.camera.clicked.connect(self.open_camera)
        self.ui.online_video.clicked.connect(self.open_online_video)
        self.ui.close.clicked.connect(self.close_video)
        # True while a frame is being inferred; used to skip frames
        self.onProcessImage = False
        # Worker-process pool: 2 workers, each runs initialize() once to
        # load its own model instance
        self.process_pool = multiprocessing.Pool(2, initializer=initialize)

    def open_video_file(self):
        """Prompt for a local video file and start playback."""
        video_file, _ = QFileDialog.getOpenFileName(self, "选择视频文件", os.curdir,
                                                    '视频文件(*.avi *.mp4 *.wmv)')
        if video_file:
            self.play_video(video_file)
            for button in self.buttons:
                button.setEnabled(False)
            self.ui.close.setEnabled(True)
            self.log_message(f"打开文件:{video_file}")
        else:
            # Dialog cancelled: un-press the toggle button
            self.ui.video_file.setChecked(False)

    def open_camera(self):
        """Start capturing from the default camera (device index 0)."""
        self.play_video(0)
        for button in self.buttons:
            button.setEnabled(False)
        self.ui.close.setEnabled(True)
        self.log_message("打开摄像头")

    def open_online_video(self):
        """Prompt for a stream URL and start playback."""
        text, ok = QInputDialog.getText(self, "视频地址", "地址")
        text = text.strip()
        if ok and text:
            self.play_video(text)
            for button in self.buttons:
                button.setEnabled(False)
            self.ui.close.setEnabled(True)
            self.log_message(f"打开在线视频流:{text}")
        else:
            # Cancelled or empty input: un-press the toggle button
            self.ui.online_video.setChecked(False)

    def play_video(self, url):
        """Spin up a VideoProcessThread for *url* and wire its signals."""
        self.video_capture_thread = VideoProcessThread(url)
        self.video_capture_thread.frameCaptured.connect(self.display_video_frame)
        self.video_capture_thread.imageCaptured.connect(self.process_image)
        self.video_capture_thread.videoError.connect(self.log_message)

        # Runs when the thread finishes; only notify if the video ended on
        # its own (close button still enabled) rather than the user stopping it
        def finished():
            if self.ui.close.isEnabled():
                self.ui.close.click()
                show_toast(self, "视频结束")

        self.video_capture_thread.finished.connect(finished)
        self.video_capture_thread.start()

    def close_video(self):
        """Stop the capture thread and restore the source buttons."""
        self.video_capture_thread.stop()
        self.ui.close.setEnabled(False)
        for button in self.buttons:
            button.setChecked(False)
            button.setEnabled(True)
        self.log_message("视频流已关闭")

    def log_message(self, message: str):
        """Append a timestamped, colorized line to the message pane."""
        time_info = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        text = (f'<font color="red">{time_info}</font>:'
                f'<font color="green">{message}</font>')
        self.ui.message.append(text)

    def closeEvent(self, a0):
        super().closeEvent(a0)
        # Stop capturing and wait for the thread to exit so the capture
        # device is actually released before the widget is destroyed
        if self.video_capture_thread:
            self.video_capture_thread.stop()
            self.video_capture_thread.wait()
        # Shut down the worker processes; without this the two pool
        # workers outlive the window as orphan processes
        self.process_pool.terminate()
        self.process_pool.join()
供子进程调用的函数
# 初始化进程池时调用,为每个子进程初始化模型
def initialize():
    """Pool initializer: build one detector per worker process.

    Called once in each worker when the multiprocessing pool starts.
    The model is kept in the module-global ``predictor`` so that
    ``predict_in_subprocess`` can reuse it across calls without
    reloading the model every time.
    """
    global predictor
    predictor = DetectPredictor()
# 进行推理
def predict_in_subprocess(img, threshold: float = 0.7):
    """Run detection on *img* inside a worker process.

    Reads the per-process ``predictor`` set up by :func:`initialize`
    (no ``global`` statement needed for a read-only access) and returns
    whatever ``predictor.predict`` produces.
    """
    return predictor.predict(img, threshold)
然后在子线程调用进程池执行推理
# 依然在子线程执行(因为等待子进程也是阻塞的)
# 或者可以通过apply_async(predict_in_subprocess,args=(frame,),callback)
# 传入回调函数的方式(未测试...)
result = self.process_pool.apply(predict_in_subprocess, args=(frame,))
#...对检测结果进行处理
查看任务管理器
结束
模型推理的调用仍然由子线程发起(等待结果的阻塞发生在子线程中),但实际计算在子进程里执行,不占用GIL,故不会造成UI卡顿。