Intel RealSense Learning Notes (2): Displaying RealSense D415 Depth and Color Images in Real Time with PyQt5, with a Button to Save the Images


Requirement: use a RealSense D415 to capture depth maps and color images of a person's face.

The UI looks like this:

The saved files are organized as follows:
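
A folder named after the current date (YYYY-MM-DD) is created next to the script. If the ID typed into the UI matches the allowed pattern it is used as the file name prefix, otherwise a timestamp is used. A made-up example (date and ID are placeholders; the actual names come from the saving code in face.py below):

2021-03-15/
    test01_depth.png                  16-bit aligned depth image (valid ID entered)
    test01_color.png                  8-bit BGR color image (valid ID entered)
    2021-03-15_143015_depth.png       fallback name when the ID field is empty or invalid
    2021-03-15_143015_color.png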

Code:

1. face_mainwindow.ui

<?xml version="1.0" encoding="UTF-8"?>
<ui version="4.0">
 <class>MainWindow</class>
 <widget class="QMainWindow" name="MainWindow">
  <property name="geometry">
   <rect>
    <x>0</x>
    <y>0</y>
    <width>1300</width>
    <height>860</height>
   </rect>
  </property>
  <property name="windowTitle">
   <string>MainWindow</string>
  </property>
  <widget class="QWidget" name="centralwidget">
   <widget class="QLabel" name="label_show">
    <property name="geometry">
     <rect>
      <x>10</x>
      <y>0</y>
      <width>1280</width>
      <height>720</height>
     </rect>
    </property>
    <property name="text">
     <string>Depth</string>
    </property>
   </widget>
   <widget class="QPushButton" name="pushButton_takephotos">
    <property name="geometry">
     <rect>
      <x>330</x>
      <y>750</y>
      <width>101</width>
      <height>41</height>
     </rect>
    </property>
    <property name="text">
     <string>拍摄</string>
    </property>
   </widget>
   <widget class="QLineEdit" name="lineEdit_id">
    <property name="geometry">
     <rect>
      <x>140</x>
      <y>750</y>
      <width>151</width>
      <height>41</height>
     </rect>
    </property>
   </widget>
   <widget class="QLabel" name="label">
    <property name="geometry">
     <rect>
      <x>50</x>
      <y>760</y>
      <width>61</width>
      <height>21</height>
     </rect>
    </property>
    <property name="text">
     <string>ID:</string>
    </property>
   </widget>
  </widget>
  <widget class="QMenuBar" name="menubar">
   <property name="geometry">
    <rect>
     <x>0</x>
     <y>0</y>
     <width>1300</width>
     <height>26</height>
    </rect>
   </property>
  </widget>
  <widget class="QStatusBar" name="statusbar"/>
 </widget>
 <resources/>
 <connections/>
</ui>
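
The Python module in the next section was generated from this .ui file with pyuic5 (PyQt5 5.15.2 here); if the layout is changed in Qt Designer, it can be regenerated with:

pyuic5 face_mainwindow.ui -o face_mainwindow.py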

2. face_mainwindow.py

# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'face_mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again.  Do not edit this file unless you know what you are doing.


from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1300, 860)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.label_show = QtWidgets.QLabel(self.centralwidget)
        self.label_show.setGeometry(QtCore.QRect(10, 0, 1280, 720))
        self.label_show.setObjectName("label_show")
        self.pushButton_takephotos = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_takephotos.setGeometry(QtCore.QRect(330, 750, 101, 41))
        self.pushButton_takephotos.setObjectName("pushButton_takephotos")
        self.lineEdit_id = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit_id.setGeometry(QtCore.QRect(140, 750, 151, 41))
        self.lineEdit_id.setObjectName("lineEdit_id")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(50, 760, 61, 21))
        self.label.setObjectName("label")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1300, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label_show.setText(_translate("MainWindow", "Depth"))
        self.pushButton_takephotos.setText(_translate("MainWindow", "拍摄"))
        self.label.setText(_translate("MainWindow", "ID:"))

3. face.py

import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QObject, pyqtSignal
from face_mainwindow import Ui_MainWindow

import os
import time
import datetime as dt
import numpy as np
import threading as th
import ctypes
import inspect

import re

# First import the library
import pyrealsense2 as rs
import cv2
from skimage import io

DELAY = 0



class MainWindow(QMainWindow, Ui_MainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()

        # Set up the user interface from Designer.
        self.setupUi(self)

        self.dis_update.connect(self.camera_view)
        self.pushButton_takephotos.clicked.connect(self.pushButton_takephotos_clicked)

        self.thread_camera = None
        self.takePhotos = False

    # The signal is declared inside the window class, at the same level as its methods (a class attribute)
    dis_update = pyqtSignal(QPixmap)

    def pushButton_takephotos_clicked(self):
        self.takePhotos = True

    # Ask for confirmation before the window is closed
    def closeEvent(self, event):
        """我们创建了一个消息框,上面有俩按钮:Yes和No.第一个字符串显示在消息框的标题栏,第二个字符串显示在对话框,
              第三个参数是消息框的俩按钮,最后一个参数是默认按钮,这个按钮是默认选中的。返回值在变量reply里。"""

        reply = QMessageBox.question(self, 'Message', "Are you sure to quit?",
                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        # If Yes was clicked, stop the camera thread and close the window; otherwise ignore the close event
        if reply == QMessageBox.Yes:
            self.stop_thread(self.thread_camera)
            event.accept()
        else:
            event.ignore()


    def open_camera(self):
        # target is the function that starts the camera stream
        self.thread_camera = th.Thread(target=self.open_realsense)
        self.thread_camera.start()
        print('Open Camera')

    def camera_view(self, c):
        # setPixmap shows the pixmap on the label
        self.label_show.setPixmap(c)
        # setScaledContents scales the image to fit the QLabel
        self.label_show.setScaledContents(True)

    def _async_raise(self, tid, exctype):
        """raises the exception, performs cleanup if needed"""
        tid = ctypes.c_long(tid)
        if not inspect.isclass(exctype):
            exctype = type(exctype)
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
        if res == 0:
            raise ValueError("invalid thread id")
        elif res != 1:
            # """if it returns a number greater than one, you're in trouble,
            # and you should call it again with exc=NULL to revert the effect"""
            ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
            raise SystemError("PyThreadState_SetAsyncExc failed")

    def stop_thread(self, thread):
        self._async_raise(thread.ident, SystemExit)

    def open_realsense(self):
        print('open_realsense')

        # Create a pipeline
        pipeline = rs.pipeline()

        # Create a config and configure the pipeline to stream
        #  different resolutions of color and depth streams
        config = rs.config()
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

        # Start streaming
        profile = pipeline.start(config)

        # Getting the depth sensor's depth scale (see rs-align example for explanation)
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()
        print("Depth Scale is: ", depth_scale)

        # We will be removing the background of objects more than
        #  clipping_distance_in_meters meters away
        clipping_distance_in_meters = 1  # 1 meter
        clipping_distance = clipping_distance_in_meters / depth_scale


        # Color Intrinsics
        # intr = color_frame.profile.as_video_stream_profile().intrinsics

        # Create an align object
        # rs.align allows us to perform alignment of depth frames to others frames
        # The "align_to" is the stream type to which we plan to align depth frames.
        align_to = rs.stream.color
        align = rs.align(align_to)

        # Streaming loop
        try:
            while True:
                # Get frameset of color and depth
                frames = pipeline.wait_for_frames()
                # frames.get_depth_frame() is a 1280x720 depth image

                # Align the depth frame to color frame
                aligned_frames = align.process(frames)

                # Get aligned frames
                aligned_depth_frame = aligned_frames.get_depth_frame()  # aligned_depth_frame is a 1280x720 depth image
                color_frame = aligned_frames.get_color_frame()

                # Validate that both frames are valid
                if not aligned_depth_frame or not color_frame:
                    continue

                depth_image = np.asanyarray(aligned_depth_frame.get_data())
                color_image = np.asanyarray(color_frame.get_data())

                if(self.takePhotos == True):

                    now_date = dt.datetime.now().strftime('%F')
                    now_time = dt.datetime.now().strftime('%F_%H%M%S')

                    path_ok = os.path.exists(now_date)
                    if(path_ok == False):
                        os.mkdir(now_date)

                    if(os.path.isdir(now_date)):
                        id = self.lineEdit_id.text()

                        depth_full_path = ''
                        color_full_path = ''

                        # if (re.match('^[a-zA-Z0-9_]*$', id) and (id != '')):
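                        # allow Chinese characters (\u4E00-\u9FA5), letters, digits and underscores in the ID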
                        if (re.match('^[\u4E00-\u9FA5a-zA-Z0-9_]*$', id) and (id != '')):

                            depth_full_path = os.path.join('./', now_date, id + '_depth.png')
                            color_full_path = os.path.join('./', now_date, id + '_color.png')
                            print(depth_full_path)
                            print(color_full_path)
                        else:
                            depth_full_path = os.path.join('./', now_date, now_time + '_depth.png')
                            color_full_path = os.path.join('./', now_date, now_time + '_color.png')

                        # cv2.imwrite(depth_full_path, depth_image)
                        # cv2.imwrite(color_full_path, color_image)
                        # io.imsave(depth_full_path, depth_image)
                        # io.imsave(color_full_path, color_image)
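                        # imencode + tofile is used instead of cv2.imwrite so that paths containing Chinese characters also work on Windows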
                        cv2.imencode('.png', depth_image)[1].tofile(depth_full_path)
                        cv2.imencode('.png', color_image)[1].tofile(color_full_path)
                        # print('ok')
                    self.takePhotos = False


                # Remove background - Set pixels further than clipping_distance to grey
                grey_color = 153
                depth_image_3d = np.dstack(
                    (depth_image, depth_image, depth_image))  # depth image is 1 channel, color is 3 channels
                bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color,
                                      color_image)

                # Render images
                depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)


                # Crop the central 640 columns of each image and show them side by side (1280x720 in total)
                images = np.hstack((bg_removed[0:720, 320:960], depth_colormap[0:720, 320:960]))


                qimage = QImage(images, 1280, 720, QImage.Format_BGR888)
                pixmap = QPixmap.fromImage(qimage)
                self.dis_update.emit(pixmap)

                time.sleep(DELAY)
        finally:
            pipeline.stop()


if __name__ == "__main__":
    app = QApplication(sys.argv)
    w = MainWindow()
    w.show()

    w.open_camera()

    # thread_camera = th.Thread(target=w.open_realsense)
    # thread_camera.start()

    print('Hello World!')

    sys.exit(app.exec_())
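
The depth image is saved as a 16-bit single-channel PNG, so it must be read back with the IMREAD_UNCHANGED flag or OpenCV will convert it to 8-bit. A minimal sketch for loading a saved capture and converting it to meters; the path and ID are made-up examples, and the depth scale of about 0.001 m per unit is only the value the D415 typically reports (use the value printed by open_realsense() instead):

import cv2
import numpy as np

# made-up path following the naming scheme above
depth_path = './2021-03-15/test01_depth.png'

# np.fromfile + imdecode mirrors the imencode + tofile call used when saving,
# so paths containing Chinese characters also work on Windows
depth_raw = cv2.imdecode(np.fromfile(depth_path, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
print(depth_raw.dtype, depth_raw.shape)   # expected: uint16 (720, 1280)

depth_scale = 0.001  # assumed typical D415 depth scale; replace with the printed value
depth_m = depth_raw.astype(np.float32) * depth_scale
print('distance at the image center: %.3f m' % depth_m[360, 640])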

4. face_mirror.py

import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QObject, pyqtSignal
from face_mainwindow import Ui_MainWindow

import os
import time
import datetime as dt
import numpy as np
import threading as th
import ctypes
import inspect

import re

# First import the library
import pyrealsense2 as rs
import cv2
from skimage import io

DELAY = 0


class MainWindow(QMainWindow, Ui_MainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()

        # Set up the user interface from Designer.
        self.setupUi(self)

        self.dis_update.connect(self.camera_view)
        self.pushButton_takephotos.clicked.connect(self.pushButton_takephotos_clicked)

        self.thread_camera = None
        self.takePhotos = False

    # The signal is declared inside the window class, at the same level as its methods (a class attribute)
    dis_update = pyqtSignal(QPixmap)

    def pushButton_takephotos_clicked(self):
        self.takePhotos = True

    # Ask for confirmation before the window is closed
    def closeEvent(self, event):
        """我们创建了一个消息框,上面有俩按钮:Yes和No.第一个字符串显示在消息框的标题栏,第二个字符串显示在对话框,
              第三个参数是消息框的俩按钮,最后一个参数是默认按钮,这个按钮是默认选中的。返回值在变量reply里。"""

        reply = QMessageBox.question(self, 'Message', "Are you sure to quit?",
                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        # If Yes was clicked, stop the camera thread and close the window; otherwise ignore the close event
        if reply == QMessageBox.Yes:
            self.stop_thread(self.thread_camera)
            event.accept()
        else:
            event.ignore()

    def open_camera(self):
        # target is the function that starts the camera stream
        self.thread_camera = th.Thread(target=self.open_realsense)
        self.thread_camera.start()
        print('Open Camera')

    def camera_view(self, c):
        # setPixmap shows the pixmap on the label
        self.label_show.setPixmap(c)
        # setScaledContents scales the image to fit the QLabel
        self.label_show.setScaledContents(True)

    def _async_raise(self, tid, exctype):
        """raises the exception, performs cleanup if needed"""
        tid = ctypes.c_long(tid)
        if not inspect.isclass(exctype):
            exctype = type(exctype)
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
        if res == 0:
            raise ValueError("invalid thread id")
        elif res != 1:
            # """if it returns a number greater than one, you're in trouble,
            # and you should call it again with exc=NULL to revert the effect"""
            ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
            raise SystemError("PyThreadState_SetAsyncExc failed")

    def stop_thread(self, thread):
        self._async_raise(thread.ident, SystemExit)

    def open_realsense(self):
        print('open_realsense')

        # Create a pipeline
        pipeline = rs.pipeline()

        # Create a config and configure the pipeline to stream
        #  different resolutions of color and depth streams
        config = rs.config()
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

        # Start streaming
        profile = pipeline.start(config)

        # Getting the depth sensor's depth scale (see rs-align example for explanation)
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()
        print("Depth Scale is: ", depth_scale)

        # We will be removing the background of objects more than
        #  clipping_distance_in_meters meters away
        clipping_distance_in_meters = 1  # 1 meter
        clipping_distance = clipping_distance_in_meters / depth_scale

        # Color Intrinsics
        # intr = color_frame.profile.as_video_stream_profile().intrinsics

        # Create an align object
        # rs.align allows us to perform alignment of depth frames to others frames
        # The "align_to" is the stream type to which we plan to align depth frames.
        align_to = rs.stream.color
        align = rs.align(align_to)

        # Streaming loop
        try:
            while True:
                # Get frameset of color and depth
                frames = pipeline.wait_for_frames()
                # frames.get_depth_frame() is a 1280x720 depth image

                # Align the depth frame to color frame
                aligned_frames = align.process(frames)

                # Get aligned frames
                aligned_depth_frame = aligned_frames.get_depth_frame()  # aligned_depth_frame is a 1280x720 depth image
                color_frame = aligned_frames.get_color_frame()

                # Validate that both frames are valid
                if not aligned_depth_frame or not color_frame:
                    continue

                depth_image = np.asanyarray(aligned_depth_frame.get_data())
                color_image = np.asanyarray(color_frame.get_data())

                if (self.takePhotos == True):

                    now_date = dt.datetime.now().strftime('%F')
                    now_time = dt.datetime.now().strftime('%F_%H%M%S')

                    path_ok = os.path.exists(now_date)
                    if (path_ok == False):
                        os.mkdir(now_date)

                    if (os.path.isdir(now_date)):
                        id = self.lineEdit_id.text()

                        depth_full_path = ''
                        color_full_path = ''

                        # if (re.match('^[a-zA-Z0-9_]*$', id) and (id != '')):
                        if (re.match('^[\u4E00-\u9FA5a-zA-Z0-9_]*$', id) and (id != '')):

                            depth_full_path = os.path.join('./', now_date, id + '_depth.png')
                            color_full_path = os.path.join('./', now_date, id + '_color.png')
                            print(depth_full_path)
                            print(color_full_path)
                        else:
                            depth_full_path = os.path.join('./', now_date, now_time + '_depth.png')
                            color_full_path = os.path.join('./', now_date, now_time + '_color.png')

                        # cv2.imwrite(depth_full_path, depth_image)
                        # cv2.imwrite(color_full_path, color_image)
                        # io.imsave(depth_full_path, depth_image)
                        # io.imsave(color_full_path, color_image)
                        cv2.imencode('.png', depth_image)[1].tofile(depth_full_path)
                        cv2.imencode('.png', color_image)[1].tofile(color_full_path)
                        # print('ok')
                    self.takePhotos = False

                # Remove background - Set pixels further than clipping_distance to grey
                grey_color = 153
                depth_image_3d = np.dstack(
                    (depth_image, depth_image, depth_image))  # depth image is 1 channel, color is 3 channels
                bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color,
                                      color_image)

                # Render images
                depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

                bg_removed = cv2.flip(bg_removed, 1)
                depth_colormap = cv2.flip(depth_colormap, 1)

                # Crop the central 640 columns of each image and show them side by side (1280x720 in total)
                images = np.hstack((bg_removed[0:720, 320:960], depth_colormap[0:720, 320:960]))

                qimage = QImage(images, 1280, 720, QImage.Format_BGR888)
                pixmap = QPixmap.fromImage(qimage)
                self.dis_update.emit(pixmap)

                time.sleep(DELAY)
        finally:
            pipeline.stop()


if __name__ == "__main__":
    app = QApplication(sys.argv)
    w = MainWindow()
    w.show()

    w.open_camera()

    # thread_camera = th.Thread(target=w.open_realsense)
    # thread_camera.start()

    print('Hello World!')

    sys.exit(app.exec_())


Note: the only real difference between face.py and face_mirror.py is that the latter shows a horizontally mirrored preview while the former does not; the two extra lines are shown below.
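
In face_mirror.py the two rendered images are flipped around the vertical axis just before the crop-and-hstack step (flipCode=1 in cv2.flip means a horizontal flip); these are the only two extra lines:

bg_removed = cv2.flip(bg_removed, 1)
depth_colormap = cv2.flip(depth_colormap, 1)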


References:

1. align-depth2color.py (from the librealsense Python examples)
