Installation
pip install pyrealsense2
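To confirm the install worked, a quick sanity check (my own addition, not from the original post) is to import the package and list the connected devices, using the same calls that appear later in this post:

import pyrealsense2 as rs

# Enumerate connected RealSense devices; an empty list usually means a cable/driver problem
ctx = rs.context()
for d in ctx.devices:
    print(d.get_info(rs.camera_info.name), d.get_info(rs.camera_info.serial_number))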
Reading images:
import pyrealsense2 as rs
import numpy as np
import cv2
if __name__ == "__main__":
    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    # Start streaming
    pipeline.start(config)
    try:
        while True:
            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            # Convert images to numpy arrays
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
            # Stack both images horizontally
            images = np.hstack((color_image, depth_colormap))
            # Show images
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', images)
            key = cv2.waitKey(1)
            # Press esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break
    finally:
        # Stop streaming
        pipeline.stop()
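The Z16 depth image above holds raw sensor units, not meters. Below is a minimal sketch (my own addition, assuming a D400-series camera) of converting it: pipeline.start() returns a profile from which the depth sensor's depth scale can be read, and depth_frame.get_distance(x, y) returns meters for a single pixel directly.

import pyrealsense2 as rs
import numpy as np

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
profile = pipeline.start(config)
# Meters per raw depth unit (typically 0.001 on D400-series cameras)
depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()
try:
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    depth_image = np.asanyarray(depth_frame.get_data())
    depth_in_meters = depth_image * depth_scale       # whole frame converted to meters
    center_m = depth_frame.get_distance(320, 240)     # single pixel, already in meters
    print(depth_in_meters[240, 320], center_m)
finally:
    pipeline.stop()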
Getting the camera intrinsics:
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
pipeline.start(config)
# Create an align object (align depth to the color stream)
align = rs.align(rs.stream.color)
try:
    while True:
        frames = pipeline.wait_for_frames()
        # Get the aligned frameset
        aligned_frames = align.process(frames)
        # Get the aligned depth frame and the color frame
        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
        if not aligned_depth_frame or not color_frame:
            continue
        # Get the color stream intrinsics
        color_profile = color_frame.get_profile()
        cvsprofile = rs.video_stream_profile(color_profile)
        color_intrin = cvsprofile.get_intrinsics()
        color_intrin_part = [color_intrin.ppx, color_intrin.ppy, color_intrin.fx, color_intrin.fy]
        print(color_intrin_part)
        # [318.48199462890625, 241.16720581054688, 616.5906372070312, 616.7650146484375]
finally:
    pipeline.stop()
fx and fy are the focal lengths along the x and y axes, and ppx and ppy are the coordinates of the principal point; they map a 3D point (X, Y, Z) in the camera frame to the pixel u = fx * X / Z + ppx, v = fy * Y / Z + ppy.
At first it reported an error:
Traceback (most recent call last):
File "F:\biadu_down\yolov5-face-master\data\aaa.py", line 17, in <module>
frames = pipeline.wait_for_frames()
RuntimeError: Frame didn't arrive within 5000
Later it worked; the output:
[318.36590576171875, 240.8314208984375, 382.5411682128906, 382.3366394042969]
[318.36590576171875, 240.8314208984375, 382.5411682128906, 382.3366394042969]
[318.36590576171875, 240.8314208984375, 382.5411682128906, 382.3366394042969]
[318.36590576171875, 240.8314208984375, 382.5411682128906, 382.3366394042969]
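With these intrinsics and the aligned depth frame from the loop above, a pixel can be deprojected into a 3D point in the camera frame. A minimal sketch (my own addition; the pixel (320, 240) is just an example) that would go inside the while loop:

u, v = 320, 240
depth_m = aligned_depth_frame.get_distance(u, v)  # depth in meters at pixel (u, v)
point_3d = rs.rs2_deproject_pixel_to_point(color_intrin, [u, v], depth_m)
print(point_3d)  # [X, Y, Z] in meters in the color camera frame
# By hand: Z = depth_m, X = (u - ppx) / fx * Z, Y = (v - ppy) / fy * Z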
Reading two cameras at the same time, depth and RGB images:
import pyrealsense2 as rs
import cv2
import numpy as np
import time
import os
import sys
if __name__ == '__main__':
    # Set the input resolution and frame rate
    resolution_width = 640  # pixels
    resolution_height = 480  # pixels
    frame_rate = 30  # fps
    # Register the streams and create the align object (depth aligned to color)
    align = rs.align(rs.stream.color)
    rs_config = rs.config()
    rs_config.enable_stream(rs.stream.depth, resolution_width, resolution_height, rs.format.z16, frame_rate)
    rs_config.enable_stream(rs.stream.color, resolution_width, resolution_height, rs.format.bgr8, frame_rate)
    # D435i: also enable the two infrared streams
    rs_config.enable_stream(rs.stream.infrared, 1, 640, 480, rs.format.y8, frame_rate)
    rs_config.enable_stream(rs.stream.infrared, 2, 640, 480, rs.format.y8, frame_rate)
    # Check which cameras are connected
    connect_device = []
    for d in rs.context().devices:
        print('Found device: ',
              d.get_info(rs.camera_info.name), ' ',
              d.get_info(rs.camera_info.serial_number))
        if d.get_info(rs.camera_info.name).lower() != 'platform camera':
            connect_device.append(d.get_info(rs.camera_info.serial_number))
    if len(connect_device) < 2:
        print('Registration needs two cameras connected, but fewer were found.')
        exit()
    # Bind each camera's serial number to its own pipeline and start streaming
    pipeline1 = rs.pipeline()
    rs_config.enable_device(connect_device[0])
    # pipeline_profile1 = pipeline1.start(rs_config)
    pipeline1.start(rs_config)
    pipeline2 = rs.pipeline()
    rs_config.enable_device(connect_device[1])
    # pipeline_profile2 = pipeline2.start(rs_config)
    pipeline2.start(rs_config)
    try:
        while True:
            # Wait for frames from both cameras
            frames1 = pipeline1.wait_for_frames()
            frames2 = pipeline2.wait_for_frames()
            # Align the incoming RGB-D data
            aligned_frames1 = align.process(frames1)
            aligned_frames2 = align.process(frames2)
            # Extract the aligned color and depth frames
            color_frame1 = aligned_frames1.get_color_frame()
            depth_frame1 = aligned_frames1.get_depth_frame()
            color_frame2 = aligned_frames2.get_color_frame()
            depth_frame2 = aligned_frames2.get_depth_frame()
            # Grab the left/right infrared frames (taken from the raw framesets)
            ir_frame_left1 = frames1.get_infrared_frame(1)
            ir_frame_right1 = frames1.get_infrared_frame(2)
            if not depth_frame1 or not color_frame1:
                continue
            ir_frame_left2 = frames2.get_infrared_frame(1)
            ir_frame_right2 = frames2.get_infrared_frame(2)
            if not depth_frame2 or not color_frame2:
                continue
            # Convert frames to numpy arrays for processing
            color_image1 = np.asanyarray(color_frame1.get_data())
            depth_image1 = np.asanyarray(depth_frame1.get_data())
            ir_left_image1 = np.asanyarray(ir_frame_left1.get_data())
            ir_right_image1 = np.asanyarray(ir_frame_right1.get_data())
            color_image2 = np.asanyarray(color_frame2.get_data())
            depth_image2 = np.asanyarray(depth_frame2.get_data())
            ir_left_image2 = np.asanyarray(ir_frame_left2.get_data())
            ir_right_image2 = np.asanyarray(ir_frame_right2.get_data())
            # Colorize the depth maps and show each camera's color/depth pair side by side
            depth_colormap1 = cv2.applyColorMap(cv2.convertScaleAbs(depth_image1, alpha=0.03), cv2.COLORMAP_JET)
            images1 = np.hstack((color_image1, depth_colormap1))
            depth_colormap2 = cv2.applyColorMap(cv2.convertScaleAbs(depth_image2, alpha=0.03), cv2.COLORMAP_JET)
            images2 = np.hstack((color_image2, depth_colormap2))
            cv2.imshow('RealSense1', images1)
            cv2.imshow('RealSense2', images2)
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break
    finally:
        pipeline1.stop()
        pipeline2.stop()
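If the paired streams need to be saved to disk, a small helper (my own addition, not from the original post; the file names are made up for illustration) could be called inside the loop above, for example when 's' is pressed. Depth is written as a 16-bit PNG so the raw Z16 values survive:

import time
import cv2
import numpy as np

def save_snapshot(color_image1, depth_image1, color_image2, depth_image2, out_dir="."):
    stamp = int(time.time() * 1000)  # millisecond timestamp as a simple file id
    cv2.imwrite(f"{out_dir}/cam1_color_{stamp}.png", color_image1)
    cv2.imwrite(f"{out_dir}/cam1_depth_{stamp}.png", depth_image1.astype(np.uint16))
    cv2.imwrite(f"{out_dir}/cam2_color_{stamp}.png", color_image2)
    cv2.imwrite(f"{out_dir}/cam2_depth_{stamp}.png", depth_image2.astype(np.uint16))

Inside the loop, right after images1/images2 are built, this would be triggered with: if key & 0xFF == ord('s'): save_snapshot(color_image1, depth_image1, color_image2, depth_image2).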
Reading two cameras at the same time, color images only:
import pyrealsense2 as rs
import cv2
import numpy as np
import time
import os
import sys
class Realsense2:
    def __init__(self, camera_id_list=[0], camera_width=1280, camera_height=720, camera_fps=30):
        self.camera_width = camera_width
        self.camera_height = camera_height
        self.camera_fps = camera_fps
        self.camera_id_list = camera_id_list

    def camera_config(self):
        self.connect_device = []
        # Get the serial numbers of all connected RealSense devices
        for d in rs.context().devices:
            print('Found device: ',
                  d.get_info(rs.camera_info.name), ' ',
                  d.get_info(rs.camera_info.serial_number))
            if d.get_info(rs.camera_info.name).lower() != 'platform camera':
                self.connect_device.append(d.get_info(rs.camera_info.serial_number))
        # Create one pipeline/config pair per requested camera
        self.pipeline_list = [rs.pipeline() for i in range(len(self.camera_id_list))]
        self.config_list = [rs.config() for i in range(len(self.camera_id_list))]
        if len(self.camera_id_list) == 1:  # one RealSense
            self.config_list[0].enable_device(self.connect_device[0])
            self.config_list[0].enable_stream(rs.stream.depth, self.camera_width, self.camera_height, rs.format.z16,
                                              self.camera_fps)
            self.config_list[0].enable_stream(rs.stream.color, self.camera_width, self.camera_height, rs.format.bgr8,
                                              self.camera_fps)
            self.pipeline_list[0].start(self.config_list[0])
        elif len(self.camera_id_list) >= 2:  # two or more RealSense
            if len(self.connect_device) < 2:
                print('Registration needs two cameras connected, but fewer were found. '
                      'Please run realsense-viewer to check your camera status.')
                exit()
            # Enable the streams on each camera's config
            for n, config in enumerate(self.config_list):
                config.enable_device(self.connect_device[n])
                config.enable_stream(rs.stream.depth, self.camera_width, self.camera_height, rs.format.z16,
                                     self.camera_fps)
                config.enable_stream(rs.stream.color, self.camera_width, self.camera_height, rs.format.bgr8,
                                     self.camera_fps)
            # Start the pipelines
            for n, pipeline in enumerate(self.pipeline_list):
                pipeline.start(self.config_list[n])

    def wait_frames(self, frame_id=None):
        '''
        frame_id:
            None -> wait for frames from every camera
            id   -> wait for a frame from that camera only
        '''
        self.frame_list = [None for i in range(len(self.camera_id_list))]
        if frame_id is not None:  # a specific camera was requested
            self.frame_list[frame_id] = self.pipeline_list[frame_id].wait_for_frames()
        else:  # get frames from all cameras
            for n, camera_id in enumerate(self.camera_id_list):
                self.frame_list[n] = self.pipeline_list[n].wait_for_frames()

    def rgb_image(self, camera_id=0):
        color_frame = self.frame_list[camera_id].get_color_frame()
        color_image = np.asanyarray(color_frame.get_data())
        return color_image

    def depth_frame(self, frames):
        depth_frame = frames.get_depth_frame()
        return depth_frame

    def stop(self):
        for pipeline in self.pipeline_list:
            pipeline.stop()
        print("camera exit successfully.")
if __name__ == '__main__':
    cap = Realsense2(camera_id_list=[0, 1], camera_width=640, camera_height=480)
    cap.camera_config()
    while True:
        start = time.time()
        cap.wait_frames()
        img0 = cap.rgb_image(0)
        img1 = cap.rgb_image(1)
        cv2.imshow("img0", img0)
        cv2.imshow("img1", img1)
        # print("FPS:", 1 / (time.time() - start), "f/s")
        if cv2.waitKey(1) == ord("q"):
            break
    cap.stop()
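After cap.wait_frames() the framesets are kept in cap.frame_list, so the class's depth_frame() helper can also be used to display a colorized depth map. A small sketch (my own addition, not in the original) of the extra lines for camera 0, placed inside the while loop:

depth_f0 = cap.depth_frame(cap.frame_list[0])
if depth_f0:
    depth_img0 = np.asanyarray(depth_f0.get_data())
    depth_cm0 = cv2.applyColorMap(cv2.convertScaleAbs(depth_img0, alpha=0.03), cv2.COLORMAP_JET)
    cv2.imshow("depth0", depth_cm0)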