从 OpenCV(cv2)4.7.0 版本起,aruco 模块的 API 发生了变化,这会导致之前的代码无法直接运行。
需要修改的地方如下。这里给出一个使用 RealSense 相机检测环境中 ArUco 码位姿的完整代码示例(参考了 OpenCV 源码),其中主要需要重写的是 estimatePoseSingleMarkers 函数。
import pyrealsense2 as rs
import numpy as np
import cv2
# 提示没有aruco的看问题汇总
import cv2.aruco as aruco
# Configure the RealSense depth + color streams (640x480 @ 30 fps) and start the pipeline.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
profile = pipeline.start(config)
# Align every frameset to the color stream so depth pixels map 1:1 onto RGB pixels.
align_to = rs.stream.color
align = rs.align(align_to)
# Grab one frameset and return the color-aligned RGB / depth pair.
def get_aligned_images():
    """Fetch one aligned frame pair from the running RealSense pipeline.

    Returns:
        tuple: ``(color_image, depth_image, intr_matrix, dist_coeffs)`` where
            color_image - BGR uint8 ndarray,
            depth_image - raw 16-bit depth ndarray,
            intr_matrix - 3x3 intrinsic matrix of the color camera,
            dist_coeffs - distortion coefficients (``intr.coeffs``) as ndarray.
    """
    frameset = pipeline.wait_for_frames()
    aligned = align.process(frameset)
    depth_frame = aligned.get_depth_frame()
    color_frame = aligned.get_color_frame()

    # Color-stream intrinsics, packed into a 3x3 matrix for direct use with OpenCV.
    intr = color_frame.profile.as_video_stream_profile().intrinsics
    intr_matrix = np.array([
        [intr.fx, 0, intr.ppx],
        [0, intr.fy, intr.ppy],
        [0, 0, 1],
    ])

    # Raw 16-bit depth image.
    depth_image = np.asanyarray(depth_frame.get_data())
    # 8-bit preview of the depth map; invalid (zero) depth is pushed to white.
    # NOTE(review): this preview is computed but not returned — kept as in the original.
    depth_image_8bit = cv2.convertScaleAbs(depth_image, alpha=0.03)
    depth_image_8bit[np.where(depth_image_8bit == 0)] = 255

    # BGR color image.
    color_image = np.asanyarray(color_frame.get_data())
    return color_image, depth_image, intr_matrix, np.array(intr.coeffs)
def my_estimatePoseSingleMarkers(corners, marker_size, mtx, distortion):
    """Estimate rvec/tvec for each detected marker.

    Replacement for ``cv2.aruco.estimatePoseSingleMarkers``, which was removed
    in OpenCV >= 4.7.

    Args:
        corners: sequence of detected marker corners, as returned by
            ``detector.detectMarkers(image)``.
        marker_size: side length of the square marker (same unit as tvec,
            typically meters).
        mtx: 3x3 camera intrinsic matrix.
        distortion: camera distortion coefficients.

    Returns:
        tuple: ``(rvecs, tvecs, trash)`` — lists of rotation vectors,
        translation vectors and solvePnP return flags, mirroring the old
        ``estimatePoseSingleMarkers()`` result shape. Empty lists when no
        markers were detected.
    """
    # 3D corners of a square marker centered at the origin, in the same order
    # detectMarkers() reports image corners (top-left, top-right,
    # bottom-right, bottom-left).
    half = marker_size / 2
    marker_points = np.array([[-half, half, 0],
                              [half, half, 0],
                              [half, -half, 0],
                              [-half, -half, 0]], dtype=np.float32)
    rvecs = []
    tvecs = []
    trash = []
    for c in corners:
        # BUG FIX: `flags` must be passed by keyword. The original call
        # solvePnP(obj, img, mtx, dist, False, cv2.SOLVEPNP_IPPE_SQUARE)
        # put `False` into the positional `rvec` slot and the flags constant
        # into the `tvec` slot of solvePnP's signature
        # (obj, img, mtx, dist[, rvec[, tvec[, useExtrinsicGuess[, flags]]]]),
        # so the intended IPPE_SQUARE solver was never selected.
        retval, rvec, tvec = cv2.solvePnP(marker_points, c, mtx, distortion,
                                          useExtrinsicGuess=False,
                                          flags=cv2.SOLVEPNP_IPPE_SQUARE)
        rvecs.append(rvec)
        tvecs.append(tvec)
        trash.append(retval)
    return rvecs, tvecs, trash
if __name__ == "__main__":
    n = 0  # counter used to number saved snapshots
    # Predefined marker dictionary to detect.
    # NOTE(review): the original comment claimed "4x4, 50 markers", but the
    # code actually uses DICT_ARUCO_ORIGINAL — kept the code, fixed the comment.
    aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_ARUCO_ORIGINAL)
    # Detector parameters + OpenCV >= 4.7 detector object.
    parameters = aruco.DetectorParameters()
    detector = aruco.ArucoDetector(aruco_dict, parameters)
    try:
        while True:
            rgb, depth, intr_matrix, intr_coeffs = get_aligned_images()
            # Detect markers in the color image.
            corners, ids, rejected_img_points = detector.detectMarkers(rgb)
            # Estimate each marker's pose; 0.04 is the marker side length
            # (markerLength) in meters. rvec = rotation vector, tvec = translation.
            rvec, tvec, markerPoints = my_estimatePoseSingleMarkers(
                corners, 0.04, intr_matrix, intr_coeffs)
            # Only draw when something was actually detected. This replaces the
            # original bare `except: pass`, which silently swallowed every error.
            if ids is not None:
                # Outline the detected markers on the image.
                aruco.drawDetectedMarkers(rgb, corners)
                # Draw the xyz axes for each pose; 0.05 is the axis length in meters.
                for r, t in zip(rvec, tvec):
                    cv2.drawFrameAxes(rgb, intr_matrix, intr_coeffs, r, t, 0.05)
            cv2.imshow('RGB image', rgb)
            key = cv2.waitKey(1)
            # Quit on 'q' or ESC.
            if key & 0xFF == ord('q') or key == 27:
                break
            # Save the current color frame on 's'.
            elif key == ord('s'):
                n = n + 1
                cv2.imwrite('./img/rgb' + str(n) + '.jpg', rgb)
    finally:
        # Always release the camera and UI resources, even if the loop exits
        # via an exception (the original only stopped the pipeline on 'q').
        pipeline.stop()
        cv2.destroyAllWindows()
效果如下: