Hand-eye calibration (Python, RealSense, JAKA); converting quaternions, Euler angles, and rotation vectors to rotation matrices

1. Automatic calibration of the camera-to-end-effector pose

Robot arm and RealSense initialization:

import jkrc
import math
import time
import pyrealsense2 as rs
import numpy as np
import cv2
# If cv2.aruco is missing, see the troubleshooting notes
import cv2.aruco as aruco
import transforms3d as tfs
# JAKA initialization
robot = jkrc.RC("192.168.0.104")  # robot arm IP address
# log in
robot.login()

# RealSense initialization: configure the streams and start the pipeline
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 30)
profile = pipeline.start(config)
align_to = rs.stream.color
align = rs.align(align_to)

End-effector-to-base pose (can also be read from the teach pendant):

# Get the JAKA end-effector pose: x, y, z in mm; rx, ry, rz in radians
def get_jaka_gripper():
    # get_tcp_position() returns (error_code, [x, y, z, rx, ry, rz]); keep the pose
    tcp_pos = robot.get_tcp_position()
    return tcp_pos[1]

Get the RealSense camera intrinsics, distortion coefficients, etc.:

# Get aligned RGB and depth images
def get_aligned_images():
    frames = pipeline.wait_for_frames()
    aligned_frames = align.process(frames)
    aligned_depth_frame = aligned_frames.get_depth_frame()
    color_frame = aligned_frames.get_color_frame()
    # Get the Intel RealSense intrinsics
    intr = color_frame.profile.as_video_stream_profile().intrinsics
    # Intrinsic matrix as an ndarray so OpenCV can use it directly
    intr_matrix = np.array([
        [intr.fx, 0, intr.ppx], [0, intr.fy, intr.ppy], [0, 0, 1]
    ])
    # 16-bit depth image
    depth_image = np.asanyarray(aligned_depth_frame.get_data())
    # 8-bit depth image (invalid zero-depth pixels are mapped to 255 below)
    depth_image_8bit = cv2.convertScaleAbs(depth_image, alpha=0.03)
    pos = np.where(depth_image_8bit == 0)
    depth_image_8bit[pos] = 255
    # RGB image
    color_image = np.asanyarray(color_frame.get_data())
    # return: RGB image, depth image, intrinsic matrix, distortion coefficients (intr.coeffs)
    return color_image, depth_image, intr_matrix, np.array(intr.coeffs)
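
Not needed for the calibration itself, but a common follow-up: with the aligned depth image and the intrinsic matrix returned above, a pixel can be deprojected into a 3D point in the camera frame. A minimal sketch assuming a simple pinhole model (distortion ignored; the function name pixel_to_cam_point is illustrative, and depth_scale is typically 0.001 for D400-series cameras, queryable via profile.get_device().first_depth_sensor().get_depth_scale()):

import numpy as np

def pixel_to_cam_point(u, v, depth_image, intr_matrix, depth_scale=0.001):
    # Deproject pixel (u, v) to a 3D point in meters in the camera frame
    z = depth_image[v, u] * depth_scale  # raw z16 value -> meters
    fx, fy = intr_matrix[0, 0], intr_matrix[1, 1]
    ppx, ppy = intr_matrix[0, 2], intr_matrix[1, 2]
    return np.array([(u - ppx) * z / fx, (v - ppy) * z / fy, z])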

Get the camera-to-marker pose:

# Get the calibration marker pose
def get_realsense_mark(rgb, intr_matrix, intr_coeffs):
    # Load the ArUco dictionary (DICT_ARUCO_ORIGINAL)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
    # Create detector parameters
    parameters = aruco.DetectorParameters_create()
    # Inputs: RGB image, ArUco dictionary, camera intrinsics, distortion coefficients
    corners, ids, rejected_img_points = aruco.detectMarkers(rgb, aruco_dict, parameters=parameters,
                                                            cameraMatrix=intr_matrix, distCoeff=intr_coeffs)
    # Estimate the marker pose; 0.14 is the markerLength parameter, in meters
    # rvec is the rotation vector, tvec is the translation vector
    rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners, 0.14, intr_matrix, intr_coeffs)
    return list(np.reshape(tvec, 3)) + list(np.reshape(rvec, 3))
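
The title mentions three rotation representations. A minimal sketch of all three routes to a rotation matrix, matching the commented-out alternatives in the full script below (the numeric values are illustrative):

import cv2
import numpy as np
import transforms3d as tfs

# Rotation vector -> rotation matrix (used for the ArUco rvec above)
rvec = np.array([0.1, -0.2, 0.3], dtype=np.float64)
R_from_rvec, _ = cv2.Rodrigues(rvec)

# Euler angles (radians, static xyz order) -> rotation matrix (used for the JAKA pose)
R_from_euler = tfs.euler.euler2mat(0.1, -0.2, 0.3, 'sxyz')

# Quaternion -> rotation matrix; transforms3d expects (w, x, y, z),
# while many cameras output (x, y, z, w), hence the reordering
qx, qy, qz, qw = 0.0, 0.0, 0.0, 1.0
R_from_quat = tfs.quaternions.quat2mat((qw, qx, qy, qz))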

Complete code. After running the script, press r to record the two poses (record several samples, ideally with varied orientations) and press c to compute the result; cv2.calibrateHandEye solves the AX = XB hand-eye equation from these pose pairs. Since this was written quickly, the case where no marker appears in the camera view is not handled; you can add that yourself:

import jkrc
import math
import time
import pyrealsense2 as rs
import numpy as np
import cv2
# If cv2.aruco is missing, see the troubleshooting notes
import cv2.aruco as aruco
import transforms3d as tfs
# JAKA initialization
robot = jkrc.RC("192.168.0.104")  # robot arm IP address
# log in
robot.login()

# RealSense initialization: configure the streams and start the pipeline
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 30)
profile = pipeline.start(config)
align_to = rs.stream.color
align = rs.align(align_to)

# Get the JAKA end-effector pose: x, y, z in mm; rx, ry, rz in radians
def get_jaka_gripper():
    # get_tcp_position() returns (error_code, [x, y, z, rx, ry, rz]); keep the pose
    tcp_pos = robot.get_tcp_position()
    return tcp_pos[1]
# Get aligned RGB and depth images
def get_aligned_images():
    frames = pipeline.wait_for_frames()
    aligned_frames = align.process(frames)
    aligned_depth_frame = aligned_frames.get_depth_frame()
    color_frame = aligned_frames.get_color_frame()
    # Get the Intel RealSense intrinsics
    intr = color_frame.profile.as_video_stream_profile().intrinsics
    # Intrinsic matrix as an ndarray so OpenCV can use it directly
    intr_matrix = np.array([
        [intr.fx, 0, intr.ppx], [0, intr.fy, intr.ppy], [0, 0, 1]
    ])
    # 16-bit depth image
    depth_image = np.asanyarray(aligned_depth_frame.get_data())
    # 8-bit depth image (invalid zero-depth pixels are mapped to 255 below)
    depth_image_8bit = cv2.convertScaleAbs(depth_image, alpha=0.03)
    pos = np.where(depth_image_8bit == 0)
    depth_image_8bit[pos] = 255
    # RGB image
    color_image = np.asanyarray(color_frame.get_data())
    # return: RGB image, depth image, intrinsic matrix, distortion coefficients (intr.coeffs)
    return color_image, depth_image, intr_matrix, np.array(intr.coeffs)

# Get the calibration marker pose
def get_realsense_mark(rgb, intr_matrix, intr_coeffs):
    # Load the ArUco dictionary (DICT_ARUCO_ORIGINAL)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
    # Create detector parameters
    parameters = aruco.DetectorParameters_create()
    # Inputs: RGB image, ArUco dictionary, camera intrinsics, distortion coefficients
    corners, ids, rejected_img_points = aruco.detectMarkers(rgb, aruco_dict, parameters=parameters,
                                                            cameraMatrix=intr_matrix, distCoeff=intr_coeffs)
    # Estimate the marker pose; 0.14 is the markerLength parameter, in meters
    # rvec is the rotation vector, tvec is the translation vector
    rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners, 0.14, intr_matrix, intr_coeffs)
    return list(np.reshape(tvec, 3)) + list(np.reshape(rvec, 3))
if __name__ == "__main__":
    hands = []
    cameras = []
    while True:
        rgb, depth, intr_matrix, intr_coeffs = get_aligned_images()
        cv2.imshow('RGB image', rgb)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q') or key == 27:
            pipeline.stop()
            break
        # Press r to record the gripper-to-base and marker-to-camera poses
        elif key == ord('r'):
            hands.append(get_jaka_gripper())
            cameras.append(get_realsense_mark(rgb, intr_matrix, intr_coeffs))
            print("record_ok")
        elif key == ord('c'):
            R_Hgs, R_Hcs = [], []
            T_Hgs, T_Hcs = [], []
            for camera in cameras:
                # marker-to-camera rotation matrix and translation vector
                c = camera[3:6]
                # R_Hcs.append(tfs.quaternions.quat2mat((q[3], q[0], q[1], q[2])))  # quaternion -> rotation matrix; use this if the camera outputs x, y, z, w (quat2mat expects w, x, y, z)
                camera_mat, _ = cv2.Rodrigues(np.array(c, dtype=np.float64))  # rotation vector -> rotation matrix
                R_Hcs.append(camera_mat)
                T_Hcs.append(np.array(camera[0:3]) * 1000)  # unify units: meters -> mm
            for hand in hands:
                # gripper-to-base rotation matrix and translation vector
                g = hand[3:6]
                # R_Hgs.append(tfs.euler.euler2mat(math.radians(g[0]), math.radians(g[1]), math.radians(g[2]), 'sxyz'))  # if the angles are read out in degrees, convert to radians first
                R_Hgs.append(tfs.euler.euler2mat(g[0], g[1], g[2], 'sxyz'))  # Euler angles -> rotation matrix
                T_Hgs.append(np.array(hand[0:3]))
            print("R_Hcs:",R_Hcs)
            print("T_Hcs:",T_Hcs)
            print("R_Hgs:",R_Hgs)
            print("T_Hgs:",T_Hgs)
            # Compute the camera-to-gripper transform
            R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(R_Hgs, T_Hgs, R_Hcs, T_Hcs,
                                                               method=cv2.CALIB_HAND_EYE_TSAI)
            RT_c2g = tfs.affines.compose(np.squeeze(t_cam2gripper), R_cam2gripper, [1, 1, 1])
            print("RT_c2g:",RT_c2g)

            # Use the computed RT_c2g to reconstruct the marker pose in the base frame for each
            # recorded sample; if the calibration is good, all rows of final_pose should be nearly identical
            final_pose = []
            for i in range(len(R_Hgs)):
                RT_g2b = tfs.affines.compose(np.squeeze(T_Hgs[i]), R_Hgs[i], [1, 1, 1])
                temp = np.dot(RT_g2b, RT_c2g)
                RT_t2c = tfs.affines.compose(np.squeeze(T_Hcs[i]), R_Hcs[i], [1, 1, 1])
                temp = np.dot(temp, RT_t2c)
                tr = temp[0:3, 3:4].T[0]
                rot = tfs.euler.mat2euler(temp[0:3, 0:3])
                final_pose.append([tr[0], tr[1], tr[2], math.degrees(rot[0]), math.degrees(rot[1]), math.degrees(rot[2])])  # angles reported in degrees
            final_pose = np.array(final_pose)
            print('final_pose\n', final_pose)
            break
    cv2.destroyAllWindows()
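
Once RT_c2g is known, a point seen by the camera can be mapped into the robot base frame through the current gripper pose. A minimal sketch under the same conventions as above (units in mm, JAKA angles in radians; the function name cam_point_to_base is illustrative):

import numpy as np
import transforms3d as tfs

def cam_point_to_base(point_cam_mm, tcp_pose, RT_c2g):
    # point_cam_mm: (x, y, z) in the camera frame, in mm (camera meters * 1000)
    # tcp_pose:     [x, y, z, rx, ry, rz] from get_jaka_gripper() (mm, radians)
    # RT_c2g:       4x4 camera-to-gripper transform from the calibration above
    R_g2b = tfs.euler.euler2mat(tcp_pose[3], tcp_pose[4], tcp_pose[5], 'sxyz')
    RT_g2b = tfs.affines.compose(tcp_pose[0:3], R_g2b, [1, 1, 1])
    p = np.append(np.asarray(point_cam_mm, dtype=np.float64), 1.0)  # homogeneous
    return (RT_g2b @ RT_c2g @ p)[0:3]  # point in the base frame, in mm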

2. Supplementary notes
Camera:
1. Marker generator: https://chev.me/arucogen/ (after changing the marker, update the dictionary and markerLength in the camera-to-marker pose code).
2. If you use an ordinary camera, first calibrate its intrinsics and distortion with a chessboard pattern.
3. You can also call OpenCV's PnP solver to compute the target's relative pose directly. It needs the target's four corner points, the side length (only squares tested so far; for an irregular target, inscribe or circumscribe a square and compute with that), and the camera parameters; see the sketch after this list.
Robot arm:
1. You can read the pose straight from the teach pendant; rx, ry, rz are usually reported in degrees there, so remember to convert.
2. Or call the SDK directly to get the end-effector pose.
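
A minimal solvePnP sketch for item 3 above, assuming a square target of known side length whose four corners have been detected in the image (the corner ordering and the function name pnp_square_pose are illustrative):

import cv2
import numpy as np

def pnp_square_pose(img_corners, side_m, intr_matrix, intr_coeffs):
    # img_corners: 4x2 pixel coordinates ordered TL, TR, BR, BL
    # side_m:      square side length in meters
    half = side_m / 2.0
    # 3D corners in the target's own frame, matching the image corner order
    obj_pts = np.array([[-half,  half, 0], [ half,  half, 0],
                        [ half, -half, 0], [-half, -half, 0]], dtype=np.float64)
    img_pts = np.asarray(img_corners, dtype=np.float64)
    ok, rvec, tvec = cv2.solvePnP(obj_pts, img_pts, intr_matrix, intr_coeffs)
    return rvec, tvec  # rotation vector and translation (meters), target -> camera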
