A crop of the core code for aligning a LiDAR with an RGB camera

Code source:

https://github.com/agarwa65/lidar_camera_calibration

The essential part is simply a crop of the core of the calibrate function. Once you understand the whole process, it is really no different from aligning a structured-light camera with an RGB camera.

import numpy
import math
# from tf.transformations import euler_from_matrix  # original function, copied inline below
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
    'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
    'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
    'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
    'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
    'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
    'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
    'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
    'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())

def euler_from_matrix(matrix, axes='sxyz'):
    """Return Euler angles from rotation matrix for specified axis sequence.

    axes : One of 24 axis sequences as string or encoded tuple

    Note that many Euler angle triplets can describe one matrix.

    >>> R0 = euler_matrix(1, 2, 3, 'syxz')
    >>> al, be, ga = euler_from_matrix(R0, 'syxz')
    >>> R1 = euler_matrix(al, be, ga, 'syxz')
    >>> numpy.allclose(R0, R1)
    True
    >>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R0 = euler_matrix(axes=axes, *angles)
    ...    R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
    ...    if not numpy.allclose(R0, R1): print(axes, "failed")

    """
    try:
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        _ = _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes

    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]

    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
    if repetition:
        sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
        if sy > _EPS:
            ax = math.atan2( M[i, j],  M[i, k])
            ay = math.atan2( sy,       M[i, i])
            az = math.atan2( M[j, i], -M[k, i])
        else:
            ax = math.atan2(-M[j, k],  M[j, j])
            ay = math.atan2( sy,       M[i, i])
            az = 0.0
    else:
        cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
        if cy > _EPS:
            ax = math.atan2( M[k, j],  M[k, k])
            ay = math.atan2(-M[k, i],  cy)
            az = math.atan2( M[j, i],  M[i, i])
        else:
            ax = math.atan2(-M[j, k],  M[j, j])
            ay = math.atan2(-M[k, i],  cy)
            az = 0.0

    if parity:
        ax, ay, az = -ax, -ay, -az
    if frame:
        ax, az = az, ax
    return ax, ay, az
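
A quick sanity check of euler_from_matrix, as a hedged sketch: the rotation vector is arbitrary and used only for illustration, and cv2.Rodrigues (imported further down in this file) is used to build the input matrix.

import cv2
rvec = numpy.array([0.1, -0.2, 0.3])       # arbitrary axis-angle rotation, illustration only
R = cv2.Rodrigues(rvec)[0]                 # corresponding 3x3 rotation matrix
roll, pitch, yaw = euler_from_matrix(R)    # default 'sxyz' convention
print(roll, pitch, yaw)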

# ---------------------------- divider ----------------------------

import cv2
import numpy as np
import os
import sys

import matplotlib.cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

from sensor_msgs.msg import Image, CameraInfo, PointCloud2
import image_geometry
CAMERA_MODEL = image_geometry.PinholeCameraModel()
camera_info = CameraInfo()
camera_info.height = 724
camera_info.width = 964
camera_info.K = [481.228482, 0.000000, 456.782531, \
                0.000000, 481.158298, 364.412635, \
                0.000000, 0.000000, 1.000000]

camera_info.distortion_model = 'plumb_bob'
camera_info.D = [-0.195875, 0.065588, 0.003400, 0.000218, 0.000000]

camera_info.R = [1.000000, 0.000000, 0.000000, \
                0.000000, 1.000000, 0.000000, \
                0.000000, 0.000000, 1.000000]

camera_info.P = [422.024750, 0.000000, 460.332694, 0.000000, \
                0.000000, 429.271149, 368.531509, 0.000000, \
                0.000000, 0.000000, 1.000000, 0.000000]

CAMERA_MODEL.fromCameraInfo(camera_info)

# Rectify image
# CAMERA_MODEL.rectifyImage(img, img)
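
For reference, project3dToPixel (used further below) is effectively the standard pinhole projection with the P matrix above. A minimal NumPy sketch of the same operation, assuming an already rectified point in the camera frame and ignoring distortion:

P = np.array(camera_info.P, dtype=np.float64).reshape(3, 4)

def project_point(xyz):
    '''Project a 3D point in the camera frame (metres) to pixel coordinates (u, v).'''
    u, v, w = P @ np.append(xyz, 1.0)   # homogeneous projection
    return u / w, v / w

print(project_point([0.5, 0.2, 3.0]))   # e.g. a point 3 m in front of the camera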

# ---------------------------- divider ----------------------------
# Calibration step: calibrate()

def calibrate(points2D=None, points3D=None):
    '''
    Calibrate the 3D and image points using OpenCV PnP RANSAC.
    Requires a minimum of 5 point correspondences.

    Inputs:
        points2D - [numpy array] - (N, 2) array of image points
        points3D - [numpy array] - (N, 3) array of 3D points
    '''

    # Check points shape
    assert(points2D.shape[0] == points3D.shape[0])
    if not (points2D.shape[0] >= 5):
        print('PnP RANSAC requires a minimum of 5 points')
        return

    # Obtain camera matrix and distortion coefficients
    camera_matrix = CAMERA_MODEL.intrinsicMatrix()
    dist_coeffs = CAMERA_MODEL.distortionCoeffs()


    # Estimate extrinsics
    success, rotation_vector, translation_vector, inliers = cv2.solvePnPRansac(points3D, 
        points2D, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
    if inliers is not None:
        inliers = inliers.flatten()  # (K, 1) -> (K,), so it can index the point arrays directly


    # Compute re-projection error.
    points2D_reproj = cv2.projectPoints(points3D, rotation_vector,
        translation_vector, camera_matrix, dist_coeffs)[0].squeeze(1)
    assert(points2D_reproj.shape == points2D.shape)
    error = (points2D_reproj - points2D)[inliers]  # Compute error only over inliers.
    rmse = np.sqrt(np.mean(error[:, 0] ** 2 + error[:, 1] ** 2))
    print('Re-projection error before LM refinement (RMSE) in px: ' + str(rmse))

    # Refine estimate using LM
    if not success:
        print('Initial estimation unsuccessful, skipping refinement')
    elif not hasattr(cv2, 'solvePnPRefineLM'):
        print('solvePnPRefineLM requires OpenCV >= 4.1.1, skipping refinement')
    else:
        assert len(inliers) >= 3, 'LM refinement requires at least 3 inlier points'
        rotation_vector, translation_vector = cv2.solvePnPRefineLM(points3D[inliers],
            points2D[inliers], camera_matrix, dist_coeffs, rotation_vector, translation_vector)

        # Compute re-projection error.
        points2D_reproj = cv2.projectPoints(points3D, rotation_vector,
            translation_vector, camera_matrix, dist_coeffs)[0].squeeze(1)
        assert(points2D_reproj.shape == points2D.shape)
        error = (points2D_reproj - points2D)[inliers]  # Compute error only over inliers.
        rmse = np.sqrt(np.mean(error[:, 0] ** 2 + error[:, 1] ** 2))
        print('Re-projection error after LM refinement (RMSE) in px: ' + str(rmse))
    
    
    # Convert rotation vector
    rotation_matrix = cv2.Rodrigues(rotation_vector)[0]
    euler = euler_from_matrix(rotation_matrix)

    # Save extrinsics
    np.savez(os.path.join("./result", 'extrinsics.npz'),
        euler=euler, R=rotation_matrix, T=translation_vector.T)

    # Display results
    print('Euler angles (RPY):', euler)
    print('Rotation Matrix:', rotation_matrix)
    print('Translation Offsets:', translation_vector.T)
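
A hedged usage sketch of calibrate(): the 2D points are generated by projecting synthetic 3D points through an assumed ground-truth pose, so the recovered extrinsics should roughly match it. The pose, the random points, and the ./result directory handling are illustrative assumptions, not part of the original script.

rng = np.random.default_rng(0)
pts3D = rng.uniform([-2.0, -2.0, 3.0], [2.0, 2.0, 8.0], size=(20, 3))   # synthetic points in front of the camera
rvec_true = np.array([0.02, -0.05, 0.01])                               # assumed ground-truth rotation
tvec_true = np.array([0.10, -0.20, 0.05])                               # assumed ground-truth translation
pts2D = cv2.projectPoints(pts3D, rvec_true, tvec_true,
                          np.asarray(CAMERA_MODEL.intrinsicMatrix()),
                          np.asarray(CAMERA_MODEL.distortionCoeffs()))[0].squeeze(1)

os.makedirs('./result', exist_ok=True)   # calibrate() saves extrinsics.npz into ./result
calibrate(points2D=pts2D, points3D=pts3D)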


# ---------------------------- divider ----------------------------
# This takes the LiDAR 3D points, transforms them into the calibrated camera's frame with the extrinsic matrix, and projects them onto the 2D image plane

def project_point_cloud(points3D, img, config):
    '''
    Projects the point cloud onto the image plane using the extrinsics

    points3D - [numpy array] - (N, 4) array of 3D points (x, y, z, intensity)
    img - [numpy array] - (H, W, 3) array of 2D image
    config - holds the extrinsics, i.e. the [R|T] matrix
    '''

    # Transform the cloud with the extrinsics; note that the filtering and
    # projection below still use points3D, i.e. the input cloud is assumed to be
    # expressed in the camera frame already.
    rt_points3D = np.dot(points3D, config.T)
    rt_points3D = np.asarray(rt_points3D.tolist())

    # Group all beams together and pick the first 4 columns for X, Y, Z, intensity.
    OUSTER_LIDAR = False
    if OUSTER_LIDAR: 
        points3D = points3D.reshape(-1, 9)[:, :4]
    
    # Filter points in front of camera
    inrange = np.where((points3D[:, 2] > 0) &
                       (points3D[:, 2] < 6) &
                       (np.abs(points3D[:, 0]) < 6) &
                       (np.abs(points3D[:, 1]) < 6))
    max_intensity = np.max(points3D[:, -1])
    points3D = points3D[inrange[0]]

    # Color map for the points
    cmap = matplotlib.cm.get_cmap('jet')
    colors = cmap(points3D[:, -1] / max_intensity) * 255

    # Project to 2D and filter points within image boundaries
    points2D = [ CAMERA_MODEL.project3dToPixel(point) for point in points3D[:, :3] ]
    points2D = np.asarray(points2D)
    inrange = np.where((points2D[:, 0] >= 0) &
                       (points2D[:, 1] >= 0) &
                       (points2D[:, 0] < img.shape[1]) &
                       (points2D[:, 1] < img.shape[0]))
    points2D = points2D[inrange[0]].round().astype('int')
    colors = colors[inrange[0]]  # keep the colors aligned with the points that survived the crop

    # Draw the projected 2D points
    for i in range(len(points2D)):
        cv2.circle(img, tuple(points2D[i]), 2, tuple(colors[i]), -1)
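
A hedged usage sketch of project_point_cloud(): the file names, the (N, 4) point layout, and the identity extrinsics in config are placeholders standing in for a real LiDAR scan, camera frame, and calibration result.

from types import SimpleNamespace

points3D = np.load('scan.npy')           # placeholder: (N, 4) array of x, y, z, intensity
img = cv2.imread('frame.png')            # placeholder: (H, W, 3) camera frame
config = SimpleNamespace(T=np.eye(4))    # placeholder extrinsics [R|T]

project_point_cloud(points3D, img, config)
cv2.imwrite('projection.png', img)       # the points are drawn in place on img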

Fusing LiDAR point clouds with RGB images improves environment perception and object recognition. The LiDAR scans its surroundings with laser beams and returns a point cloud that provides accurate 3D spatial information, while the RGB image supplies rich color information that captures the visual appearance of objects.

Fusing the two gives a more complete picture of the environment. First, aligning them yields the pixel position of every point in the image, which makes the point cloud easy to visualize: the shape and structure of the objects and scene represented by the points can be displayed directly and, combined with the color information, recognized more accurately.

Second, combining the two sources improves the accuracy of object detection and tracking. The LiDAR gives precise 3D positions and distances of obstacles, while the RGB image provides the visual appearance needed to recognize object categories and attributes. Together they pin down an object's shape, position, and pose more reliably.

In addition, LiDAR-RGB fusion supports applications such as autonomous driving, robot navigation, and virtual reality. It enables model reconstruction and scene understanding of the surroundings, and thus accurate localization and navigation; in virtual reality, it produces more realistic virtual scenes.

In short, fusing LiDAR point clouds with RGB images strengthens environment perception and object recognition, enables accurate localization and navigation, and serves fields such as autonomous driving, robot navigation, and virtual reality.