1. Fusion Calibration of LiDAR and Camera (with Python Code)

Note: This article first introduces the camera imaging principle and the principle of converting 3D coordinates to 2D coordinates, then explains why neither a calibration board nor the camera intrinsics are needed, and finally why the conversion can be obtained directly from a single extrinsic-style matrix (when solving directly, the 2D and 3D coordinates need a small, simple transformation first; the reason is analyzed below). Code is attached at the end; it was written up only after the author had used it and found it to work. The calibration scene only needs objects with sharp, well-defined corners that the LiDAR can reflect off, such as people or metal frames.
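In my own notation (a sketch inferred from the code in Section IV, not a formal derivation): for a LiDAR point $(x, y, z)$ with $x$ pointing forward, the point is first reduced to $(x_0, y_0) = (z/x,\, y/x)$, and a single $3\times3$ matrix $B$ is then fitted by least squares so that

$$B \begin{bmatrix} x_0 \\ y_0 \\ 1 \end{bmatrix} \approx \begin{bmatrix} u - c_u \\ v - c_v \\ 1 \end{bmatrix},$$

where $(u, v)$ is the clicked pixel and $(c_u, c_v)$ is half the image size, used as a rough image center. Because the intrinsics and extrinsics are folded into the one matrix $B$, no calibration board and no separately measured intrinsic matrix are required; this is the "small, simple transformation" of the 2D and 3D coordinates mentioned above.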

I. Camera Imaging Principle

II. Converting the LiDAR's 3D Coordinates to the Camera's 2D Coordinates

III. Fusion Calibration of the Two Matrices

IV. Code

1. Open the camera and grab pixel coordinates from the camera frame with mouse clicks. The code is as follows:

# coding:utf-8
import cv2

# Clicked pixel coordinates are collected in these two lists
a = []
b = []


def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        xy = "%d,%d" % (x, y)
        a.append(x)
        b.append(y)
        cv2.circle(frame, (x, y), 1, (0, 0, 255), thickness=-1)
        cv2.putText(frame, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,
                    1.0, (0, 0, 0), thickness=1)
        cv2.imshow("image", frame)
        print(x, y)


cap = cv2.VideoCapture("/dev/video61")

cv2.namedWindow("image")
cv2.setMouseCallback("image", on_EVENT_LBUTTONDOWN)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # The video stream is upside down, so rotate the frame by 90 degrees twice
    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)

    cv2.imshow("image", frame)
    cv2.waitKey(0)
    if a and b:
        print(a[-1], b[-1])
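
The clicked pixels then have to be paired by hand with LiDAR points measured on the same corner targets. As a small convenience (my own addition, not part of the original script), the lists a and b collected above can be dumped in one go once clicking is finished, so the values can be pasted into the listdir of the next script:

# Print every clicked pixel as [u, v]; run this after the capture loop has ended
for u, v in zip(a, b):
    print([u, v])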

2. The code for computing the transformation matrix is as follows:

import numpy as np


# listdir: paired LiDAR and camera coordinates; the input format is shown in __main__
#          below and must be adapted to your own measurements
# image_size: shape of the image, format shown below, adapt to your camera
def input_camera_lidar(listdir, image_size):
    A = []
    b = []
    for item in listdir:
        xyz = item[0]   # LiDAR point (x, y, z)
        uv = item[1]    # clicked pixel (u, v)

        x = xyz[0]
        y = xyz[1]
        z = xyz[2]

        # Normalize the LiDAR point: (x, y, z) ==> (z/x, y/x, 1).
        # This step is required because the same (y, z) at different ranges x
        # maps to different pixel coordinates, so the range must be divided out first.
        x0, y0 = z / x, y / x

        # Build the linear system, three equations per point pair:
        # a11*x0 + a12*y0 + b1*1 = u
        # a21*x0 + a22*y0 + b2*1 = v
        # a31*x0 + a32*y0 + b3*1 = 1
        A1 = [[x0, y0, 1, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, x0, y0, 1, 0, 0, 0],
              [0, 0, 0, 0, 0, 0, x0, y0, 1]]

        # Shift the pixel to image-centered coordinates
        u = uv[0] - image_size[0] / 2
        v = uv[1] - image_size[1] / 2

        b1 = [[u], [v], [1]]

        A.append(A1[0])
        A.append(A1[1])
        A.append(A1[2])

        b.append(b1[0])
        b.append(b1[1])
        b.append(b1[2])

    # Least-squares solution via the pseudo-inverse
    ss = np.linalg.pinv(A).dot(b)
    B = [[ss[0][0], ss[1][0], ss[2][0]],
         [ss[3][0], ss[4][0], ss[5][0]],
         [ss[6][0], ss[7][0], ss[8][0]]]

    return B


if __name__ == "__main__":
    image_size = [480, 640]

    # Point pairs from actual measurements: [[LiDAR x, y, z], [pixel u, v]]
    listdir = [[[3670, 169, 129], [281, 211]],
               [[4776, -1147, 299], [386, 207]],
               [[4141, -950, -127], [380, 237]],
               [[3824, -1314, 502], [416, 172]],
               [[4153, -2357, 48.2], [500, 224]],
               [[3205, 1214, -80], [161, 233]],
               [[4778, -1148, 263], [387, 211]],
               [[3922, -1804, 531], [457, 176]],
               [[3900, -122, -556], [311, 276]],
               [[4054, -390, -545], [333, 273]],
               [[4768, -1132, -487], [384, 264]],
               ]

    B = input_camera_lidar(listdir, image_size)
    print(B)
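
A quick way to judge the quality of the fit is to push the calibration points themselves back through B and compare against the clicked pixels. This reprojection check is my own addition; it reuses listdir, image_size and B exactly as defined above and can be appended at the end of the __main__ block:

    # Reproject each calibration point with B and print the clicked vs. predicted pixel
    for xyz, uv in listdir:
        x0, y0 = xyz[2] / xyz[0], xyz[1] / xyz[0]
        pred = np.dot(np.array(B), np.array([[x0], [y0], [1]]))
        pred_u = pred[0][0] + image_size[0] / 2
        pred_v = pred[1][0] + image_size[1] / 2
        print(uv, "->", [round(pred_u, 1), round(pred_v, 1)])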


3. Test code that converts LiDAR coordinates to camera coordinates:

import numpy as np


# lidar_index: normalized LiDAR coordinate, shape (3, 1)
# B: the transformation matrix obtained above
# image_size: shape of the image
def output_lidar(lidar_index, B, image_size):
    print("...................camera coordinates......................")

    # Offset that moves image-centered coordinates back to pixel coordinates
    b1 = np.array([[image_size[0] / 2], [image_size[1] / 2], [1]]).reshape(3, 1)

    lidar_index = np.array(lidar_index)
    B = np.array(B)

    ss = np.dot(B, lidar_index) + b1
    return ss


if __name__ == "__main__":
    # B = [[-24.500760749792903, -355.40002643416284, 58.8043861483383],
    #      [-397.06466121337525, -2.428988418743103, -94.02523143506387],
    #      [-6.217248937900877e-14, -1.4166445794216997e-13, 0.9999999999999648]]  # earlier result, fairly accurate

    # Transformation matrix B from the previous step
    B = [[-20.16703503751802, -358.03933850329025, 57.72595612343851],
         [-372.2097187710982, -4.475480223336756, -94.87477824692705],
         [6.439293542825908e-14, 9.825473767932635e-15, 1.0000000000000087]]

    image_size = [480, 640]

    # Raw LiDAR point, then normalized to (z/x, y/x, 1) as in the calibration step
    lidar_index = [4360.5, -2733.0, -178]
    lidar_index = [lidar_index[2] / lidar_index[0], lidar_index[1] / lidar_index[0], 1]
    lidar_index = np.array(lidar_index).reshape(3, 1)

    ss = output_lidar(lidar_index, B, image_size)
    print(ss)
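
When a whole scan has to be projected at once (for example to feed the distance lookup in the next step), the same transform can be applied to an (N, 3) array in one go. This helper is a minimal sketch of my own, assuming points is an N x 3 array of raw LiDAR (x, y, z) values and reusing B and image_size as defined above:

def project_points(points, B, image_size):
    # Normalize every point to (z/x, y/x, 1), as in the calibration step
    points = np.asarray(points, dtype=float)
    norm = np.stack([points[:, 2] / points[:, 0],
                     points[:, 1] / points[:, 0],
                     np.ones(len(points))], axis=1)
    # Apply B, then shift back from image-centered to pixel coordinates
    uv = norm.dot(np.array(B).T)
    uv[:, 0] += image_size[0] / 2
    uv[:, 1] += image_size[1] / 2
    return uv[:, :2]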

4. The camera obtains the point-cloud distance as follows:

import numpy as np


# s2: projected LiDAR data, one row per point: [u, v, distance]
# s3: camera pixel to look up, as [u, v]
def minss(s2, s3):
    dist = s2[:, -1:]         # last column: the distance carried along with each point
    pixels = s2[:, :-1]       # remaining columns: the projected pixel coordinates
    err = (pixels - s3) ** 2  # squared pixel error against the query pixel
    aves = np.average(err, axis=1).reshape((-1, 1))
    outs = np.c_[aves, dist]  # pair each error with its distance
    outs_sorted = outs[np.argsort(outs[:, 0])]
    print(outs_sorted)
    # Distance of the LiDAR point whose projection is closest to the query pixel
    print(outs_sorted[0][1])
    return aves


# Toy example: three projected points with distances 1, 5 and 1, queried at pixel (2, 3)
s2 = np.array([[4, 5, 1], [2, 3, 5], [2, 2, 1]])
s3 = np.array([2, 3])

aves = minss(s2, s3)
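
Putting the last two scripts together: once a whole scan has been projected to pixels (for instance with a helper like the project_points sketch shown earlier), the distance at a clicked camera pixel can be read off by stacking the projected pixels with each point's range and passing the result to minss. The values below are placeholders for illustration only:

# Three projected LiDAR pixels with their ranges, queried at a clicked pixel (310, 270)
projected = np.array([[281.0, 211.0], [311.0, 276.0], [384.0, 264.0]])  # (N, 2) pixels
ranges = np.array([[3670.0], [3900.0], [4768.0]])                       # (N, 1) distances
data = np.c_[projected, ranges]
minss(data, np.array([310.0, 270.0]))  # prints the range of the nearest projected point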
