创建点云数据中的前视图
前视图投影
为了将激光雷达传感器的前视图平面化为二维图像,必须将三维空间中的点投影到可以展开的圆柱形表面上,以将其平面化。下面公式见论文Vehicle Detection from 3D Lidar Using Fully Convolutional Network
# h_res = 激光雷达的水平分辨率
# v_res = 激光雷达的垂直分辨率
# Azimuth angle -> image x, elevation angle -> image y (cylinder projection).
# FIX: the first line used bare `arctan2`, which is undefined unless numpy
# was star-imported; qualify it with `np.` like the second line.
x_img = np.arctan2(y_lidar, x_lidar)/ h_res
y_img = np.arctan2(z_lidar, np.sqrt(x_lidar**2 + y_lidar**2))/ v_res
问题是这样做的方式将图像的接缝(seam)直接放在汽车的右侧。 将接缝定位在汽车的后部更有意义,因此前面和侧面这些更重要的区域是不中断的。 使这些重要区域不间断将使卷积神经网络更容易识别这些重要区域中的整个对象。 以下代码修复了这个问题。
# Negating y places the cylinder seam behind the car (instead of to its
# right), so the more important front/side regions stay contiguous.
x_img = np.arctan2(-y_lidar, x_lidar)/ h_res # seam in the back
y_img = np.arctan2(z_lidar, np.sqrt(x_lidar**2 + y_lidar**2))/ v_res
沿着每个轴配置刻度
h_res 和 v_res 非常依赖于所使用的雷达传感器。在KITTI数据集中,使用的传感器是Velodyne HDL 64E。它具有以下重要特性:
1、垂直视角为26.9度,分辨率为0.4度。垂直视野被分成传感器上方+2度和传感器下方-24.9度。
2、360度的水平视野,分辨率为0.08-0.35度(取决于旋转速度)
3、旋转速度可以选择在5-20Hz之间。
# Resolution and field of view of the lidar sensor (Velodyne HDL 64E)
h_res = 0.35 # horizontal resolution, assuming a 20 Hz rotation rate
v_res = 0.4 # vertical resolution
v_fov = (-24.9, 2.0) # field of view along the vertical axis (-ve, +ve), degrees
v_fov_total = -v_fov[0] + v_fov[1]
# Convert to radians
v_res_rad = v_res * (np.pi/180)
h_res_rad = h_res * (np.pi/180)
# Project into image coordinates
# NOTE(review): d_lidar (top-down distance, sqrt(x^2 + y^2)) is assumed to
# have been computed earlier — see the full code below.
x_img = np.arctan2(-y_lidar, x_lidar)/ h_res_rad
y_img = np.arctan2(z_lidar, d_lidar)/ v_res_rad
然而,这导致大约一半的点位于负x坐标上,而大多数位于负y坐标上。为了投影到2D图像,我们需要将最小值设为(0, 0)。所以我们需要转换:
# Shift coordinates so that (0, 0) is the minimum
x_min = -360.0/h_res/2 # theoretical min x value based on sensor specs
x_img = x_img - x_min # shift
x_max = 360.0/h_res # max x value after shifting
y_min = v_fov[0]/v_res # theoretical min y value based on sensor specs
y_img = y_img - y_min # shift
y_max = v_fov_total/v_res # max y value after shifting
y_max = y_max + 5 # UGLY: fudge factor, because the calculation based on the
# spec sheet does not seem to match the range of angles collected in the data.
绘制二维图像
pixel_values = -d_lidar # encode each pixel's value with the (negated) depth
cmap = "jet" # use a color map
dpi = 100 # image resolution
fig, ax = plt.subplots(figsize=(x_max/dpi, y_max/dpi), dpi=dpi)
ax.scatter(x_img,y_img, s=1, c=pixel_values, linewidths=0, alpha=1, cmap=cmap)
# NOTE(review): set_axis_bgcolor was removed in matplotlib >= 2.2;
# newer versions use ax.set_facecolor instead.
ax.set_axis_bgcolor((0, 0, 0)) # set regions with no points to black
ax.axis('scaled') # {equal, scaled}
ax.xaxis.set_visible(False) # do not draw axis tick marks
ax.yaxis.set_visible(False) # Do not draw axis tick marks
plt.xlim([0, x_max]) # prevent drawing empty space outside of horizontal FOV
plt.ylim([0, y_max]) # prevent drawing empty space outside of vertical FOV
fig.savefig("/tmp/depth.png", dpi=dpi, bbox_inches='tight', pad_inches=0.0)
完整代码
def lidar_to_2d_front_view(points,
                           v_res,
                           h_res,
                           v_fov,
                           val="depth",
                           cmap="jet",
                           saveto=None,
                           y_fudge=0.0
                           ):
    """ Takes points in 3D space from LIDAR data and projects them to a 2D
    "front view" image, and saves (or shows) that image.

    Args:
        points: (np array)
            The numpy array containing the lidar points.
            The shape should be Nx4
            - Where N is the number of points, and
            - each point is specified by 4 values (x, y, z, reflectance)
        v_res: (float)
            vertical resolution of the lidar sensor used, in degrees.
        h_res: (float)
            horizontal resolution of the lidar sensor used, in degrees.
        v_fov: (tuple of two floats)
            (minimum_negative_angle, max_positive_angle)
        val: (str)
            What value to use to encode the points that get plotted.
            One of {"depth", "height", "reflectance"}
        cmap: (str)
            Color map to use to color code the `val` values.
            NOTE: Must be a value accepted by matplotlib's scatter function
            Examples: "jet", "gray"
        saveto: (str or None)
            If a string is provided, it saves the image as this filename.
            If None, then it just shows the image.
        y_fudge: (float)
            A hacky fudge factor to use if the theoretical calculations of
            vertical range do not match the actual data.
            For a Velodyne HDL 64E, set this value to 5.
    """
    # DUMMY PROOFING
    assert len(v_fov) == 2, "v_fov must be list/tuple of length 2"
    assert v_fov[0] <= 0, "first element in v_fov must be 0 or negative"
    assert val in {"depth", "height", "reflectance"}, \
        'val must be one of {"depth", "height", "reflectance"}'

    x_lidar = points[:, 0]
    y_lidar = points[:, 1]
    z_lidar = points[:, 2]
    r_lidar = points[:, 3]  # Reflectance
    # Distance relative to origin when looked at from the top (cylindrical
    # radius). Using this, rather than the absolute 3D distance, makes the
    # projection land on a cylinder instead of a slice of a sphere.
    d_lidar = np.sqrt(x_lidar ** 2 + y_lidar ** 2)

    v_fov_total = -v_fov[0] + v_fov[1]

    # Convert resolutions to radians
    v_res_rad = v_res * (np.pi / 180)
    h_res_rad = h_res * (np.pi / 180)

    # PROJECT INTO IMAGE COORDINATES
    # y is negated so the cylinder seam falls behind the car, keeping the
    # front/side regions contiguous in the image.
    x_img = np.arctan2(-y_lidar, x_lidar) / h_res_rad
    y_img = np.arctan2(z_lidar, d_lidar) / v_res_rad

    # SHIFT COORDINATES TO MAKE 0,0 THE MINIMUM
    x_min = -360.0 / h_res / 2   # Theoretical min x value based on sensor specs
    x_img -= x_min               # Shift
    x_max = 360.0 / h_res        # Theoretical max x value after shifting

    y_min = v_fov[0] / v_res     # Theoretical min y value based on sensor specs
    y_img -= y_min               # Shift
    y_max = v_fov_total / v_res  # Theoretical max y value after shifting
    y_max += y_fudge             # Fudge factor if the calculations based on
                                 # the spec sheet do not match the range of
                                 # angles collected in the data.

    # WHAT DATA TO USE TO ENCODE THE VALUE FOR EACH PIXEL
    if val == "reflectance":
        pixel_values = r_lidar
    elif val == "height":
        pixel_values = z_lidar
    else:
        pixel_values = -d_lidar

    # PLOT THE IMAGE
    # BUG FIX: the original re-assigned cmap = "jet" here, silently
    # ignoring the caller's `cmap` argument.
    dpi = 100  # Image resolution
    fig, ax = plt.subplots(figsize=(x_max / dpi, y_max / dpi), dpi=dpi)
    ax.scatter(x_img, y_img, s=1, c=pixel_values, linewidths=0, alpha=1,
               cmap=cmap)
    # Set regions with no points to black. `set_axis_bgcolor` was removed
    # in matplotlib >= 2.2; fall back to it only on old versions.
    if hasattr(ax, "set_facecolor"):
        ax.set_facecolor((0, 0, 0))
    else:
        ax.set_axis_bgcolor((0, 0, 0))
    ax.axis('scaled')            # {equal, scaled}
    ax.xaxis.set_visible(False)  # Do not draw axis tick marks
    ax.yaxis.set_visible(False)  # Do not draw axis tick marks
    plt.xlim([0, x_max])  # prevent drawing empty space outside of horizontal FOV
    plt.ylim([0, y_max])  # prevent drawing empty space outside of vertical FOV

    if saveto is not None:
        fig.savefig(saveto, dpi=dpi, bbox_inches='tight', pad_inches=0.0)
    else:
        fig.show()
使用示例
import matplotlib.pyplot as plt
import numpy as np
# Load one KITTI Velodyne scan exported as plain text: one point per row,
# columns (x, y, z, reflectance).
lidar = np.loadtxt('0000000000.txt')
# Velodyne HDL 64E sensor characteristics
HRES = 0.35 # horizontal resolution (assuming 20Hz setting)
VRES = 0.4 # vertical res
VFOV = (-24.9, 2.0) # Field of view (-ve, +ve) along vertical axis
Y_FUDGE = 5 # y fudge factor for velodyne HDL 64E
# Render one image per pixel encoding: depth, height, reflectance.
lidar_to_2d_front_view(lidar, v_res=VRES, h_res=HRES, v_fov=VFOV, val="depth",
saveto="D:\\天翼云盘同步盘\\project\\PythonProject\\pointcloud\\tmp/lidar_depth.png", y_fudge=Y_FUDGE)
lidar_to_2d_front_view(lidar, v_res=VRES, h_res=HRES, v_fov=VFOV, val="height",
saveto="D:\\天翼云盘同步盘\\project\\PythonProject\\pointcloud\\tmp/lidar_height.png", y_fudge=Y_FUDGE)
lidar_to_2d_front_view(lidar, v_res=VRES, h_res=HRES, v_fov=VFOV,
val="reflectance", saveto="D:\\天翼云盘同步盘\\project\\PythonProject\\pointcloud\\tmp/lidar_reflectance.png",
y_fudge=Y_FUDGE)
使用numpy创建
def scale_to_255(a, min, max, dtype=np.uint8):
    """ Linearly map values from the range [min, max] to the range 0-255.

    The result is cast to `dtype` (default np.uint8). Values outside
    [min, max] are not clipped before the cast.
    """
    span = float(max - min)
    normalized = (a - min) / span
    return (normalized * 255).astype(dtype)
# ==============================================================================
# POINT_CLOUD_TO_PANORAMA
# ==============================================================================
def point_cloud_to_panorama(points,
                            v_res=0.42,
                            h_res=0.35,
                            v_fov=(-24.9, 2.0),
                            d_range=(0, 100),
                            y_fudge=3
                            ):
    """ Build a 360 degree panoramic image from lidar point cloud data and
    return it as a numpy array.

    Args:
        points: (np array)
            The point cloud. Shape must be at least Nx4 — each row is
            (x, y, z, reflectance, ...). NOTE: column 3 is read even
            though only x, y, z influence the output.
        v_res: (float)
            Vertical angular resolution in degrees; influences the image
            height.
        h_res: (float)
            Horizontal angular resolution in degrees; influences the image
            width.
        v_fov: (tuple of two floats)
            Vertical field of view in degrees
            (-min_negative_angle, max_positive_angle).
        d_range: (tuple of two floats) (default = (0,100))
            Distance values are clipped to this (min, max) range before
            being scaled to pixel intensities.
        y_fudge: (float)
            Hacky fudge factor for when the theoretical image height does
            not match the actual data.

    Returns:
        np.uint8 array representing the 360 degree panorama.
    """
    xs, ys, zs = points[:, 0], points[:, 1], points[:, 2]
    rs = points[:, 3]  # reflectance (read but not used downstream)
    # Top-down (cylindrical) distance from the origin. Using this rather
    # than the absolute 3D distance projects the points onto a cylinder
    # instead of a segment of a sphere.
    dists = np.sqrt(xs ** 2 + ys ** 2)

    deg2rad = np.pi / 180
    fov_total = -v_fov[0] + v_fov[1]

    # MAP TO CYLINDER: angle divided by angular resolution gives the
    # (still unshifted, floating-point) image coordinate.
    col_f = np.arctan2(ys, xs) / (h_res * deg2rad)
    row_f = -(np.arctan2(zs, dists) / (v_res * deg2rad))

    # THEORETICAL MAX HEIGHT FOR THE IMAGE (plus the fudge factor)
    d_plane = (fov_total / v_res) / (fov_total * deg2rad)
    h_below = d_plane * np.tan(-v_fov[0] * deg2rad)
    h_above = d_plane * np.tan(v_fov[1] * deg2rad)
    y_max = int(np.ceil(h_below + h_above + y_fudge))

    # SHIFT COORDINATES SO (0, 0) IS THE MINIMUM, convert to int indices
    x_offset = -360.0 / h_res / 2
    x_idx = np.trunc(-col_f - x_offset).astype(np.int32)
    x_max = int(np.ceil(360.0 / h_res))
    y_offset = -((v_fov[1] / v_res) + y_fudge)
    y_idx = np.trunc(row_f - y_offset).astype(np.int32)

    # CLIP DISTANCES, then paint them into the image as 0-255 intensities
    dists = np.clip(dists, a_min=d_range[0], a_max=d_range[1])
    img = np.zeros([y_max + 1, x_max + 1], dtype=np.uint8)
    img[y_idx, x_idx] = scale_to_255(dists, min=d_range[0], max=d_range[1])
    return img
使用 Velodyne HDL 64E 配置的示例
import matplotlib.pyplot as plt
import numpy as np
# Load one KITTI Velodyne scan exported as plain text: one point per row,
# columns (x, y, z, reflectance).
points = np.loadtxt('0000000000.txt')
# Build the panorama using Velodyne HDL 64E settings.
im = point_cloud_to_panorama(points,
v_res=0.42,
h_res=0.35,
v_fov=(-24.9, 2.0),
y_fudge=3,
d_range=(0,100))
特别说明:本文为本人学习所做笔记。
具体参考:http://ronny.rest/tutorials/module/pointclouds_01/point_cloud_birdseye/