Modifications to the nuScenes dataset devkit (nuscenes-devkit)

1. Extracting images with their corresponding radar data, and converting the image labels into matching XML files

Code used:

from nuscenes import NuScenes
# from python_sdk.nuscenes import NuScenes
import numpy as np
from xml.dom.minidom import Document
import cv2
import matplotlib.pyplot as plt
from PIL import Image

# This function writes the labels to a VOC-style XML annotation file
def makexml(name_all,box_all,xml_path,img_name,w,h,d):
    xmlBuilder = Document()
    annotation = xmlBuilder.createElement("annotation")  # create the <annotation> root element
    xmlBuilder.appendChild(annotation)

    folder = xmlBuilder.createElement("folder")  # <folder> element
    folderContent = xmlBuilder.createTextNode("nuscenes")
    folder.appendChild(folderContent)
    annotation.appendChild(folder)

    filename = xmlBuilder.createElement("filename")  # <filename> element
    filenameContent = xmlBuilder.createTextNode(img_name)
    filename.appendChild(filenameContent)
    annotation.appendChild(filename)

    size = xmlBuilder.createElement("size")  # <size> element
    width = xmlBuilder.createElement("width")  # <width> child of <size>
    widthContent = xmlBuilder.createTextNode(str(w))
    width.appendChild(widthContent)
    size.appendChild(width)
    height = xmlBuilder.createElement("height")  # <height> child of <size>
    heightContent = xmlBuilder.createTextNode(str(h))
    height.appendChild(heightContent)
    size.appendChild(height)
    depth = xmlBuilder.createElement("depth")  # <depth> child of <size>
    depthContent = xmlBuilder.createTextNode(str(d))
    depth.appendChild(depthContent)
    size.appendChild(depth)
    annotation.appendChild(size)

    # write one <object> element per annotation
    for i in range(len(name_all)):
        object = xmlBuilder.createElement("object")
        picname = xmlBuilder.createElement("name")
        nameContent = xmlBuilder.createTextNode(name_all[i])
        picname.appendChild(nameContent)
        object.appendChild(picname)
        pose = xmlBuilder.createElement("pose")
        poseContent = xmlBuilder.createTextNode("Unspecified")
        pose.appendChild(poseContent)
        object.appendChild(pose)
        truncated = xmlBuilder.createElement("truncated")
        truncatedContent = xmlBuilder.createTextNode("0")
        truncated.appendChild(truncatedContent)
        object.appendChild(truncated)
        difficult = xmlBuilder.createElement("difficult")
        difficultContent = xmlBuilder.createTextNode("0")
        difficult.appendChild(difficultContent)
        object.appendChild(difficult)
        bndbox = xmlBuilder.createElement("bndbox")
        xmin = xmlBuilder.createElement("xmin")
        xminContent = xmlBuilder.createTextNode(str(box_all[i][0]))
        xmin.appendChild(xminContent)
        bndbox.appendChild(xmin)
        ymin = xmlBuilder.createElement("ymin")
        yminContent = xmlBuilder.createTextNode(str(box_all[i][1]))
        ymin.appendChild(yminContent)
        bndbox.appendChild(ymin)
        xmax = xmlBuilder.createElement("xmax")
        xmaxContent = xmlBuilder.createTextNode(str(box_all[i][2]))
        xmax.appendChild(xmaxContent)
        bndbox.appendChild(xmax)
        ymax = xmlBuilder.createElement("ymax")
        ymaxContent = xmlBuilder.createTextNode(str(box_all[i][3]))
        ymax.appendChild(ymaxContent)
        bndbox.appendChild(ymax)
        object.appendChild(bndbox)
        annotation.appendChild(object)
    f = open(xml_path + img_name[:-4] + ".xml", 'w', encoding='utf-8')
    xmlBuilder.writexml(f, indent='\t', newl='\n', addindent='\t', encoding='utf-8')
    f.close()
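
# Usage example (hedged; the file name and box values below are illustrative only):
#   makexml(["car"], [[100, 200, 300, 400]], "G:/nuscenes_data_convert/xml/", "frame0001.jpg", 1600, 900, 3)
# writes G:/nuscenes_data_convert/xml/frame0001.xml containing a single "car" object
# (nuScenes camera frames are 1600x900 pixels).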


#nusc = NuScenes(version='v1.0-trainval', dataroot='G:/download/nuscenes_data_1/v1.0-trainval01_blobs/', verbose=True)
nusc = NuScenes(version='v1.0-mini', dataroot='C:\\Users\\13532\\Downloads\\v1.0-mini\\v1.0-mini', verbose=True)

my_scene = nusc.scene[0:84] # take the first 84 scenes; set this to the number of scenes in your download (v1.0-mini only has 10 scenes, so the slice simply returns all of them)
for scen in my_scene: # iterate over each scene
    # walk through the samples of this scene
    sample = None
    while True:
        if sample is None:
            sample = nusc.get('sample', scen['first_sample_token']) # get the first sample (frame) of the scene
        if sample['next'] != '':
            sample = nusc.get('sample', sample['next']) # advance to the next sample, which holds the tokens of every sensor reading for that frame
            sample_data = nusc.get('sample_data', sample['data']['CAM_FRONT']) # get the front-camera sample_data
            # all_point: the first 3 rows are the coordinates after the coordinate transform, the middle 15 rows are the remaining radar channels, the last 3 rows are the untransformed radar coordinates
            all_point, x_y_z, color, img_from_radar = nusc.render_pointcloud_in_image(sample['token'], pointsensor_channel='RADAR_FRONT', dot_size=50)
            all_box_and_name, img_path = nusc.render_sample_data(sample_data['token']) # use the sample_data token to get every bounding box in the image together with its class name
            img = cv2.imread(img_from_radar)
            box = []
            name_a = []
            for item in all_box_and_name:
                x1 = 0 if int(item.box_2D[0])<0 else int(item.box_2D[0])
                x2 = 0 if int(item.box_2D[1])<0 else int(item.box_2D[1])
                x3 = 0 if int(item.box_2D[2])<0 else int(item.box_2D[2])
                x4 = 0 if int(item.box_2D[3])<0 else int(item.box_2D[3])
                img_h, img_w = img.shape[:2]
                if x4 > img_h:  # clamp ymax to the image height
                    x4 = img_h
                if x3 > img_w:  # clamp xmax to the image width
                    x3 = img_w
                box.append([x1,x2,x3,x4])
                tem_name_arr = item.name.split('.')
                # normalize the box name
                tem_name = ""
                # if tem_name_arr[1] == "truck" or tem_name_arr[1] == "trailer":
                #     tem_name = "truck"
                # else:
                tem_name = tem_name_arr[1] # use the second field of every box name (e.g. 'car' in 'vehicle.car') as the label
                name_a.append(tem_name)
                # the next two lines draw the box and its label
                cv2.rectangle(img,(x1,x2),(x3,x4),(255,0,0),2)
                cv2.putText(img, tem_name, (x1, x2), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
            ##############################     image rendering    ########################################
            # draw the radar points on the image
            for item in range(x_y_z.shape[1]):
                cv2.circle(img,(int(x_y_z[0][item]),int(x_y_z[1][item])),3,(0,0,255),4)
            cv2.namedWindow("frame", 0)
            cv2.imshow("frame",img)
            cv2.waitKey(0)
            ####################################################################################
            ###########################          data saving       ###############################
            # h, w, d = img.shape
            # img_name = img_path.split('/')[-1]
            # # print(img_name)
            # makexml(name_a, box, "G:/nuscenes_data_convert/xml/", img_name, w, h, d) # save the VOC XML
            # cv2.imwrite("G:/nuscenes_data_convert/img/"+img_name,img) # save the image
            # # save the radar data: one .npy file per image, written directly with np.save
            # np.save("G:/nuscenes_data_convert/radar/"+img_name[:-4],all_point)
            ##################################################################################

            # encode the radar data
            # first plot the radar points as a scatter plot
            # plt.scatter(all_point[0,:],all_point[1,:],c=color)
            # plt.imshow(img)
            # plt.show()


        else:
            break
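
For reference, an annotation file written by makexml has roughly the following structure (values are illustrative; the exact whitespace produced by minidom differs slightly):

<?xml version="1.0" encoding="utf-8"?>
<annotation>
    <folder>nuscenes</folder>
    <filename>frame0001.jpg</filename>
    <size>
        <width>1600</width>
        <height>900</height>
        <depth>3</depth>
    </size>
    <object>
        <name>car</name>
        <pose>Unspecified</pose>
        <truncated>0</truncated>
        <difficult>0</difficult>
        <bndbox>
            <xmin>100</xmin>
            <ymin>200</ymin>
            <xmax>300</xmax>
            <ymax>400</ymax>
        </bndbox>
    </object>
</annotation>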

1.1 Devkit modification point 1:

1.1.1 Modifying nusc.render_pointcloud_in_image:

In the original devkit this function only projects the points onto the image; here it is modified so that it returns four values: all_point, x_y_z, color, img_from_radar, where:
all_point: a 21-row array; the first 3 rows are the coordinates after the coordinate transform, the middle 15 rows are the remaining radar channels, and the last 3 rows are the untransformed radar coordinates. For the meaning of the 15 middle rows see my earlier write-up on parsing the PCD format (or the field list in the from_file docstring below).
x_y_z: the 3D coordinates after the coordinate transform (the projected points).
color: the color/intensity value of each point.
img_from_radar: the file path of the corresponding camera image.
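
As a quick check of the layout described above, the 21 rows can be split back into the three groups (a minimal sketch; the .npy path is only a placeholder for the radar files saved by the script in section 1):

import numpy as np

all_point = np.load("G:/nuscenes_data_convert/radar/some_frame.npy")  # placeholder file name
uv_depth = all_point[0:3, :]     # rows 0-2: coordinates projected into the image plane
radar_attr = all_point[3:18, :]  # rows 3-17: the 15 remaining radar channels (dyn_prop, id, rcs, vx, vy, ...)
xyz_radar = all_point[18:21, :]  # rows 18-20: the original, untransformed radar-frame coordinates
print(uv_depth.shape, radar_attr.shape, xyz_radar.shape)  # all share the same number of columns (points)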

Modify the function as follows:

    def render_pointcloud_in_image(self, sample_token: str, dot_size: int = 15, pointsensor_channel: str = 'LIDAR_TOP',
                                   camera_channel: str = 'CAM_FRONT', out_path: str = None,
                                   render_intensity: bool = False,
                                   show_lidarseg: bool = False,
                                   filter_lidarseg_labels: List = None,
                                   show_lidarseg_legend: bool = False,
                                   verbose: bool = True,
                                   lidarseg_preds_bin_path: str = None,
                                   show_panoptic: bool = False):
        self.explorer.render_pointcloud_in_image(sample_token, dot_size, pointsensor_channel=pointsensor_channel,
                                                 camera_channel=camera_channel, out_path=out_path,
                                                 render_intensity=render_intensity,
                                                 show_lidarseg=show_lidarseg,
                                                 filter_lidarseg_labels=filter_lidarseg_labels,
                                                 show_lidarseg_legend=show_lidarseg_legend,
                                                 verbose=verbose,
                                                 lidarseg_preds_bin_path=lidarseg_preds_bin_path,
                                                 show_panoptic=show_panoptic)
        return self.explorer.point_a, self.explorer.point_x_y_z, self.explorer.color, self.explorer.img

Then modify the underlying self.explorer.render_pointcloud_in_image as follows:

    def render_pointcloud_in_image(self,
                                   sample_token: str,
                                   dot_size: int = 15,
                                   pointsensor_channel: str = 'LIDAR_TOP',
                                   camera_channel: str = 'CAM_FRONT',
                                   out_path: str = None,
                                   render_intensity: bool = False,
                                   show_lidarseg: bool = False,
                                   filter_lidarseg_labels: List = None,
                                   ax: Axes = None,
                                   show_lidarseg_legend: bool = False,
                                   verbose: bool = True,
                                   lidarseg_preds_bin_path: str = None,
                                   show_panoptic: bool = False):
        """
        Scatter-plots a pointcloud on top of image.
        :param sample_token: Sample token.
        :param dot_size: Scatter plot dot size.
        :param pointsensor_channel: RADAR or LIDAR channel name, e.g. 'LIDAR_TOP'.
        :param camera_channel: Camera channel name, e.g. 'CAM_FRONT'.
        :param out_path: Optional path to save the rendered figure to disk.
        :param render_intensity: Whether to render lidar intensity instead of point depth.
        :param show_lidarseg: Whether to render lidarseg labels instead of point depth.
        :param filter_lidarseg_labels: Only show lidar points which belong to the given list of classes.
        :param ax: Axes onto which to render.
        :param show_lidarseg_legend: Whether to display the legend for the lidarseg labels in the frame.
        :param verbose: Whether to display the image in a window.
        :param lidarseg_preds_bin_path: A path to the .bin file which contains the user's lidar segmentation
                                        predictions for the sample.
        :param show_panoptic: When set to True, the lidar data is colored with the panoptic labels. When set
            to False, the colors of the lidar data represent the distance from the center of the ego vehicle.
            If show_lidarseg is True, show_panoptic will be set to False.
        """
        if show_lidarseg:
            show_panoptic = False
        sample_record = self.nusc.get('sample', sample_token)  # look up the sample record from its token

        # Here we just grab the front camera and the point sensor.
        pointsensor_token = sample_record['data'][pointsensor_channel]  # token of the point-sensor (radar/lidar) sample_data
        camera_token = sample_record['data'][camera_channel]  # token of the camera sample_data

        points, point_all, coloring, im = self.map_pointcloud_to_image(pointsensor_token, camera_token,  # returns the projected points (x, y, z only), the full point matrix, the per-point colors, and the original image im
                                                            render_intensity=render_intensity,
                                                            show_lidarseg=show_lidarseg,
                                                            filter_lidarseg_labels=filter_lidarseg_labels,
                                                            lidarseg_preds_bin_path=lidarseg_preds_bin_path,
                                                            show_panoptic=show_panoptic)

1.1.1.1 Next, modify the self.map_pointcloud_to_image called inside this function as follows:

    def map_pointcloud_to_image(self,  # project the point cloud onto the image plane
                                pointsensor_token: str,
                                camera_token: str,
                                min_dist: float = 1.0,
                                render_intensity: bool = False,
                                show_lidarseg: bool = False,
                                filter_lidarseg_labels: List = None,
                                lidarseg_preds_bin_path: str = None,
                                show_panoptic: bool = False) -> Tuple:
        """
        Given a point sensor (lidar/radar) token and camera sample_data token, load pointcloud and map it to the image
        plane.
        :param pointsensor_token: Lidar/radar sample_data token.
        :param camera_token: Camera sample_data token.
        :param min_dist: Distance from the camera below which points are discarded.
        :param render_intensity: Whether to render lidar intensity instead of point depth.
        :param show_lidarseg: Whether to render lidar intensity instead of point depth.
        :param filter_lidarseg_labels: Only show lidar points which belong to the given list of classes. If None
            or the list is empty, all classes will be displayed.
        :param lidarseg_preds_bin_path: A path to the .bin file which contains the user's lidar segmentation
                                        predictions for the sample.
        :param show_panoptic: When set to True, the lidar data is colored with the panoptic labels. When set
            to False, the colors of the lidar data represent the distance from the center of the ego vehicle.
            If show_lidarseg is True, show_panoptic will be set to False.
        :return: (points <np.float: 3, n>, point_all <np.float: d, n>, coloring <np.float: n>, image <Image>).
        """

        cam = self.nusc.get('sample_data', camera_token)  # camera sample_data record
        pointsensor = self.nusc.get('sample_data', pointsensor_token)  # point-sensor (radar/lidar) sample_data record
        pcl_path = osp.join(self.nusc.dataroot, pointsensor['filename'])  # path of the point-cloud file
        # print(pcl_path)
        if pointsensor['sensor_modality'] == 'lidar':
            if show_lidarseg or show_panoptic:
                gt_from = 'lidarseg' if show_lidarseg else 'panoptic'
                assert hasattr(self.nusc, gt_from), f'Error: nuScenes-{gt_from} not installed!'

                # Ensure that lidar pointcloud is from a keyframe.
                assert pointsensor['is_key_frame'], \
                    'Error: Only pointclouds which are keyframes have lidar segmentation labels. Rendering aborted.'

                assert not render_intensity, 'Error: Invalid options selected. You can only select either ' \
                                             'render_intensity or show_lidarseg, not both.'

            pc = LidarPointCloud.from_file(pcl_path)
        else:
            pc = RadarPointCloud.from_file(pcl_path)  # returns the point cloud wrapped in the class; the class-level state lists normally control which returns are filtered out (filtering is disabled in the modified from_file below)
        im_path_radar = osp.join(self.nusc.dataroot, cam['filename'])
        im = Image.open(osp.join(self.nusc.dataroot, cam['filename']))  # open the 2D color image

        no_change_x_y_z = np.copy(pc.points)
        # Points live in the point sensor frame. So they need to be transformed via global to the image plane.
        # First step: transform the pointcloud to the ego vehicle frame for the timestamp of the sweep.
        cs_record = self.nusc.get('calibrated_sensor', pointsensor['calibrated_sensor_token'])  # calibrated-sensor record of the point sensor
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)  # rotate the points (only x, y, z are transformed)
        pc.translate(np.array(cs_record['translation']))  # translate the points

        # Second step: transform from ego to the global frame.
        poserecord = self.nusc.get('ego_pose', pointsensor['ego_pose_token'])
        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
        pc.translate(np.array(poserecord['translation']))

        # Third step: transform from global into the ego vehicle frame for the timestamp of the image.
        poserecord = self.nusc.get('ego_pose', cam['ego_pose_token'])
        pc.translate(-np.array(poserecord['translation']))
        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)

        # Fourth step: transform from ego into the camera.
        cs_record = self.nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
        pc.translate(-np.array(cs_record['translation']))
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

        # Fifth step: actually take a "picture" of the point cloud.
        # Grab the depths (camera frame z axis points away from the camera).
        depths = pc.points[2, :]  # depth of each point along the camera z axis

        if render_intensity:
            assert pointsensor['sensor_modality'] == 'lidar', 'Error: Can only render intensity for lidar, ' \
                                                              'not %s!' % pointsensor['sensor_modality']
            # Retrieve the color from the intensities.
            # Performs arbitary scaling to achieve more visually pleasing results.
            intensities = pc.points[3, :]
            intensities = (intensities - np.min(intensities)) / (np.max(intensities) - np.min(intensities))
            intensities = intensities ** 0.1
            intensities = np.maximum(0, intensities - 0.5)
            coloring = intensities
        elif show_lidarseg or show_panoptic:
            assert pointsensor['sensor_modality'] == 'lidar', 'Error: Can only render lidarseg labels for lidar, ' \
                                                              'not %s!' % pointsensor['sensor_modality']

            gt_from = 'lidarseg' if show_lidarseg else 'panoptic'
            semantic_table = getattr(self.nusc, gt_from)

            if lidarseg_preds_bin_path:
                sample_token = self.nusc.get('sample_data', pointsensor_token)['sample_token']
                lidarseg_labels_filename = lidarseg_preds_bin_path
                assert os.path.exists(lidarseg_labels_filename), \
                    'Error: Unable to find {} to load the predictions for sample token {} (lidar ' \
                    'sample data token {}) from.'.format(lidarseg_labels_filename, sample_token, pointsensor_token)
            else:
                if len(semantic_table) > 0:  # Ensure {lidarseg/panoptic}.json is not empty (e.g. in case of v1.0-test).
                    lidarseg_labels_filename = osp.join(self.nusc.dataroot,
                                                        self.nusc.get(gt_from, pointsensor_token)['filename'])
                else:
                    lidarseg_labels_filename = None

            if lidarseg_labels_filename:
                # Paint each label in the pointcloud with a RGBA value.
                if show_lidarseg:
                    coloring = paint_points_label(lidarseg_labels_filename,
                                                  filter_lidarseg_labels,
                                                  self.nusc.lidarseg_name2idx_mapping,
                                                  self.nusc.colormap)
                else:
                    coloring = paint_panop_points_label(lidarseg_labels_filename,
                                                        filter_lidarseg_labels,
                                                        self.nusc.lidarseg_name2idx_mapping,
                                                        self.nusc.colormap)

            else:
                coloring = depths
                print(f'Warning: There are no lidarseg labels in {self.nusc.version}. Points will be colored according '
                      f'to distance from the ego vehicle instead.')
        else:
            # Retrieve the color from the depth.
            coloring = depths  # color by depth

        # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
        points = view_points(pc.points[:3, :], np.array(cs_record['camera_intrinsic']), normalize=True)  # project onto the image plane

        # Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
        # Also make sure points are at least 1m in front of the camera to avoid seeing the lidar points on the camera
        # casing for non-keyframes which are slightly out of sync.
        mask = np.ones(depths.shape[0], dtype=bool)
        mask = np.logical_and(mask, depths > min_dist)
        mask = np.logical_and(mask, points[0, :] > 1)
        mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
        mask = np.logical_and(mask, points[1, :] > 1)
        mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
        points = points[:, mask]
        coloring = coloring[mask]

        a = points  # projected points, shape (3, n)
        b = pc.points[:, mask]  # filtered raw points, shape (18, n) for radar
        no_change_x_y_z = no_change_x_y_z[:, mask]

        point_all = np.concatenate((a, b[3:, :]))
        point_all = np.concatenate((point_all, no_change_x_y_z[:3, :]))
        self.point_a = point_all
        self.point_x_y_z = points
        self.color = coloring
        self.img = im_path_radar  # store the image path; it is returned to the caller instead of the rendered figure

        return points, point_all, coloring, im

1.1.1.2 Then add the following attributes to the __init__ of the class this function belongs to (NuScenesExplorer):

class NuScenesExplorer:
    """ Helper class to list and visualize NuScenes data. These are meant to serve as tutorials and templates for
    working with the data. """

    def __init__(self, nusc: NuScenes):
        self.nusc = nusc
        self.point_a = None
        self.point_x_y_z = None
        self.color = None
        self.img = None
        self.all_box_and_name = None
        self.img_path = None

1.1.1.3 Next, modify the RadarPointCloud.from_file method (called as pc = RadarPointCloud.from_file(...) above) as follows:

    @classmethod
    def from_file(cls,
                  file_name: str,
                  invalid_states: List[int] = None,
                  dynprop_states: List[int] = None,
                  ambig_states: List[int] = None) -> 'RadarPointCloud':
        """
        Loads RADAR data from a Point Cloud Data file. See details below.
        :param file_name: The path of the pointcloud file.
        :param invalid_states: Radar states to be kept. See details below.
        :param dynprop_states: Radar states to be kept. Use [0, 2, 6] for moving objects only. See details below.
        :param ambig_states: Radar states to be kept. See details below.
        To keep all radar returns, set each state filter to range(18).
        :return: <np.float: d, n>. Point cloud matrix with d dimensions and n points.

        Example of the header fields:
        # .PCD v0.7 - Point Cloud Data file format
        VERSION 0.7
        FIELDS x y z dyn_prop id rcs vx vy vx_comp vy_comp is_quality_valid ambig_state x_rms y_rms invalid_state pdh0 vx_rms vy_rms
        SIZE 4 4 4 1 2 4 4 4 4 4 1 1 1 1 1 1 1 1
        TYPE F F F I I F F F F F I I I I I I I I
        COUNT 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
        WIDTH 125
        HEIGHT 1
        VIEWPOINT 0 0 0 1 0 0 0
        POINTS 125
        DATA binary

        Below some of the fields are explained in more detail:

        x is front, y is left

        vx, vy are the velocities in m/s.
        vx_comp, vy_comp are the velocities in m/s compensated by the ego motion.
        We recommend using the compensated velocities.

        invalid_state: state of Cluster validity state.
        (Invalid states)
        0x01	invalid due to low RCS
        0x02	invalid due to near-field artefact
        0x03	invalid far range cluster because not confirmed in near range
        0x05	reserved
        0x06	invalid cluster due to high mirror probability
        0x07	Invalid cluster because outside sensor field of view
        0x0d	reserved
        0x0e	invalid cluster because it is a harmonics
        (Valid states)
        0x00	valid
        0x04	valid cluster with low RCS
        0x08	valid cluster with azimuth correction due to elevation
        0x09	valid cluster with high child probability
        0x0a	valid cluster with high probability of being a 50 deg artefact
        0x0b	valid cluster but no local maximum
        0x0c	valid cluster with high artefact probability
        0x0f	valid cluster with above 95m in near range
        0x10	valid cluster with high multi-target probability
        0x11	valid cluster with suspicious angle

        dynProp: Dynamic property of cluster to indicate if is moving or not.
        0: moving
        1: stationary
        2: oncoming
        3: stationary candidate
        4: unknown
        5: crossing stationary
        6: crossing moving
        7: stopped

        ambig_state: State of Doppler (radial velocity) ambiguity solution.
        0: invalid
        1: ambiguous
        2: staggered ramp
        3: unambiguous
        4: stationary candidates

        pdh0: False alarm probability of cluster (i.e. probability of being an artefact caused by multipath or similar).
        0: invalid
        1: <25%
        2: 50%
        3: 75%
        4: 90%
        5: 99%
        6: 99.9%
        7: <=100%
        """

        assert file_name.endswith('.pcd'), 'Unsupported filetype {}'.format(file_name)

        meta = []
        with open(file_name, 'rb') as f:
            for line in f:
                line = line.strip().decode('utf-8')
                meta.append(line)
                if line.startswith('DATA'):
                    break

            data_binary = f.read()  # read the raw binary point data

        # Get the header rows and check if they appear as expected.
        assert meta[0].startswith('#'), 'First line must be comment'
        assert meta[1].startswith('VERSION'), 'Second line must be VERSION'
        sizes = meta[3].split(' ')[1:]
        types = meta[4].split(' ')[1:]
        counts = meta[5].split(' ')[1:]
        width = int(meta[6].split(' ')[1])
        height = int(meta[7].split(' ')[1])
        data = meta[10].split(' ')[1]
        feature_count = len(types)
        assert width > 0
        assert len([c for c in counts if c != c]) == 0, 'Error: COUNT not supported!'
        assert height == 1, 'Error: height != 0 not supported!'
        assert data == 'binary'

        # Lookup table for how to decode the binaries.
        unpacking_lut = {'F': {2: 'e', 4: 'f', 8: 'd'},
                         'I': {1: 'b', 2: 'h', 4: 'i', 8: 'q'},
                         'U': {1: 'B', 2: 'H', 4: 'I', 8: 'Q'}}
        types_str = ''.join([unpacking_lut[t][int(s)] for t, s in zip(types, sizes)])

        # Decode each point.
        offset = 0
        point_count = width
        points = []  # one entry per point; each radar point has 18 fields (e.g. 125 points for the header shown above)
        for i in range(point_count):
            point = []
            for p in range(feature_count):
                start_p = offset
                end_p = start_p + int(sizes[p])
                assert end_p < len(data_binary)
                point_p = struct.unpack(types_str[p], data_binary[start_p:end_p])[0]
                point.append(point_p)
                offset = end_p
            points.append(point)

        # A NaN in the first point indicates an empty pointcloud.
        point = np.array(points[0])
        if np.any(np.isnan(point)):
            return cls(np.zeros((feature_count, 0)))

        # Convert to numpy matrix.
        points = np.array(points).transpose()  # shape (18, n), e.g. (18, 125)

        # If no parameters are provided, use default settings.
        invalid_states = cls.invalid_states if invalid_states is None else invalid_states  # cluster-validity states to keep
        dynprop_states = cls.dynprop_states if dynprop_states is None else dynprop_states  # dynamic-property (moving or not) states to keep
        ambig_states = cls.ambig_states if ambig_states is None else ambig_states  # Doppler (radial velocity) ambiguity states to keep

        # No filtering: keep all points

        # # Filter points with an invalid state.
        # valid = [p in invalid_states for p in points[-4, :]]
        # points = points[:, valid]  # keep only the valid points
        #
        # # Filter by dynProp.
        # valid = [p in dynprop_states for p in points[3, :]]
        # points = points[:, valid]
        #
        # # Filter by ambig_state.
        # valid = [p in ambig_states for p in points[11, :]]
        # points = points[:, valid]

        return cls(points)
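
Note that, as the docstring above already hints ("To keep all radar returns, set each state filter to range(18)"), the same no-filtering behaviour can also be obtained from the unmodified devkit by passing the state lists explicitly instead of commenting the filters out; a minimal sketch:

from nuscenes.utils.data_classes import RadarPointCloud

# pcl_path is the .pcd file path computed in map_pointcloud_to_image above
pc = RadarPointCloud.from_file(pcl_path,
                               invalid_states=list(range(18)),
                               dynprop_states=list(range(18)),
                               ambig_states=list(range(18)))
print(pc.points.shape)  # (18, n): every radar return is kept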

1.2 Devkit modification point 2:

Modifying nusc.render_sample_data(sample_data['token']):
The original function only renders and displays the image; here it is modified to return every bounding box in the image together with its name, i.e. two values: all_box_and_name, img_path = nusc.render_sample_data(sample_data['token']).
all_box_and_name holds all of the Box objects; each box carries the top-left and bottom-right corners of its 2D bounding box (the box_2D attribute added in section 1.2.1.3).
img_path: the file path of the image.
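
A minimal sketch of consuming the two return values (essentially what the script in section 1 already does inside its loop), assuming each box carries the box_2D = [xmin, ymin, xmax, ymax] attribute added in section 1.2.1.3:

all_box_and_name, img_path = nusc.render_sample_data(sample_data['token'])
img = cv2.imread(img_path)
for b in all_box_and_name:
    xmin, ymin, xmax, ymax = [int(v) for v in b.box_2D]
    label = b.name.split('.')[1]  # e.g. 'car' from 'vehicle.car'
    cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)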

1.2.1 Modify render_sample_data as follows:

    def render_sample_data(self, sample_data_token: str, with_anns: bool = True,
                           box_vis_level: BoxVisibility = BoxVisibility.ANY, axes_limit: float = 40, ax: Axes = None,
                           nsweeps: int = 1, out_path: str = None, underlay_map: bool = True,
                           use_flat_vehicle_coordinates: bool = True,
                           show_lidarseg: bool = False,
                           show_lidarseg_legend: bool = False,
                           filter_lidarseg_labels: List = None,
                           lidarseg_preds_bin_path: str = None, verbose: bool = True,
                           show_panoptic: bool = False):
        self.explorer.render_sample_data(sample_data_token, with_anns, box_vis_level, axes_limit, ax, nsweeps=nsweeps,
                                         out_path=out_path,
                                         underlay_map=underlay_map,
                                         use_flat_vehicle_coordinates=use_flat_vehicle_coordinates,
                                         show_lidarseg=show_lidarseg,
                                         show_lidarseg_legend=show_lidarseg_legend,
                                         filter_lidarseg_labels=filter_lidarseg_labels,
                                         lidarseg_preds_bin_path=lidarseg_preds_bin_path,
                                         verbose=verbose,
                                         show_panoptic=show_panoptic)
        return self.explorer.all_box_and_name, self.explorer.img_path

1.2.1.1 Next, modify the self.explorer.render_sample_data inside this function as follows:

    def render_sample_data(self,
                           sample_data_token: str,
                           with_anns: bool = True,
                           box_vis_level: BoxVisibility = BoxVisibility.ANY,
                           axes_limit: float = 40,
                           ax: Axes = None,
                           nsweeps: int = 1,
                           out_path: str = None,
                           underlay_map: bool = True,
                           use_flat_vehicle_coordinates: bool = True,
                           show_lidarseg: bool = False,
                           show_lidarseg_legend: bool = False,
                           filter_lidarseg_labels: List = None,
                           lidarseg_preds_bin_path: str = None,
                           verbose: bool = True,
                           show_panoptic: bool = False) -> None:
        """
        Render sample data onto axis.
        :param sample_data_token: Sample_data token.
        :param with_anns: Whether to draw box annotations.
        :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
        :param axes_limit: Axes limit for lidar and radar (measured in meters).
        :param ax: Axes onto which to render.
        :param nsweeps: Number of sweeps for lidar and radar.
        :param out_path: Optional path to save the rendered figure to disk.
        :param underlay_map: When set to true, lidar data is plotted onto the map. This can be slow.
        :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is
            aligned to z-plane in the world. Note: Previously this method did not use flat vehicle coordinates, which
            can lead to small errors when the vertical axis of the global frame and lidar are not aligned. The new
            setting is more correct and rotates the plot by ~90 degrees.
        :param show_lidarseg: When set to True, the lidar data is colored with the segmentation labels. When set
            to False, the colors of the lidar data represent the distance from the center of the ego vehicle.
        :param show_lidarseg_legend: Whether to display the legend for the lidarseg labels in the frame.
        :param filter_lidarseg_labels: Only show lidar points which belong to the given list of classes. If None
            or the list is empty, all classes will be displayed.
        :param lidarseg_preds_bin_path: A path to the .bin file which contains the user's lidar segmentation
                                        predictions for the sample.
        :param verbose: Whether to display the image after it is rendered.
        :param show_panoptic: When set to True, the lidar data is colored with the panoptic labels. When set
            to False, the colors of the lidar data represent the distance from the center of the ego vehicle.
            If show_lidarseg is True, show_panoptic will be set to False.
        """
        def draw_rect(axis, selected_corners, color):  # helper to draw a rectangle (its only call below is commented out)
            prev = selected_corners[-1]
            for corner in selected_corners:
                #            x         x           y         y
                axis.plot([prev[0], corner[0]], [prev[1], corner[1]], color=color)
                prev = corner
        if show_lidarseg:
            show_panoptic = False
        # Get sensor modality.
        sd_record = self.nusc.get('sample_data', sample_data_token)
        sensor_modality = sd_record['sensor_modality']

        if sensor_modality in ['lidar', 'radar']:
            sample_rec = self.nusc.get('sample', sd_record['sample_token'])
            chan = sd_record['channel']
            ref_chan = 'LIDAR_TOP'
            ref_sd_token = sample_rec['data'][ref_chan]
            ref_sd_record = self.nusc.get('sample_data', ref_sd_token)

            if sensor_modality == 'lidar':
                if show_lidarseg or show_panoptic:
                    gt_from = 'lidarseg' if show_lidarseg else 'panoptic'
                    assert hasattr(self.nusc, gt_from), f'Error: nuScenes-{gt_from} not installed!'

                    # Ensure that lidar pointcloud is from a keyframe.
                    assert sd_record['is_key_frame'], \
                        'Error: Only pointclouds which are keyframes have lidar segmentation labels. Rendering aborted.'

                    assert nsweeps == 1, \
                        'Error: Only pointclouds which are keyframes have lidar segmentation labels; nsweeps should ' \
                        'be set to 1.'

                    # Load a single lidar point cloud.
                    pcl_path = osp.join(self.nusc.dataroot, ref_sd_record['filename'])
                    pc = LidarPointCloud.from_file(pcl_path)
                else:
                    # Get aggregated lidar point cloud in lidar frame.
                    pc, times = LidarPointCloud.from_file_multisweep(self.nusc, sample_rec, chan, ref_chan,
                                                                     nsweeps=nsweeps)
                velocities = None
            else:
                # Get aggregated radar point cloud in reference frame.
                # The point cloud is transformed to the reference frame for visualization purposes.
                pc, times = RadarPointCloud.from_file_multisweep(self.nusc, sample_rec, chan, ref_chan, nsweeps=nsweeps)

                # Transform radar velocities (x is front, y is left), as these are not transformed when loading the
                # point cloud.
                radar_cs_record = self.nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
                ref_cs_record = self.nusc.get('calibrated_sensor', ref_sd_record['calibrated_sensor_token'])
                velocities = pc.points[8:10, :]  # Compensated velocity
                velocities = np.vstack((velocities, np.zeros(pc.points.shape[1])))
                velocities = np.dot(Quaternion(radar_cs_record['rotation']).rotation_matrix, velocities)
                velocities = np.dot(Quaternion(ref_cs_record['rotation']).rotation_matrix.T, velocities)
                velocities[2, :] = np.zeros(pc.points.shape[1])

            # By default we render the sample_data top down in the sensor frame.
            # This is slightly inaccurate when rendering the map as the sensor frame may not be perfectly upright.
            # Using use_flat_vehicle_coordinates we can render the map in the ego frame instead.
            if use_flat_vehicle_coordinates:
                # Retrieve transformation matrices for reference point cloud.
                cs_record = self.nusc.get('calibrated_sensor', ref_sd_record['calibrated_sensor_token'])
                pose_record = self.nusc.get('ego_pose', ref_sd_record['ego_pose_token'])
                ref_to_ego = transform_matrix(translation=cs_record['translation'],
                                              rotation=Quaternion(cs_record["rotation"]))

                # Compute rotation between 3D vehicle pose and "flat" vehicle pose (parallel to global z plane).
                ego_yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0]
                rotation_vehicle_flat_from_vehicle = np.dot(
                    Quaternion(scalar=np.cos(ego_yaw / 2), vector=[0, 0, np.sin(ego_yaw / 2)]).rotation_matrix,
                    Quaternion(pose_record['rotation']).inverse.rotation_matrix)
                vehicle_flat_from_vehicle = np.eye(4)
                vehicle_flat_from_vehicle[:3, :3] = rotation_vehicle_flat_from_vehicle
                viewpoint = np.dot(vehicle_flat_from_vehicle, ref_to_ego)
            else:
                viewpoint = np.eye(4)

            # Init axes.
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 9))

            # Render map if requested.
            if underlay_map:
                assert use_flat_vehicle_coordinates, 'Error: underlay_map requires use_flat_vehicle_coordinates, as ' \
                                                     'otherwise the location does not correspond to the map!'
                self.render_ego_centric_map(sample_data_token=sample_data_token, axes_limit=axes_limit, ax=ax)

            # Show point cloud.
            points = view_points(pc.points[:3, :], viewpoint, normalize=False)
            dists = np.sqrt(np.sum(pc.points[:2, :] ** 2, axis=0))
            colors = np.minimum(1, dists / axes_limit / np.sqrt(2))
            if sensor_modality == 'lidar' and (show_lidarseg or show_panoptic):
                gt_from = 'lidarseg' if show_lidarseg else 'panoptic'
                semantic_table = getattr(self.nusc, gt_from)
                # Load labels for pointcloud.
                if lidarseg_preds_bin_path:
                    sample_token = self.nusc.get('sample_data', sample_data_token)['sample_token']
                    lidarseg_labels_filename = lidarseg_preds_bin_path
                    assert os.path.exists(lidarseg_labels_filename), \
                        'Error: Unable to find {} to load the predictions for sample token {} (lidar ' \
                        'sample data token {}) from.'.format(lidarseg_labels_filename, sample_token, sample_data_token)
                else:
                    if len(semantic_table) > 0:
                        # Ensure {lidarseg/panoptic}.json is not empty (e.g. in case of v1.0-test).
                        lidarseg_labels_filename = osp.join(self.nusc.dataroot,
                                                            self.nusc.get(gt_from, sample_data_token)['filename'])
                    else:
                        lidarseg_labels_filename = None

                if lidarseg_labels_filename:
                    # Paint each label in the pointcloud with a RGBA value.
                    if show_lidarseg or show_panoptic:
                        if show_lidarseg:
                            colors = paint_points_label(lidarseg_labels_filename, filter_lidarseg_labels,
                                                        self.nusc.lidarseg_name2idx_mapping, self.nusc.colormap)
                        else:
                            colors = paint_panop_points_label(lidarseg_labels_filename, filter_lidarseg_labels,
                                                              self.nusc.lidarseg_name2idx_mapping, self.nusc.colormap)

                        if show_lidarseg_legend:

                            # If user does not specify a filter, then set the filter to contain the classes present in
                            # the pointcloud after it has been projected onto the image; this will allow displaying the
                            # legend only for classes which are present in the image (instead of all the classes).
                            if filter_lidarseg_labels is None:
                                if show_lidarseg:
                                    # Since the labels are stored as class indices, we get the RGB colors from the
                                    # colormap in an array where the position of the RGB color corresponds to the index
                                    # of the class it represents.
                                    color_legend = colormap_to_colors(self.nusc.colormap,
                                                                      self.nusc.lidarseg_name2idx_mapping)
                                    filter_lidarseg_labels = get_labels_in_coloring(color_legend, colors)
                                else:
                                    # Only show legends for stuff categories for panoptic.
                                    filter_lidarseg_labels = stuff_cat_ids(len(self.nusc.lidarseg_name2idx_mapping))

                            if filter_lidarseg_labels and show_panoptic:
                                # Only show legends for filtered stuff categories for panoptic.
                                stuff_labels = set(stuff_cat_ids(len(self.nusc.lidarseg_name2idx_mapping)))
                                filter_lidarseg_labels = list(stuff_labels.intersection(set(filter_lidarseg_labels)))

                            create_lidarseg_legend(filter_lidarseg_labels,
                                                   self.nusc.lidarseg_idx2name_mapping,
                                                   self.nusc.colormap,
                                                   loc='upper left',
                                                   ncol=1,
                                                   bbox_to_anchor=(1.05, 1.0))
                else:
                    print('Warning: There are no lidarseg labels in {}. Points will be colored according to distance '
                          'from the ego vehicle instead.'.format(self.nusc.version))

            point_scale = 0.2 if sensor_modality == 'lidar' else 3.0
            scatter = ax.scatter(points[0, :], points[1, :], c=colors, s=point_scale)

            # Show velocities.
            if sensor_modality == 'radar':
                points_vel = view_points(pc.points[:3, :] + velocities, viewpoint, normalize=False)
                deltas_vel = points_vel - points
                deltas_vel = 6 * deltas_vel  # Arbitrary scaling
                max_delta = 20
                deltas_vel = np.clip(deltas_vel, -max_delta, max_delta)  # Arbitrary clipping
                colors_rgba = scatter.to_rgba(colors)
                for i in range(points.shape[1]):
                    ax.arrow(points[0, i], points[1, i], deltas_vel[0, i], deltas_vel[1, i], color=colors_rgba[i])

            # Show ego vehicle.
            ax.plot(0, 0, 'x', color='red')

            # Get boxes in lidar frame.
            _, boxes, _ = self.nusc.get_sample_data(ref_sd_token, box_vis_level=box_vis_level,
                                                    use_flat_vehicle_coordinates=use_flat_vehicle_coordinates)

            # Show boxes.
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax, view=np.eye(4), colors=(c, c, c))

            # Limit visible range.
            ax.set_xlim(-axes_limit, axes_limit)
            ax.set_ylim(-axes_limit, axes_limit)
        elif sensor_modality == 'camera':
            # Load boxes and image.
            # the boxes here have already been rotated/translated into the camera frame, and only the wanted ones are kept (see the modified get_sample_data below)
            data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(sample_data_token,
                                                                           box_vis_level=box_vis_level)
            data = Image.open(data_path)
            self.img_path = data_path

            # Init axes.
            if ax is None:
                pass
                #_, ax = plt.subplots(1, 1, figsize=(9, 16))

            # Show image.
            # ax.imshow(data)
            # plt.show()

            # Show boxes.
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax, view=camera_intrinsic, normalize=True, colors=(c, c, c))
                    # collect the four corners of the 2D box (drawing is commented out)
                    al = []
                    al.append([box.box_2D[0], box.box_2D[1]])
                    al.append([box.box_2D[2], box.box_2D[1]])
                    al.append([box.box_2D[2], box.box_2D[3]])
                    al.append([box.box_2D[0], box.box_2D[3]])
                    # print(al)
                    # draw_rect(ax, al, 'r')

            self.all_box_and_name = boxes

            # Limit visible range.
            # ax.set_xlim(0, data.size[0])
            # ax.set_ylim(data.size[1], 0)

        else:
            raise ValueError("Error: Unknown sensor modality!")

        # ax.axis('off')
        # ax.set_title('{} {labels_type}'.format(
        #     sd_record['channel'], labels_type='(predictions)' if lidarseg_preds_bin_path else ''))
        # ax.set_aspect('equal')
        #
        # if out_path is not None:
        #     plt.savefig(out_path, bbox_inches='tight', pad_inches=0, dpi=200)
        #
        # if verbose:
        #     plt.show()

1.2.1.2 Then modify the self.nusc.get_sample_data used in data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(...) as follows (this is where the annotations can be filtered):

    def get_sample_data(self, sample_data_token: str,
                        box_vis_level: BoxVisibility = BoxVisibility.ANY,
                        selected_anntokens: List[str] = None,
                        use_flat_vehicle_coordinates: bool = False) -> \
            Tuple[str, List[Box], np.array]:
        """
        Returns the data path as well as all annotations related to that sample_data.
        Note that the boxes are transformed into the current sensor's coordinate frame.
        :param sample_data_token: Sample_data token.
        :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
        :param selected_anntokens: If provided only return the selected annotation.
        :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is
                                             aligned to z-plane in the world.
        :return: (data_path, boxes, camera_intrinsic <np.array: 3, 3>)
        """

        # Retrieve sensor & pose records
        sd_record = self.get('sample_data', sample_data_token)
        cs_record = self.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
        sensor_record = self.get('sensor', cs_record['sensor_token'])
        pose_record = self.get('ego_pose', sd_record['ego_pose_token'])

        data_path = self.get_sample_data_path(sample_data_token)

        if sensor_record['modality'] == 'camera':
            cam_intrinsic = np.array(cs_record['camera_intrinsic'])
            imsize = (sd_record['width'], sd_record['height'])
        else:
            cam_intrinsic = None
            imsize = None

        # Retrieve all sample annotations and map to sensor coordinate system.
        if selected_anntokens is not None:
            boxes = list(map(self.get_box, selected_anntokens))
        else:
            boxes = self.get_boxes(sample_data_token)

        # Make list of Box objects including coord system transforms.
        box_list = []
        for box in boxes:
            # if box.name[0:7] == 'vehicle':  # only keep vehicle-class boxes
            # vehicle.bus.bendy
            # vehicle.bus.rigid
            # vehicle.car
            # vehicle.emergency.ambulance
            # vehicle.emergency.police
            # vehicle.truck
            box_token = box.token
            vis = self.get('sample_annotation', box_token)['visibility_token']
            vis_a = self.get('visibility', vis)['token']
            if int(vis_a) > 2:  # keep only boxes whose visibility token is 3 or 4
                if box.name == 'vehicle.bus.bendy' or box.name == 'vehicle.bus.rigid' \
                        or box.name == 'vehicle.car' or box.name == 'vehicle.construction' \
                        or box.name == 'vehicle.motorcycle' or box.name == 'vehicle.truck' \
                        or box.name == 'vehicle.bicycle' or box.name == 'vehicle.trailer':  # only keep vehicle-class boxes
                    if use_flat_vehicle_coordinates:
                        # Move box to ego vehicle coord system parallel to world z plane.
                        yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0]
                        box.translate(-np.array(pose_record['translation']))
                        box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse)
                    else:
                        # Move box to ego vehicle coord system.
                        box.translate(-np.array(pose_record['translation']))
                        box.rotate(Quaternion(pose_record['rotation']).inverse)

                        #  Move box to sensor coord system.
                        box.translate(-np.array(cs_record['translation']))
                        box.rotate(Quaternion(cs_record['rotation']).inverse)

                    if sensor_record['modality'] == 'camera' and not \
                            box_in_image(box, cam_intrinsic, imsize, vis_level=box_vis_level):
                        continue

                    box_list.append(box)  # keep this (already transformed) box

        return data_path, box_list, cam_intrinsic

1.2.1.3 Mapping the 3D boxes to 2D boxes (replace the Box.render method called by this function with the following)

The call being modified is the box.render in this snippet of render_sample_data:

            # Show boxes.
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax, view=np.eye(4), colors=(c, c, c))

Replace the Box.render method with the following:

    def render(self,
               axis: Axes,
               view: np.ndarray = np.eye(3),
               normalize: bool = False,
               colors: Tuple = ('b', 'r', 'k'),
               linewidth: float = 2) -> None:
        """
        Renders the box in the provided Matplotlib axis.
        :param axis: Axis onto which the box should be drawn.
        :param view: <np.array: 3, 3>. Define a projection in needed (e.g. for drawing projection in an image).
        :param normalize: Whether to normalize the remaining coordinate.
        :param colors: (<Matplotlib.colors>: 3). Valid Matplotlib colors (<str> or normalized RGB tuple) for front,
            back and sides.
        :param linewidth: Width in pixel of the box sides.
        """
        corners = view_points(self.corners(), view, normalize=normalize)[:2, :]

        def draw_rect(selected_corners, color):  # draw a rectangle (its calls below are commented out)
            prev = selected_corners[-1]
            for corner in selected_corners:
                #            x         x           y         y
                axis.plot([prev[0], corner[0]], [prev[1], corner[1]], color=color, linewidth=linewidth)
                prev = corner

        # Collect the projected corner coordinates (the original edge-drawing loop below is disabled)
        point_all_x = []
        point_all_y = []
        # for i in range(4): # draw the four vertical edges of the cuboid
        #     point_all_x.append(corners.T[i][0])
        #     point_all_x.append(corners.T[i + 4][0])
        #     point_all_y.append(corners.T[i][1])
        #     point_all_y.append(corners.T[i + 4][1])
        #     axis.plot([corners.T[i][0], corners.T[i + 4][0]],
        #               [corners.T[i][1], corners.T[i + 4][1]],
        #               color=colors[2], linewidth=linewidth)

        # Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d)
        # draw_rect(corners.T[:4], colors[0])
        for r in corners.T:
            point_all_x.append(r[0])
            point_all_y.append(r[1])
        x_min = min(point_all_x)
        y_min = min(point_all_y)
        x_max = max(point_all_x)
        y_max = max(point_all_y)

        two_box = []
        two_box.append(x_min)
        two_box.append(y_min)
        two_box.append(x_max)
        two_box.append(y_max)
        
        
        al = []
        al.append([x_min,y_min])
        al.append([x_max, y_min])
        al.append([x_max, y_max])
        al.append([x_min, y_max])
        # draw_rect(al, colors[1])
        self.box_2D = two_box
        
        # Draw line indicating the front (the heading line at the bottom of the box; disabled here)
        # center_bottom_forward = np.mean(corners.T[2:4], axis=0)
        # center_bottom = np.mean(corners.T[[2, 3, 7, 6]], axis=0)
        # axis.plot([center_bottom[0], center_bottom_forward[0]],
        #           [center_bottom[1], center_bottom_forward[1]],
        #           color=colors[0], linewidth=linewidth)
