3D Point Cloud Learning: SECOND Object Detection ② Source Code Annotations

Source code link: PCDet.
Unfortunately, reproducing SECOND also depends on the spconv library, which currently can only be compiled on Ubuntu (a virtual machine does not work either), so my reproduction attempt still has not succeeded. I am tempted to set up a dual-boot system...

1 Data Preprocessing: PCDet-master\pcdet\datasets\kitti\kitti_dataset.py

SECOND's data preprocessing is similar to that of most point-cloud object detectors. Its main tasks are:
1> generate the kitti_infos_%s.pkl files that store the train/val data infos
2> generate the ground-truth database used for data augmentation

def create_kitti_infos(data_path, save_path, workers=4):
    dataset = BaseKittiDataset(root_path=data_path)
    train_split, val_split = 'train', 'val'

    train_filename = save_path / ('kitti_infos_%s.pkl' % train_split)
    val_filename = save_path / ('kitti_infos_%s.pkl' % val_split)
    trainval_filename = save_path / 'kitti_infos_trainval.pkl'
    test_filename = save_path / 'kitti_infos_test.pkl'
    print('---------------Start to generate data infos---------------')
    # generate and save the info files
    dataset.set_split(train_split)
    kitti_infos_train = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
    with open(train_filename, 'wb') as f:
        pickle.dump(kitti_infos_train, f)
    print('Kitti info train file is saved to %s' % train_filename)
    dataset.set_split(val_split)
    kitti_infos_val = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
    with open(val_filename, 'wb') as f:
        pickle.dump(kitti_infos_val, f)
    print('Kitti info val file is saved to %s' % val_filename)

    with open(trainval_filename, 'wb') as f:
        pickle.dump(kitti_infos_train + kitti_infos_val, f)
    print('Kitti info trainval file is saved to %s' % trainval_filename)
    dataset.set_split('test')
    kitti_infos_test = dataset.get_infos(num_workers=workers, has_label=False, count_inside_pts=False)
    with open(test_filename, 'wb') as f:
        pickle.dump(kitti_infos_test, f)
    print('Kitti info test file is saved to %s' % test_filename)
    # generate the gt database files
    print('---------------Start create groundtruth database for data augmentation---------------')
    dataset.set_split(train_split)
    dataset.create_groundtruth_database(train_filename, split=train_split)
    print('---------------Data preparation Done---------------')
if __name__ == '__main__':
    # requires a command-line arg, e.g. python kitti_dataset.py create_kitti_infos
    if sys.argv.__len__() > 1 and sys.argv[1] == 'create_kitti_infos':
        create_kitti_infos(
            data_path=cfg.ROOT_DIR / 'data' / 'kitti',
            save_path=cfg.ROOT_DIR / 'data' / 'kitti'
        )
    else:
        A = KittiDataset(root_path='data/kitti', class_names=cfg.CLASS_NAMES, split='train', training=True)
        # pdb.set_trace() sets a breakpoint: execution pauses here and drops into an interactive debugger
        import pdb
        pdb.set_trace()
        ans = A[1]
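
For reference, pdb is Python's built-in debugger: pdb.set_trace() drops into an interactive prompt where you can step through the code and inspect values. A minimal session (the commands below are illustrative):

(Pdb) n                # execute the next line (ans = A[1])
(Pdb) p ans.keys()     # inspect the keys of the returned example dict
(Pdb) c                # continue normal execution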

The functions it calls:

class BaseKittiDataset(DatasetTemplate):
    def __init__(self, root_path, split='train'):
        super().__init__()
        self.root_path = root_path
        self.root_split_path = os.path.join(self.root_path, 'training' if split != 'test' else 'testing')
        self.split = split
        if split in ['train', 'val', 'test']:
            split_dir = os.path.join(self.root_path, 'ImageSets', split + '.txt')
        # extract the sample indices from the split file names
        self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if os.path.exists(split_dir) else None
    # re-initialize BaseKittiDataset for a different split
    def set_split(self, split):
        self.__init__(self.root_path, split)
    # load the LiDAR point cloud with np.fromfile
    def get_lidar(self, idx):
        lidar_file = os.path.join(self.root_split_path, 'velodyne', '%s.bin' % idx)
        assert os.path.exists(lidar_file)
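        # each KITTI velodyne .bin stores float32 (x, y, z, reflectance) tuples, hence reshape(-1, 4)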
        return np.fromfile(lidar_file, dtype=np.float32).reshape(-1, 4)

    # get the image shape (rows, cols)
    def get_image_shape(self, idx):
        img_file = os.path.join(self.root_split_path, 'image_2', '%s.png' % idx)
        assert os.path.exists(img_file)
        return np.array(io.imread(img_file).shape[:2], dtype=np.int32)
    # load the labels via get_objects_from_label
    def get_label(self, idx):
        label_file = os.path.join(self.root_split_path, 'label_2', '%s.txt' % idx)
        assert os.path.exists(label_file)
        return object3d_utils.get_objects_from_label(label_file)
    # load the calibration object
    def get_calib(self, idx):
        calib_file = os.path.join(self.root_split_path, 'calib', '%s.txt' % idx)
        assert os.path.exists(calib_file)
        return calibration.Calibration(calib_file)
    # load the road plane parameters
    def get_road_plane(self, idx):
        plane_file = os.path.join(self.root_split_path, 'planes', '%s.txt' % idx)
        with open(plane_file, 'r') as f:
            lines = f.readlines()
        lines = [float(i) for i in lines[3].split()]
        plane = np.asarray(lines)
        # Ensure normal is always facing up, this is in the rectified camera coordinate
        if plane[1] > 0:
            plane = -plane
        norm = np.linalg.norm(plane[0:3])
        plane = plane / norm
        return plane
    @staticmethod
    # flag the points that fall inside the image (True = valid)
    def get_fov_flag(pts_rect, img_shape, calib):
        '''
        Valid point should be in the image (and in the PC_AREA_SCOPE)
        :param pts_rect:
        :param img_shape:
        :return:
        '''
        pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
        # x coordinate within image width
        val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
        # y coordinate within image height
        val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
        # logical AND of both
        val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
        pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
        return pts_valid_flag
    # collect the basic info of each sample
    def get_infos(self, num_workers=4, has_label=True, count_inside_pts=True, sample_id_list=None):
        import concurrent.futures as futures
        # given a sample_idx, build the info dict for one frame; info is a dict of sub-dicts:
        # point_cloud, image, calib, annos (which includes num_points_in_gt)
        def process_single_scene(sample_idx):
            print('%s sample_idx: %s' % (self.split, sample_idx))
            info = {}
            pc_info = {'num_features': 4, 'lidar_idx': sample_idx}
            info['point_cloud'] = pc_info
            
            image_info = {'image_idx': sample_idx, 'image_shape': self.get_image_shape(sample_idx)}
            info['image'] = image_info
            
            calib = self.get_calib(sample_idx)
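            # pad P2 (3x4), R0 (3x3) and V2C (3x4) into homogeneous 4x4 matrices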
            P2 = np.concatenate([calib.P2, np.array([[0., 0., 0., 1.]])], axis=0)
            R0_4x4 = np.zeros([4, 4], dtype=calib.R0.dtype)
            R0_4x4[3, 3] = 1.
            R0_4x4[:3, :3] = calib.R0
            V2C_4x4 = np.concatenate([calib.V2C, np.array([[0., 0., 0., 1.]])], axis=0)
            calib_info = {'P2': P2, 'R0_rect': R0_4x4, 'Tr_velo_to_cam': V2C_4x4}
            info['calib'] = calib_info

            if has_label:
                # obj_list is a list with one element per ground-truth object
                obj_list = self.get_label(sample_idx)
                annotations = {}
                annotations['name'] = np.array([obj.cls_type for obj in obj_list])
                annotations['truncated'] = np.array([obj.truncation for obj in obj_list])
                annotations['occluded'] = np.array([obj.occlusion for obj in obj_list])
                annotations['alpha'] = np.array([obj.alpha for obj in obj_list])
                annotations['bbox'] = np.concatenate([obj.box2d.reshape(1, 4) for obj in obj_list], axis=0)
                annotations['dimensions'] = np.array([[obj.l, obj.h, obj.w] for obj in obj_list])  # lhw(camera) format
                annotations['location'] = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
                annotations['rotation_y'] = np.array([obj.ry for obj in obj_list])
                annotations['score'] = np.array([obj.score for obj in obj_list])
                annotations['difficulty'] = np.array([obj.level for obj in obj_list], np.int32)

                num_objects = len([obj.cls_type for obj in obj_list if obj.cls_type != 'DontCare'])
                num_gt = len(annotations['name'])
                index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
                annotations['index'] = np.array(index, dtype=np.int32)

                loc = annotations['location'][:num_objects]
                dims = annotations['dimensions'][:num_objects]
                rots = annotations['rotation_y'][:num_objects]
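                # convert boxes from the camera frame to the LiDAR frame: centers via rect_to_lidar,
                # dims reordered from camera-format (l, h, w) to (w, l, h)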
                loc_lidar = calib.rect_to_lidar(loc)
                l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]
                gt_boxes_lidar = np.concatenate([loc_lidar, w, l, h, rots[..., np.newaxis]], axis=1)
                annotations['gt_boxes_lidar'] = gt_boxes_lidar

                info['annos'] = annotations

                # keep only the points that project into the image
                if count_inside_pts:
                    points = self.get_lidar(sample_idx)
                    calib = self.get_calib(sample_idx)
                    pts_rect = calib.lidar_to_rect(points[:, 0:3])

                    fov_flag = self.get_fov_flag(pts_rect, info['image']['image_shape'], calib)
                    pts_fov = points[fov_flag]
                    corners_lidar = box_utils.boxes3d_to_corners3d_lidar(gt_boxes_lidar)
                    num_points_in_gt = -np.ones(num_gt, dtype=np.int32)
                    # num_points_in_gt is initialized to all -1; for each valid object it records how many points fall inside its box
                    for k in range(num_objects):
                        flag = box_utils.in_hull(pts_fov[:, 0:3], corners_lidar[k])
                        num_points_in_gt[k] = flag.sum()
                    annotations['num_points_in_gt'] = num_points_in_gt

            return info

        # temp = process_single_scene(self.sample_id_list[0])
        sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list
        # parallel processing:
        # ThreadPoolExecutor is an Executor subclass that uses a thread pool to execute calls asynchronously
        with futures.ThreadPoolExecutor(num_workers) as executor:
            infos = executor.map(process_single_scene, sample_id_list)
        return list(infos)

    # build the ground-truth database from the train info file, i.e. save only the gt boxes in the training data and the points they enclose, for use in data augmentation
    def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
        database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
        db_info_save_path = Path(self.root_path) / ('kitti_dbinfos_%s.pkl' % split)

        database_save_path.mkdir(parents=True, exist_ok=True)
        all_db_infos = {}

        with open(info_path, 'rb') as f:
            infos = pickle.load(f)

        for k in range(len(infos)):
            print('gt_database sample: %d/%d' % (k + 1, len(infos)))
            info = infos[k]
            sample_idx = info['point_cloud']['lidar_idx']
            points = self.get_lidar(sample_idx)
            annos = info['annos']
            names = annos['name']
            difficulty = annos['difficulty']
            bbox = annos['bbox']
            gt_boxes = annos['gt_boxes_lidar']

            num_obj = gt_boxes.shape[0]
            # flag which points fall inside each gt box
            point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
                torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
            ).numpy()  # (nboxes, npoints)

            for i in range(num_obj):
                filename = '%s_%s_%d.bin' % (sample_idx, names[i], i)
                filepath = database_save_path / filename
                gt_points = points[point_indices[i] > 0]

                gt_points[:, :3] -= gt_boxes[i, :3]
                with open(filepath, 'w') as f:
                    gt_points.tofile(f)

                if (used_classes is None) or names[i] in used_classes:
                    db_path = str(filepath.relative_to(self.root_path))  # gt_database/xxxxx.bin
                    db_info = {'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
                               'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
                               'difficulty': difficulty[i], 'bbox': bbox[i], 'score': annos['score'][i]}
                    if names[i] in all_db_infos:
                        all_db_infos[names[i]].append(db_info)
                    else:
                        all_db_infos[names[i]] = [db_info]
        for k, v in all_db_infos.items():
            print('Database %s: %d' % (k, len(v)))

        with open(db_info_save_path, 'wb') as f:
            pickle.dump(all_db_infos, f)
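
A quick sanity check is to load the generated files back and inspect an entry. A minimal sketch (assuming the script above has been run, so the pkl files below exist):

import pickle

with open('data/kitti/kitti_infos_train.pkl', 'rb') as f:
    infos = pickle.load(f)

print(len(infos))                             # number of training frames
info = infos[0]
print(info.keys())                            # point_cloud, image, calib, annos
print(info['annos']['name'])                  # class name of each gt object
print(info['annos']['gt_boxes_lidar'].shape)  # (num_objects, 7)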

If this is not detailed enough, you can refer to my other post: SASSD data preprocessing.

2 Network Construction and Training

As usual, start from train.py. Here you can see every component needed to build a supervised deep-learning network (a minimal wiring sketch follows the list):

  1. train_set: a class that reads the data; it must implement def __getitem__(self, index): to fetch a sample by index
  2. train_loader: usually PyTorch's built-in training-data iterator
  3. model: the core model; it must implement forward
  4. optimizer: the optimizer
  5. lr_scheduler: the learning-rate scheduler
  6. lr_warmup_scheduler: the warmup LR scheduler (during warmup the LR schedule has two phases)
  7. train_model: the training loop
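
Abstractly, these seven parts wire together as in the minimal PyTorch skeleton below (MyDataset and MyModel are hypothetical stand-ins, not PCDet's actual API):

import torch
from torch.utils.data import DataLoader

train_set = MyDataset()                                                  # 1. must implement __getitem__/__len__
train_loader = DataLoader(train_set, batch_size=4, shuffle=True)         # 2.
model = MyModel().cuda()                                                 # 3. must implement forward
optimizer = torch.optim.Adam(model.parameters(), lr=3e-3)                # 4.
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=15)  # 5.
# 6. a warmup scheduler would additionally ramp the LR up over the first iterations

for epoch in range(80):                                                  # 7. the training loop
    for batch in train_loader:
        optimizer.zero_grad()
        loss = model(batch)
        loss.backward()
        optimizer.step()
    lr_scheduler.step()

PCDet's actual main() assembles the same pieces: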
def main():
    # parse args and config
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_train = False  # distributed training disabled
    else:
        args.batch_size, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.batch_size, args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_train = True
    if args.fix_random_seed:    # fixing the seed makes the random numbers deterministic, so the run can be reproduced exactly
        common_utils.set_random_seed(666)
    # output path: root + output + cfg name + extra tag (empty by default)
    output_dir = cfg.ROOT_DIR / 'output' / cfg.TAG / args.extra_tag
    output_dir.mkdir(parents=True, exist_ok=True)
    # checkpoint directory
    ckpt_dir = output_dir / 'ckpt'
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    # log file name
    log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)

    # log to file: everything passed to logger.info is written into the log file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)

    if dist_train:
        # distributed multi-GPU training
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    # write all args into the log
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)

    # TensorBoard writer
    tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None

    # -----------------------create dataloader & network & optimizer---------------------------
    # see pcdet.datasets
    # train_set is an initialized KittiDataset; its member functions are called later on
    # train_loader is PyTorch's built-in iterable training-data generator
    # train_sampler is used in distributed training to partition train_set across the GPUs
    train_set, train_loader, train_sampler = build_dataloader(
        cfg.DATA_CONFIG.DATA_DIR, args.batch_size, dist_train, workers=args.workers, logger=logger, training=True
    )

    # model = SECONDNet; SECONDNet itself initializes little (mainly the forward logic); the model's components are initialized in Detector3D
    model = build_network(train_set)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda()

    optimizer = build_optimizer(model, cfg.MODEL.TRAIN.OPTIMIZATION)

    # load checkpoint if it is possible
    start_epoch = it = 0
    last_epoch = -1
    # load pretrained weights
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist, logger=logger)
    # load a checkpoint together with its optimizer state
    if args.ckpt is not None:
        it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist, optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1
    else:
        ckpt_list = glob.glob(str(ckpt_dir / '*checkpoint_epoch_*.pth'))
        if len(ckpt_list) > 0:
            ckpt_list.sort(key=os.path.getmtime)
            it, start_epoch = model.load_params_with_optimizer(
                ckpt_list[-1], to_cpu=dist, optimizer=optimizer, logger=logger
            )
            last_epoch = start_epoch + 1

    model.train()  # before wrap to DistributedDataParallel to support fixed some parameters
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
    logger.info(model)

    lr_scheduler, lr_warmup_scheduler = build_scheduler(
        optimizer, total_iters_each_epoch=len(train_loader), total_epochs=args.epochs,
        last_epoch=last_epoch, optim_cfg=cfg.MODEL.TRAIN.OPTIMIZATION
    )

    # -----------------------start training---------------------------
    logger.info('**********************Start training %s(%s)**********************' % (cfg.TAG, args.extra_tag))
    # train the model using all the parts defined above
    train_model(
        model,
        optimizer,
        train_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.MODEL.TRAIN.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=cfg.LOCAL_RANK,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        train_sampler=train_sampler,
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num
    )

    logger.info('**********************End training**********************')

3 Data Augmentation

This simple and crude data-augmentation scheme has since been adopted by a great many models; it is explained in detail in this blog post.
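
Conceptually, this gt sampling pastes objects stored in the gt database into the current scene. A minimal sketch of the idea (hypothetical file paths; the real db_sampler.sample_all additionally handles per-class sampling quotas, collision tests and road-plane alignment):

import pickle
import numpy as np

with open('data/kitti/kitti_dbinfos_train.pkl', 'rb') as f:
    db_infos = pickle.load(f)

# the current scene: its points and (empty, for this demo) gt boxes
points = np.fromfile('data/kitti/training/velodyne/000001.bin', dtype=np.float32).reshape(-1, 4)
gt_boxes = np.zeros((0, 7), dtype=np.float32)

db_info = db_infos['Car'][0]                         # one stored Car instance
obj_points = np.fromfile('data/kitti/' + db_info['path'], dtype=np.float32).reshape(-1, 4)
obj_points[:, :3] += db_info['box3d_lidar'][:3]      # undo the centering applied when the db was built

# paste the object into the scene: new gt box plus its points
gt_boxes = np.concatenate([gt_boxes, db_info['box3d_lidar'].reshape(1, 7)], axis=0)
points = np.concatenate([points, obj_points], axis=0)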
Start from class KittiDataset(BaseKittiDataset); the training data is mainly generated here:

example = self.prepare_data(input_dict=input_dict, has_label='annos' in info)

BaseKittiDataset defines prepare_data, and the data augmentation lives inside it:

def prepare_data(self, input_dict, has_label=True):
    """
    :param input_dict:
        sample_idx: string
        calib: object, calibration related
        points: (N, 3 + C1)
        gt_boxes_lidar: optional, (N, 7) [x, y, z, w, l, h, rz] in LiDAR coordinate, z is the bottom center
        gt_names: optional, (N), string
    :param has_label: bool
    :return:
        voxels: (N, max_points_of_each_voxel, 3 + C2), float
        num_points: (N), int
        coordinates: (N, 3), [idx_z, idx_y, idx_x]
        num_voxels: (N)
        voxel_centers: (N, 3)
        calib: object
        gt_boxes: (N, 8), [x, y, z, w, l, h, rz, gt_classes] in LiDAR coordinate, z is the bottom center
        points: (M, 3 + C)
    """
    # these fields need no filtering
    sample_idx = input_dict['sample_idx']
    points = input_dict['points']
    calib = input_dict['calib']

    if has_label:
        gt_boxes = input_dict['gt_boxes_lidar'].copy()
        gt_names = input_dict['gt_names'].copy()

    if self.training:
        # filter gt by class name
        selected = common_utils.drop_arrays_by_name(gt_names, ['DontCare', 'Sign'])
        gt_boxes = gt_boxes[selected]
        gt_names = gt_names[selected]
        gt_boxes_mask = np.array([n in self.class_names for n in gt_names], dtype=np.bool_)
        # start sampling: gt-database data augmentation
        if self.db_sampler is not None:
            road_planes = self.get_road_plane(sample_idx) \
                if cfg.DATA_CONFIG.AUGMENTATION.DB_SAMPLER.USE_ROAD_PLANE else None
            sampled_dict = self.db_sampler.sample_all(
                self.root_path, gt_boxes, gt_names, road_planes=road_planes,
                num_point_features=cfg.DATA_CONFIG.NUM_POINT_FEATURES['total'], calib=calib
            )

            if sampled_dict is not None:
                # merge the sampled gt info with the original, and remove some points from the original cloud to avoid collisions
                sampled_gt_names = sampled_dict['gt_names']
                sampled_gt_boxes = sampled_dict['gt_boxes']
                sampled_points = sampled_dict['points']
                sampled_gt_masks = sampled_dict['gt_masks']

                gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
                gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes])
                gt_boxes_mask = np.concatenate([gt_boxes_mask, sampled_gt_masks], axis=0)

                points = box_utils.remove_points_in_boxes3d(points, sampled_gt_boxes)
                points = np.concatenate([sampled_points, points], axis=0)
        # add noise to every box individually, another augmentation method
        noise_per_object_cfg = cfg.DATA_CONFIG.AUGMENTATION.NOISE_PER_OBJECT
        if noise_per_object_cfg.ENABLED:
            gt_boxes, points = \
                augmentation_utils.noise_per_object_v3_(
                gt_boxes,
                points,
                gt_boxes_mask,
                rotation_perturb=noise_per_object_cfg.GT_ROT_UNIFORM_NOISE,
                center_noise_std=noise_per_object_cfg.GT_LOC_NOISE_STD,
                num_try=100
            )
        # filter once more with the mask
        gt_boxes = gt_boxes[gt_boxes_mask]
        gt_names = gt_names[gt_boxes_mask]

        # map class_names to indices 1, 2, 3, ...
        gt_classes = np.array([self.class_names.index(n) + 1 for n in gt_names], dtype=np.int32)

        # global augmentation of the whole point cloud: flip, rotation, scaling
        noise_global_scene = cfg.DATA_CONFIG.AUGMENTATION.NOISE_GLOBAL_SCENE
        if noise_global_scene.ENABLED:
            gt_boxes, points = augmentation_utils.random_flip(gt_boxes, points)
            gt_boxes, points = augmentation_utils.global_rotation(
                gt_boxes, points, rotation=noise_global_scene.GLOBAL_ROT_UNIFORM_NOISE
            )
            gt_boxes, points = augmentation_utils.global_scaling(
                gt_boxes, points, *noise_global_scene.GLOBAL_SCALING_UNIFORM_NOISE
            )

        # valid point-cloud range
        pc_range = self.voxel_generator.point_cloud_range
        # filter again by whether boxes fall outside the range
        mask = box_utils.mask_boxes_outside_range(gt_boxes, pc_range)
        gt_boxes = gt_boxes[mask]
        gt_classes = gt_classes[mask]
        gt_names = gt_names[mask]

        # limit rad to [-pi, pi]
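        # (limit_period likely computes val - floor(val / period + offset) * period;
        #  with offset=0.5, period=2*pi this maps angles into [-pi, pi))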
        gt_boxes[:, 6] = common_utils.limit_period(gt_boxes[:, 6], offset=0.5, period=2 * np.pi)

    # keep only the point features in use (here all of them, 4 of 4)
    points = points[:, :cfg.DATA_CONFIG.NUM_POINT_FEATURES['use']]
    if cfg.DATA_CONFIG[self.mode].SHUFFLE_POINTS:
        np.random.shuffle(points)
    # the generated voxel grid consists of three parts
    voxel_grid = self.voxel_generator.generate(points)

    # Support spconv 1.0 and 1.1
    # coordinates holds one voxel-grid coordinate [idx_z, idx_y, idx_x] per voxel
    try:
        voxels, coordinates, num_points = voxel_grid
    except:
        voxels = voxel_grid["voxels"]
        coordinates = voxel_grid["coordinates"]
        num_points = voxel_grid["num_points_per_voxel"]

    # center coordinate of each voxel (coordinates are [z, y, x], hence the [:, ::-1] reversal)
    voxel_centers = (coordinates[:, ::-1] + 0.5) * self.voxel_generator.voxel_size \
                    + self.voxel_generator.point_cloud_range[0:3]
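    # e.g., assuming SECOND's usual KITTI config (voxel_size = [0.05, 0.05, 0.1],
    # point_cloud_range = [0, -40, -3, 70.4, 40, 1]), the voxel at [idx_z, idx_y, idx_x] = [10, 200, 400]
    # has center ([400, 200, 10] + 0.5) * [0.05, 0.05, 0.1] + [0, -40, -3] = [20.025, -29.975, -1.95]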

    if cfg.DATA_CONFIG.MASK_POINTS_BY_RANGE:
        points = common_utils.mask_points_by_range(points, cfg.DATA_CONFIG.POINT_CLOUD_RANGE)

    example = {}
    if has_label:
        if not self.training:
            # for eval_utils: data for the eval stage
            selected = common_utils.keep_arrays_by_name(gt_names, self.class_names)
            gt_boxes = gt_boxes[selected]
            gt_names = gt_names[selected]
            gt_classes = np.array([self.class_names.index(n) + 1 for n in gt_names], dtype=np.int32)

        if 'TARGET_CONFIG' in cfg.MODEL.RPN.BACKBONE \
            and cfg.MODEL.RPN.BACKBONE.TARGET_CONFIG.GENERATED_ON == 'dataset':
            seg_labels, part_labels, bbox_reg_labels = \
                self.generate_voxel_part_targets(voxel_centers, gt_boxes, gt_classes)
            example['seg_labels'] = seg_labels
            example['part_labels'] = part_labels
            if bbox_reg_labels is not None:
                example['bbox_reg_labels'] = bbox_reg_labels

        gt_boxes = np.concatenate((gt_boxes, gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)

        example.update({
            'gt_boxes': gt_boxes
        })
    example.update({
        'voxels': voxels,
        'num_points': num_points,
        'coordinates': coordinates,
        'voxel_centers': voxel_centers,
        'calib': input_dict['calib'],
        'points': points
    })
    return example

4 Training

In PCDet-master\tools\train_utils\train_utils.py:

model.train()   # switch to training mode
optimizer.zero_grad()   # zero the gradients

# model_func first converts the batch data to tensors, then feeds them straight into the model, producing three outputs
# disp_dict is empty here and unused
loss, tb_dict, disp_dict = model_func(model, batch)

loss.backward()
clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
optimizer.step()

accumulated_iter += 1
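
clip_grad_norm_ rescales all gradients so that their global L2 norm does not exceed optim_cfg.GRAD_NORM_CLIP, which guards against exploding gradients. A standalone illustration of the call:

import torch
from torch.nn.utils import clip_grad_norm_

w = torch.nn.Parameter(torch.randn(10))
loss = 100 * (w ** 2).sum()
loss.backward()
print(w.grad.norm())            # typically far above 10

clip_grad_norm_([w], max_norm=10.0)
print(w.grad.norm())            # now at most 10 (up to numerical error)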

When reading source code I usually pay attention only to the data flow, i.e. how the input data is produced, the data formats during training, and the outputs, plus each framework's particular way of assembling models. The optimizer and learning-rate scheduling strategies demand too much mathematical theory, so I will pass on those.
The next post annotates the core of the network.
