# Project ego-frame reference points onto the image (uv) plane.


def reference_porint_project(reference_points, map_range, img_metas):
    """Project normalized BEV/ego reference points onto the image (uv) plane.

    Args:
        reference_points (Tensor): (B, N, 2) points in [0, 1], interpreted as
            normalized (x, y) in the ego/map frame.
            # NOTE(review): only batch index 0 is used below — batch size 1 is
            # assumed; confirm against the caller.
        map_range (sequence of 6 floats): [x_min, y_min, z_min, x_max, y_max,
            z_max] metric extents used to de-normalize the points.
        img_metas (list[dict]): per-sample metadata; img_metas[0] must contain
            'cam2ego_rotation' (3x3), 'cam2ego_translation' (3,),
            'cam_intrinsic' (3x3) and 'img_shape' (h, w, ...).

    Returns:
        Tensor: (M, 2) uv coordinates normalized to [0, 1] for the points that
        project in front of the camera and inside the image.
    """
    points_shape = list(reference_points.shape)
    points_shape[-1] = 1

    # Lift 2D reference points to 3D by appending z = 0 (normalized).
    reference_points_3d = torch.cat(
        (reference_points,
         torch.zeros(points_shape, device=reference_points.device)),
        dim=-1)

    # De-normalize from the [0, 1] scale to metric ego/map coordinates.
    reference_points_3d[..., 0:1] = reference_points_3d[..., 0:1] * (map_range[3] - map_range[0]) + map_range[0]  # x axis
    reference_points_3d[..., 1:2] = reference_points_3d[..., 1:2] * (map_range[4] - map_range[1]) + map_range[1]  # y axis
    reference_points_3d[..., 2:3] = reference_points_3d[..., 2:3] * (map_range[5] - map_range[2]) + map_range[2]  # z axis

    cam2ego_rotation = reference_points_3d.new_tensor(img_metas[0]['cam2ego_rotation'])
    cam2ego_translation = reference_points_3d.new_tensor(img_metas[0]['cam2ego_translation'])
    cam_intrinsic = reference_points_3d.new_tensor(img_metas[0]['cam_intrinsic'])

    # ego -> camera: p_cam = R^T (p_ego - t).  For row vectors, p @ R applies
    # R^T, i.e. the inverse of the cam->ego rotation, which is what we want.
    reference_points_3d = reference_points_3d - cam2ego_translation
    reference_points_3d = reference_points_3d @ cam2ego_rotation

    # 3D -> 2D projection: embed the 3x3 intrinsic matrix in a 4x4 so it can
    # act on homogeneous [x, y, z, 1] points.
    proj_mat = torch.eye(4, device=cam_intrinsic.device, dtype=cam_intrinsic.dtype)
    proj_mat[:3, :3] = cam_intrinsic

    points_4 = torch.cat(
        [reference_points_3d, reference_points_3d.new_ones(points_shape)],
        dim=-1)
    point_2d = points_4 @ proj_mat.T

    # Guard the perspective divide: clamping the depth keeps points at/behind
    # the image plane from producing inf/NaN; they are dropped by the depth
    # term of the FOV mask below.
    depth = point_2d[..., 2:3]
    reference_points_uv_origin = point_2d[..., :2] / torch.clamp(depth, min=1e-5)

    # Shift by one pixel and snap to integer pixel coordinates (keeps the
    # original implementation's rounding convention).
    reference_points_uv_origin = (reference_points_uv_origin - 1).round()

    # Keep only points that are in front of the camera AND inside the image.
    # img_shape is (h, w, ...): index 1 bounds u, index 0 bounds v.
    fov_inds = ((reference_points_uv_origin[:, :, 0] < img_metas[0]['img_shape'][1])
                & (reference_points_uv_origin[:, :, 0] >= 0)
                & (reference_points_uv_origin[:, :, 1] < img_metas[0]['img_shape'][0])
                & (reference_points_uv_origin[:, :, 1] >= 0)
                & (depth[..., 0] > 0))

    # Advanced indexing copies, so the in-place division below does not touch
    # reference_points_uv_origin.
    reference_points_uv = reference_points_uv_origin[0][fov_inds[0], :2]

    # Pixel coordinates -> [0, 1]: normalize by image width / height.
    reference_points_uv[..., 0] /= img_metas[0]['img_shape'][1]
    reference_points_uv[..., 1] /= img_metas[0]['img_shape'][0]

    return reference_points_uv
# (Removed: non-code web-page residue — vote/favorite widgets and payment
# dialog text scraped along with the article; it is not part of the program.)