Reading the monodepth2 code (1): class BackprojectDepth(nn.Module)

import torch
import torch.nn as nn
import numpy as np

class BackprojectDepth(nn.Module):
    """Layer to transform a depth image into a point cloud
    """
    def __init__(self, batch_size, height, width):
        super(BackprojectDepth, self).__init__()

        self.batch_size = batch_size
        self.height = height
        self.width = width

        meshgrid = np.meshgrid(range(self.width), range(self.height), indexing='xy')

        # [array([[0, 1], [0, 1], [0, 1]]), 
        #  array([[0, 0], [1, 1], [2, 2]])]
        self.meshgrid = meshgrid

        # [[[0. 1.],[0. 1.],[0. 1.]]
        #  [[0. 0.],[1. 1.],[2. 2.]]]
        self.id_coords = np.stack(meshgrid, axis=0).astype(np.float32)

        # tensor([[[0., 1.],[0., 1.],[0., 1.]],
        #         [[0., 0.],[1., 1.],[2., 2.]]])
        self.id_coords = nn.Parameter(torch.from_numpy(self.id_coords),
                                      requires_grad=False)
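        # Note: wrapping these constants in nn.Parameter with requires_grad=False
        # registers them with the module, so they follow .to()/.cuda() and appear in
        # the state_dict, but they are never updated during training.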

        # tensor([[[1., 1., 1., 1., 1., 1.]],
        #         [[1., 1., 1., 1., 1., 1.]]])  shape (batch_size, 1, height * width)
        self.ones = nn.Parameter(torch.ones(self.batch_size, 1, self.height * self.width),
                                 requires_grad=False)

        # print(self.id_coords[0].view(-1)) # tensor([0., 1., 0., 1., 0., 1.])
        # print(self.id_coords[1].view(-1)) # tensor([0., 0., 1., 1., 2., 2.])
        # print([self.id_coords[0].view(-1), self.id_coords[1].view(-1)]) # [tensor([0., 1., 0., 1., 0., 1.]), tensor([0., 0., 1., 1., 2., 2.])]

        # tensor([[0., 1., 0., 1., 0., 1.],
        #         [0., 0., 1., 1., 2., 2.]])
        # print(torch.stack([self.id_coords[0].view(-1), self.id_coords[1].view(-1)], 0))

        # tensor([[[0., 1., 0., 1., 0., 1.],
        #          [0., 0., 1., 1., 2., 2.]]])
        self.pix_coords = torch.unsqueeze(torch.stack(
            [self.id_coords[0].view(-1), self.id_coords[1].view(-1)], 0), 0)
        

        # tensor([[[0., 1., 0., 1., 0., 1.],
        #          [0., 0., 1., 1., 2., 2.]],

        #         [[0., 1., 0., 1., 0., 1.],
        #          [0., 0., 1., 1., 2., 2.]]])  batch=2
        self.pix_coords = self.pix_coords.repeat(batch_size, 1, 1)


        # tensor([[[0., 1., 0., 1., 0., 1.],
        #          [0., 0., 1., 1., 2., 2.],
        #          [1., 1., 1., 1., 1., 1.]],

        #         [[0., 1., 0., 1., 0., 1.],
        #          [0., 0., 1., 1., 2., 2.],
        #          [1., 1., 1., 1., 1., 1.]]])
        self.pix_coords = nn.Parameter(torch.cat([self.pix_coords, self.ones], 1),
                                       requires_grad=False)
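        # pix_coords now holds homogeneous pixel coordinates of shape
        # (batch_size, 3, height * width): row 0 is x, row 1 is y, row 2 is all ones.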

    def forward(self, depth, inv_K):
        # Multiply by the inverse intrinsics to turn the homogeneous pixel coordinates
        # into normalized camera coordinates (unit-depth rays)
        cam_points = torch.matmul(inv_K[:, :3, :3], self.pix_coords)
        # Scale each ray by its depth value
        cam_points = depth.view(self.batch_size, 1, -1) * cam_points
        # Append a row of ones so the coordinates are homogeneous again
        cam_points = torch.cat([cam_points, self.ones], 1)

        return cam_points
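
# Per-pixel view of what forward() computes (my own sketch, not part of monodepth2):
# for a pixel (u, v) with depth d and intrinsics fx, fy, cx, cy,
#     X = d * (u - cx) / fx,   Y = d * (v - cy) / fy,   Z = d,
# and the returned column is the homogeneous vector [X, Y, Z, 1].
def backproject_pixel(u, v, d, fx, fy, cx, cy):
    """Scalar reference of the same back-projection, for intuition only."""
    x = d * (u - cx) / fx
    y = d * (v - cy) / fy
    return np.array([x, y, d, 1.0], dtype=np.float32)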

if __name__ == "__main__":
    bp = BackprojectDepth(2, 3, 2)
    # print(bp.meshgrid)
    print(bp.pix_coords.shape)

    # x = np.linspace(0, 2, 2)
    # y = np.linspace(0, 1, 2)
    # print("x:", x)
    # print("y:", y)

    # x = [1, 2, 3, 4]
    # print("xx:", x[:2])
