ResidualCoder

This post walks through the residual box encoder/decoders used for 3D object detection: ResidualCoder, PreviousResidualDecoder (plus its RoI variant), and PointResidualCoder. The encoders compare a box's center position, size, and heading angle against an anchor and produce residual targets; the decoders combine those residuals with the anchor attributes to recover absolute boxes. Regressing small, normalized residuals instead of absolute coordinates makes the targets easier to learn and improves training efficiency.
Location: pcdet/utils/box_coder_utils.py

import numpy as np
import torch


# Residual box encoder/decoder
class ResidualCoder(object):
    def __init__(self, code_size=7, encode_angle_by_sincos=False, **kwargs):
        super().__init__()
        self.code_size = code_size
        self.encode_angle_by_sincos = encode_angle_by_sincos  # encode the heading as (cos, sin) instead of a raw angle residual
        if self.encode_angle_by_sincos:
            self.code_size += 1  # (cos, sin) takes one more slot than a single angle

    # Encode: absolute boxes -> residuals relative to anchors
    def encode_torch(self, boxes, anchors):
        """
        Args:
            boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]  center position, sizes along x/y/z, heading angle
            anchors: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
        Returns:
        """
        anchors[:, 3:6] = torch.clamp_min(anchors[:, 3:6], min=1e-5)  # clamp sizes from below to avoid division by zero / log(0)
        boxes[:, 3:6] = torch.clamp_min(boxes[:, 3:6], min=1e-5)

        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)  # split into per-attribute columns (x, y, z, dx, dy, dz, heading, extras)
        xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(boxes, 1, dim=-1)

        diagonal = torch.sqrt(dxa ** 2 + dya ** 2)  # BEV diagonal of the anchor, used to normalize the center offsets
        xt = (xg - xa) / diagonal   # Δx, normalized by the anchor diagonal
        yt = (yg - ya) / diagonal   # Δy
        zt = (zg - za) / dza        # Δz, normalized by the anchor height
        dxt = torch.log(dxg / dxa)  # size residuals in log space
        dyt = torch.log(dyg / dya)
        dzt = torch.log(dzg / dza)
        if self.encode_angle_by_sincos:
            rt_cos = torch.cos(rg) - torch.cos(ra)  # cos(rg) - cos(ra), not cos(rg - ra)
            rt_sin = torch.sin(rg) - torch.sin(ra)  # sin(rg) - sin(ra)
            rts = [rt_cos, rt_sin]
        else:
            rts = [rg - ra]  # plain angle residual

        cts = [g - a for g, a in zip(cgs, cas)]  # residuals of any extra attributes
        return torch.cat([xt, yt, zt, dxt, dyt, dzt, *rts, *cts], dim=-1)  # concatenate all residuals into one target tensor

    # Decode: residuals + anchors -> absolute boxes
    def decode_torch(self, box_encodings, anchors):
        """
        Args:
            box_encodings: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...] encoded residuals
            anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
        Returns:
        """
        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)  # anchor attributes (x, y, z, dx, dy, dz, heading)
        if not self.encode_angle_by_sincos:
            xt, yt, zt, dxt, dyt, dzt, rt, *cts = torch.split(box_encodings, 1, dim=-1)
        else:
            xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(box_encodings, 1, dim=-1)  # the angle occupies two slots: (cos, sin)

        diagonal = torch.sqrt(dxa ** 2 + dya ** 2)  # BEV diagonal of the anchor (matches the encoder)
        xg = xt * diagonal + xa  # decoded center x
        yg = yt * diagonal + ya  # decoded center y
        zg = zt * dza + za       # decoded center z

        dxg = torch.exp(dxt) * dxa  # decoded sizes: invert the log-space residuals
        dyg = torch.exp(dyt) * dya
        dzg = torch.exp(dzt) * dza

        if self.encode_angle_by_sincos:
            rg_cos = cost + torch.cos(ra)  # recover cos(rg) = (cos(rg) - cos(ra)) + cos(ra)
            rg_sin = sint + torch.sin(ra)  # recover sin(rg)
            rg = torch.atan2(rg_sin, rg_cos)  # back to a heading angle
        else:
            rg = rt + ra

        cgs = [t + a for t, a in zip(cts, cas)]
        return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)


# Legacy residual decoder
class PreviousResidualDecoder(object):
    def __init__(self, code_size=7, **kwargs):
        super().__init__()
        self.code_size = code_size

    @staticmethod
    def decode_torch(box_encodings, anchors):
        """
        Args:
            box_encodings:  (B, N, 7 + ?) x, y, z, w, l, h, r, custom values
            anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]

        Returns:

        """
        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)

        diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
        xg = xt * diagonal + xa
        yg = yt * diagonal + ya
        zg = zt * dza + za

        dxg = torch.exp(lt) * dxa  # note the legacy (w, l, h) target ordering: dx pairs with lt, dy with wt
        dyg = torch.exp(wt) * dya
        dzg = torch.exp(ht) * dza
        rg = rt + ra

        cgs = [t + a for t, a in zip(cts, cas)]
        return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)


# Legacy residual RoI decoder
class PreviousResidualRoIDecoder(object):
    def __init__(self, code_size=7, **kwargs):
        super().__init__()
        self.code_size = code_size

    @staticmethod
    def decode_torch(box_encodings, anchors):
        """
        Args:
            box_encodings:  (B, N, 7 + ?) x, y, z, w, l, h, r, custom values
            anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]

        Returns:

        """
        xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
        xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)

        diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
        xg = xt * diagonal + xa
        yg = yt * diagonal + ya
        zg = zt * dza + za

        dxg = torch.exp(lt) * dxa  # same legacy (w, l, h) target ordering as above
        dyg = torch.exp(wt) * dya
        dzg = torch.exp(ht) * dza
        rg = ra - rt  # note the sign: the RoI variant subtracts the angle residual

        cgs = [t + a for t, a in zip(cts, cas)]
        return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)


# Point-based residual coder: encodes boxes relative to individual point locations
class PointResidualCoder(object):
    def __init__(self, code_size=8, use_mean_size=True, **kwargs):
        super().__init__()
        self.code_size = code_size
        self.use_mean_size = use_mean_size
        if self.use_mean_size:
            # per-class mean box sizes, used in place of anchor sizes for the residuals
            self.mean_size = torch.from_numpy(np.array(kwargs['mean_size'])).cuda().float()
            assert self.mean_size.min() > 0

    def encode_torch(self, gt_boxes, points, gt_classes=None):
        """
        Args:
            gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            points: (N, 3) [x, y, z]
            gt_classes: (N) [1, num_classes]
        Returns:
            box_coding: (N, 8 + C)
        """
        gt_boxes[:, 3:6] = torch.clamp_min(gt_boxes[:, 3:6], min=1e-5)

        xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(gt_boxes, 1, dim=-1)
        xa, ya, za = torch.split(points, 1, dim=-1)

        if self.use_mean_size:
            assert gt_classes.max() <= self.mean_size.shape[0]
            point_anchor_size = self.mean_size[gt_classes - 1]  # per-class mean size lookup (classes are 1-indexed)
            dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
            diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
            xt = (xg - xa) / diagonal
            yt = (yg - ya) / diagonal
            zt = (zg - za) / dza
            dxt = torch.log(dxg / dxa)
            dyt = torch.log(dyg / dya)
            dzt = torch.log(dzg / dza)
        else:
            xt = (xg - xa)
            yt = (yg - ya)
            zt = (zg - za)
            dxt = torch.log(dxg)
            dyt = torch.log(dyg)
            dzt = torch.log(dzg)

        cts = [g for g in cgs]  # extra attributes are passed through unchanged
        return torch.cat([xt, yt, zt, dxt, dyt, dzt, torch.cos(rg), torch.sin(rg), *cts], dim=-1)  # heading stored as absolute (cos, sin)

    def decode_torch(self, box_encodings, points, pred_classes=None):
        """
        Args:
            box_encodings: (N, 8 + C) [x, y, z, dx, dy, dz, cos, sin, ...]
            points: [x, y, z]
            pred_classes: (N) [1, num_classes]
        Returns:

        """
        xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(box_encodings, 1, dim=-1)
        xa, ya, za = torch.split(points, 1, dim=-1)

        if self.use_mean_size:
            assert pred_classes.max() <= self.mean_size.shape[0]
            point_anchor_size = self.mean_size[pred_classes - 1]  # per-class mean size lookup (classes are 1-indexed)
            dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
            diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
            xg = xt * diagonal + xa
            yg = yt * diagonal + ya
            zg = zt * dza + za

            dxg = torch.exp(dxt) * dxa
            dyg = torch.exp(dyt) * dya
            dzg = torch.exp(dzt) * dza
        else:
            xg = xt + xa
            yg = yt + ya
            zg = zt + za
            dxg, dyg, dzg = torch.split(torch.exp(box_encodings[..., 3:6]), 1, dim=-1)

        rg = torch.atan2(sint, cost)  # recover the absolute heading from (sin, cos)

        cgs = [t for t in cts]
        return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)
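
A quick way to convince yourself that encode_torch and decode_torch are exact inverses is a round-trip check. The sketch below is illustrative only: it assumes the OpenPCDet package is importable (so that ResidualCoder comes from pcdet.utils.box_coder_utils), uses one box/anchor pair borrowed from the dumps further down, and runs on CPU. The .clone() calls avoid the in-place size clamping inside encode_torch.

import torch
from pcdet.utils.box_coder_utils import ResidualCoder  # import path from the file location above

coder = ResidualCoder(code_size=7, encode_angle_by_sincos=False)

# One gt box and one anchor: [x, y, z, dx, dy, dz, heading]
boxes   = torch.tensor([[25.9664, -11.6394, -0.6327, 3.6209, 1.7396, 1.6385, 0.1595]])
anchors = torch.tensor([[25.3976, -11.7273, -1.0000, 3.9000, 1.6000, 1.5600, 0.0000]])

targets = coder.encode_torch(boxes.clone(), anchors.clone())  # (1, 7) residual targets
decoded = coder.decode_torch(targets, anchors)                # back to absolute boxes

assert torch.allclose(decoded, boxes, atol=1e-4)  # round trip recovers the original box

With encode_angle_by_sincos=True the round trip still holds, except that atan2 returns the heading wrapped into (-π, π], so the comparison would need to be made modulo 2π.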

Analysis of the encoding part:

    # Encode: absolute boxes -> residuals relative to anchors
    def encode_torch(self, boxes, anchors):
        """
        Args:
            boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]  center position, sizes along x/y/z, heading angle
            anchors: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
        Returns:
        """

The input boxes:
Notice that many rows are exact duplicates. Why? Compare with the input anchors below and the reason becomes clear.

tensor([[ 25.9664, -11.6394,  -0.6327,   3.6209,   1.7396,   1.6385,   0.1595],
        [ 25.9664, -11.6394,  -0.6327,   3.6209,   1.7396,   1.6385,   0.1595],
        [ 25.9664, -11.6394,  -0.6327,   3.6209,   1.7396,   1.6385,   0.1595],
        [ 25.9664, -11.6394,  -0.6327,   3.6209,   1.7396,   1.6385,   0.1595],
        [ 25.9664, -11.6394,  -0.6327,   3.6209,   1.7396,   1.6385,   0.1595],
        [ 25.9664, -11.6394,  -0.6327,   3.6209,   1.7396,   1.6385,   0.1595],
        [ 25.9664, -11.6394,  -0.6327,   3.6209,   1.7396,   1.6385,   0.1595],
        [ 25.9664, -11.6394,  -0.6327,   3.6209,   1.7396,   1.6385,   0.1595],
        [ 33.7401,  -5.0012,  -0.5899,   4.4806,   1.7497,   1.6587,   0.1495],
        [ 33.7401,  -5.0012,  -0.5899,   4.4806,   1.7497,   1.6587,   0.1495],
        [ 33.7401,  -5.0012,  -0.5899,   4.4806,   1.7497,   1.6587,   0.1495],
        [ 33.7401,  -5.0012,  -0.5899,   4.4806,   1.7497,   1.6587,   0.1495],
        [ 33.7401,  -5.0012,  -0.5899,   4.4806,   1.7497,   1.6587,   0.1495],
        [ 33.7401,  -5.0012,  -0.5899,   4.4806,   1.7497,   1.6587,   0.1495],
        [ 33.7401,  -5.0012,  -0.5899,   4.4806,   1.7497,   1.6587,   0.1495],
        [ 33.7401,  -5.0012,  -0.5899,   4.4806,   1.7497,   1.6587,   0.1495],
        [ 24.0087,  -3.0596,  -0.6675,   4.4806,   1.7497,   1.6587,   0.2195],
        [ 24.0087,  -3.0596,  -0.6675,   4.4806,   1.7497,   1.6587,   0.2195],
        [ 24.0087,  -3.0596,  -0.6675,   4.4806,   1.7497,   1.6587,   0.2195],
        [ 24.0087,  -3.0596,  -0.6675,   4.4806,   1.7497,   1.6587,   0.2195],
        [ 24.0087,  -3.0596,  -0.6675,   4.4806,   1.7497,   1.6587,   0.2195],
        [ 24.0087,  -3.0596,  -0.6675,   4.4806,   1.7497,   1.6587,   0.2195],
        [ 24.0087,  -3.0596,  -0.6675,   4.4806,   1.7497,   1.6587,   0.2195],
        [ 24.0087,  -3.0596,  -0.6675,   4.4806,   1.7497,   1.6587,   0.2195],
        [ 34.4791,  -1.0433,  -0.7200,   4.0962,   1.6486,   1.4160,  -0.6905],
        [ 34.4791,  -1.0433,  -0.7200,   4.0962,   1.6486,   1.4160,  -0.6905],
        [ 34.4791,  -1.0433,  -0.7200,   4.0962,   1.6486,   1.4160,  -0.6905],
        [ 34.4791,  -1.0433,  -0.7200,   4.0962,   1.6486,   1.4160,  -0.6905],
        [ 34.4791,  -1.0433,  -0.7200,   4.0962,   1.6486,   1.4160,  -0.6905],
        [ 34.4791,  -1.0433,  -0.7200,   4.0962,   1.6486,   1.4160,  -0.6905],
        [ 34.4791,  -1.0433,  -0.7200,   4.0962,   1.6486,   1.4160,  -0.6905],
        [ 34.4791,  -1.0433,  -0.7200,   4.0962,   1.6486,   1.4160,  -0.6905],
        [ 38.4560,   0.3878,  -0.6558,   3.8333,   1.6183,   1.4969,   0.0895],
        [ 38.4560,   0.3878,  -0.6558,   3.8333,   1.6183,   1.4969,   0.0895],
        [ 38.4560,   0.3878,  -0.6558,   3.8333,   1.6183,   1.4969,   0.0895],
        [ 38.4560,   0.3878,  -0.6558,   3.8333,   1.6183,   1.4969,   0.0895],
        [ 38.4560,   0.3878,  -0.6558,   3.8333,   1.6183,   1.4969,   0.0895],
        [ 38.4560,   0.3878,  -0.6558,   3.8333,   1.6183,   1.4969,   0.0895],
        [ 38.4560,   0.3878,  -0.6558,   3.8333,   1.6183,   1.4969,   0.0895],
        [ 38.4560,   0.3878,  -0.6558,   3.8333,   1.6183,   1.4969,   0.0895],
        [ 25.4642,   2.0706,  -0.7157,   3.7321,   1.6688,   1.5778,   0.0995],
        [ 25.4642,   2.0706,  -0.7157,   3.7321,   1.6688,   1.5778,   0.0995],
        [ 25.4642,   2.0706,  -0.7157,   3.7321,   1.6688,   1.5778,   0.0995],
        [ 25.4642,   2.0706,  -0.7157,   3.7321,   1.6688,   1.5778,   0.0995],
        [ 25.4642,   2.0706,  -0.7157,   3.7321,   1.6688,   1.5778,   0.0995],
        [ 25.4642,   2.0706,  -0.7157,   3.7321,   1.6688,   1.5778,   0.0995],
        [ 25.4642,   2.0706,  -0.7157,   3.7321,   1.6688,   1.5778,   0.0995],
        [ 25.4642,   2.0706,  -0.7157,   3.7321,   1.6688,   1.5778,   0.0995],
        [  7.5736,   3.9231,  -0.9281,   3.3984,   1.5576,   1.4261,   0.0695],
        [  7.5736,   3.9231,  -0.9281,   3.3984,   1.5576,   1.4261,   0.0695],
        [  7.5736,   3.9231,  -0.9281,   3.3984,   1.5576,   1.4261,   0.0695],
        [  7.5736,   3.9231,  -0.9281,   3.3984,   1.5576,   1.4261,   0.0695],
        [  7.5736,   3.9231,  -0.9281,   3.3984,   1.5576,   1.4261,   0.0695],
        [  7.5736,   3.9231,  -0.9281,   3.3984,   1.5576,   1.4261,   0.0695],
        [ 41.6010,  13.3748,  -0.5409,   3.9546,   1.6385,   1.4564,   1.4595],
        [ 41.6010,  13.3748,  -0.5409,   3.9546,   1.6385,   1.4564,   1.4595],
        [ 41.6010,  13.3748,  -0.5409,   3.9546,   1.6385,   1.4564,   1.4595],
        [ 41.6010,  13.3748,  -0.5409,   3.9546,   1.6385,   1.4564,   1.4595],
        [ 41.6010,  13.3748,  -0.5409,   3.9546,   1.6385,   1.4564,   1.4595],
        [ 41.6010,  13.3748,  -0.5409,   3.9546,   1.6385,   1.4564,   1.4595],
        [ 41.6010,  13.3748,  -0.5409,   3.9546,   1.6385,   1.4564,   1.4595],
        [ 41.6010,  13.3748,  -0.5409,   3.9546,   1.6385,   1.4564,   1.4595],
        [ 27.0532,  21.7356,  -0.8251,   4.2075,   1.8307,   1.4767,  -3.0405],
        [ 27.0532,  21.7356,  -0.8251,   4.2075,   1.8307,   1.4767,  -3.0405],
        [ 27.0532,  21.7356,  -0.8251,   4.2075,   1.8307,   1.4767,  -3.0405],
        [ 27.0532,  21.7356,  -0.8251,   4.2075,   1.8307,   1.4767,  -3.0405],
        [ 27.0532,  21.7356,  -0.8251,   4.2075,   1.8307,   1.4767,  -3.0405],
        [ 27.0532,  21.7356,  -0.8251,   4.2075,   1.8307,   1.4767,  -3.0405],
        [ 27.0532,  21.7356,  -0.8251,   4.2075,   1.8307,   1.4767,  -3.0405],
        [ 27.0532,  21.7356,  -0.8251,   4.2075,   1.8307,   1.4767,  -3.0405],
        [ 27.0532,  21.7356,  -0.8251,   4.2075,   1.8307,   1.4767,  -3.0405]],
       device='cuda:0')

The input anchors: these are all the positive samples of one batch. The gt_box [ 25.9664, -11.6394, -0.6327, 3.6209, 1.7396, 1.6385, 0.1595] above is repeated 8 times because it was matched to 8 anchors; comparing the two dumps shows that these are exactly the positive anchors assigned to that gt_box, all lying close to it in space. (A hand check of the first pair follows after the dump.)

tensor([[ 25.3976, -11.7273,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 25.7191, -11.7273,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 26.0406, -11.7273,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 26.3620, -11.7273,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 26.6835, -11.7273,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 25.7191, -11.4060,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 26.0406, -11.4060,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 26.3620, -11.4060,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 33.4348,  -5.3014,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 33.7563,  -5.3014,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 34.0778,  -5.3014,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 33.1133,  -4.9801,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 33.4348,  -4.9801,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 33.7563,  -4.9801,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 34.0778,  -4.9801,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 34.3993,  -4.9801,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 23.7901,  -3.3736,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 24.1116,  -3.3736,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 23.1472,  -3.0523,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 23.4687,  -3.0523,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 23.7901,  -3.0523,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 24.1116,  -3.0523,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 24.4331,  -3.0523,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 24.7546,  -3.0523,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 33.7563,  -1.1245,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 34.0778,  -1.1245,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 34.3993,  -1.1245,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 34.7207,  -1.1245,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 35.0422,  -1.1245,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 34.0778,  -0.8032,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 34.3993,  -0.8032,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 34.7207,  -0.8032,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 38.2571,   0.1606,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 38.5786,   0.1606,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 38.9001,   0.1606,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 37.9356,   0.4819,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 38.2571,   0.4819,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 38.5786,   0.4819,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 38.9001,   0.4819,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 39.2216,   0.4819,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 25.3976,   1.7671,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 25.7191,   1.7671,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 24.7546,   2.0884,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 25.0761,   2.0884,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 25.3976,   2.0884,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 25.7191,   2.0884,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 26.0406,   2.0884,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 25.3976,   2.4097,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [  7.3942,   3.6949,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [  7.7157,   3.6949,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [  7.0727,   4.0162,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [  7.3942,   4.0162,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [  7.7157,   4.0162,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [  8.0372,   4.0162,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 41.4720,  12.6912,  -1.0000,   3.9000,   1.6000,   1.5600,   1.5700],
        [ 41.4720,  13.0125,  -1.0000,   3.9000,   1.6000,   1.5600,   1.5700],
        [ 41.7935,  13.0125,  -1.0000,   3.9000,   1.6000,   1.5600,   1.5700],
        [ 41.4720,  13.3338,  -1.0000,   3.9000,   1.6000,   1.5600,   1.5700],
        [ 41.7935,  13.3338,  -1.0000,   3.9000,   1.6000,   1.5600,   1.5700],
        [ 41.4720,  13.6551,  -1.0000,   3.9000,   1.6000,   1.5600,   1.5700],
        [ 41.7935,  13.6551,  -1.0000,   3.9000,   1.6000,   1.5600,   1.5700],
        [ 41.4720,  13.9764,  -1.0000,   3.9000,   1.6000,   1.5600,   1.5700],
        [ 27.0050,  21.3662,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 26.3620,  21.6875,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 26.6835,  21.6875,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 27.0050,  21.6875,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 27.3265,  21.6875,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 27.6480,  21.6875,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 26.6835,  22.0087,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 27.0050,  22.0087,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000],
        [ 27.3265,  22.0087,  -1.0000,   3.9000,   1.6000,   1.5600,   0.0000]],
       device='cuda:0')
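
Plugging the first matched pair (row 0 of each dump) into the encoding formulas reproduces the residual targets by hand. A minimal sketch with the numbers above:

import math

# First gt_box / anchor pair taken from the dumps above
xg, yg, zg, dxg, dyg, dzg, rg = 25.9664, -11.6394, -0.6327, 3.6209, 1.7396, 1.6385, 0.1595
xa, ya, za, dxa, dya, dza, ra = 25.3976, -11.7273, -1.0000, 3.9000, 1.6000, 1.5600, 0.0000

diagonal = math.sqrt(dxa ** 2 + dya ** 2)  # anchor BEV diagonal, ~4.2154
xt = (xg - xa) / diagonal                  # ~0.1349
yt = (yg - ya) / diagonal                  # ~0.0209
zt = (zg - za) / dza                       # ~0.2354
dxt = math.log(dxg / dxa)                  # ~-0.0743
dyt = math.log(dyg / dya)                  # ~0.0837
dzt = math.log(dzg / dza)                  # ~0.0491
rt = rg - ra                               # 0.1595 (plain angle residual)
print(xt, yt, zt, dxt, dyt, dzt, rt)

These are exactly the values encode_torch would emit for this row (with encode_angle_by_sincos=False): small, normalized numbers, which is the whole point of regressing residuals rather than absolute coordinates.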