PointTransformerSeg(
(backbone): Backbone(
(fc1): Sequential(
(0): Linear(in_features=19, out_features=32, bias=True)
(1): ReLU()
(2): Linear(in_features=32, out_features=32, bias=True)
)
(transformer1): TransformerBlock(
(fc1): Linear(in_features=32, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=32, bias=True)
(fc_delta): Sequential(
(0): Linear(in_features=3, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(fc_gamma): Sequential(
(0): Linear(in_features=512, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(w_qs): Linear(in_features=512, out_features=512, bias=False)
(w_ks): Linear(in_features=512, out_features=512, bias=False)
(w_vs): Linear(in_features=512, out_features=512, bias=False)
)
(transition_downs): ModuleList(
(0): TransitionDown(
(sa): PointNetSetAbstraction(
(mlp_convs): ModuleList(
(0): Conv2d(35, 64, kernel_size=(1, 1), stride=(1, 1))
(1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))
)
(mlp_bns): ModuleList(
(0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(1): TransitionDown(
(sa): PointNetSetAbstraction(
(mlp_convs): ModuleList(
(0): Conv2d(67, 128, kernel_size=(1, 1), stride=(1, 1))
(1): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))
)
(mlp_bns): ModuleList(
(0): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(2): TransitionDown(
(sa): PointNetSetAbstraction(
(mlp_convs): ModuleList(
(0): Conv2d(131, 256, kernel_size=(1, 1), stride=(1, 1))
(1): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
)
(mlp_bns): ModuleList(
(0): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(3): TransitionDown(
(sa): PointNetSetAbstraction(
(mlp_convs): ModuleList(
(0): Conv2d(259, 512, kernel_size=(1, 1), stride=(1, 1))
(1): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1))
)
(mlp_bns): ModuleList(
(0): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
)
(transformers): ModuleList(
(0): TransformerBlock(
(fc1): Linear(in_features=64, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=64, bias=True)
(fc_delta): Sequential(
(0): Linear(in_features=3, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(fc_gamma): Sequential(
(0): Linear(in_features=512, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(w_qs): Linear(in_features=512, out_features=512, bias=False)
(w_ks): Linear(in_features=512, out_features=512, bias=False)
(w_vs): Linear(in_features=512, out_features=512, bias=False)
)
(1): TransformerBlock(
(fc1): Linear(in_features=128, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=128, bias=True)
(fc_delta): Sequential(
(0): Linear(in_features=3, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(fc_gamma): Sequential(
(0): Linear(in_features=512, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(w_qs): Linear(in_features=512, out_features=512, bias=False)
(w_ks): Linear(in_features=512, out_features=512, bias=False)
(w_vs): Linear(in_features=512, out_features=512, bias=False)
)
(2): TransformerBlock(
(fc1): Linear(in_features=256, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=256, bias=True)
(fc_delta): Sequential(
(0): Linear(in_features=3, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(fc_gamma): Sequential(
(0): Linear(in_features=512, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(w_qs): Linear(in_features=512, out_features=512, bias=False)
(w_ks): Linear(in_features=512, out_features=512, bias=False)
(w_vs): Linear(in_features=512, out_features=512, bias=False)
)
(3): TransformerBlock(
(fc1): Linear(in_features=512, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc_delta): Sequential(
(0): Linear(in_features=3, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(fc_gamma): Sequential(
(0): Linear(in_features=512, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(w_qs): Linear(in_features=512, out_features=512, bias=False)
(w_ks): Linear(in_features=512, out_features=512, bias=False)
(w_vs): Linear(in_features=512, out_features=512, bias=False)
)
)
)
(fc2): Sequential(
(0): Linear(in_features=512, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
(3): ReLU()
(4): Linear(in_features=512, out_features=512, bias=True)
)
(transformer2): TransformerBlock(
(fc1): Linear(in_features=512, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc_delta): Sequential(
(0): Linear(in_features=3, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(fc_gamma): Sequential(
(0): Linear(in_features=512, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(w_qs): Linear(in_features=512, out_features=512, bias=False)
(w_ks): Linear(in_features=512, out_features=512, bias=False)
(w_vs): Linear(in_features=512, out_features=512, bias=False)
)
(transition_ups): ModuleList(
(0): TransitionUp(
(fc1): Sequential(
(0): Linear(in_features=512, out_features=256, bias=True)
(1): SwapAxes()
(2): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): SwapAxes()
(4): ReLU()
)
(fc2): Sequential(
(0): Linear(in_features=256, out_features=256, bias=True)
(1): SwapAxes()
(2): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): SwapAxes()
(4): ReLU()
)
(fp): PointNetFeaturePropagation(
(mlp_convs): ModuleList()
(mlp_bns): ModuleList()
)
)
(1): TransitionUp(
(fc1): Sequential(
(0): Linear(in_features=256, out_features=128, bias=True)
(1): SwapAxes()
(2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): SwapAxes()
(4): ReLU()
)
(fc2): Sequential(
(0): Linear(in_features=128, out_features=128, bias=True)
(1): SwapAxes()
(2): BatchNorm1d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): SwapAxes()
(4): ReLU()
)
(fp): PointNetFeaturePropagation(
(mlp_convs): ModuleList()
(mlp_bns): ModuleList()
)
)
(2): TransitionUp(
(fc1): Sequential(
(0): Linear(in_features=128, out_features=64, bias=True)
(1): SwapAxes()
(2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): SwapAxes()
(4): ReLU()
)
(fc2): Sequential(
(0): Linear(in_features=64, out_features=64, bias=True)
(1): SwapAxes()
(2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): SwapAxes()
(4): ReLU()
)
(fp): PointNetFeaturePropagation(
(mlp_convs): ModuleList()
(mlp_bns): ModuleList()
)
)
(3): TransitionUp(
(fc1): Sequential(
(0): Linear(in_features=64, out_features=32, bias=True)
(1): SwapAxes()
(2): BatchNorm1d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): SwapAxes()
(4): ReLU()
)
(fc2): Sequential(
(0): Linear(in_features=32, out_features=32, bias=True)
(1): SwapAxes()
(2): BatchNorm1d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): SwapAxes()
(4): ReLU()
)
(fp): PointNetFeaturePropagation(
(mlp_convs): ModuleList()
(mlp_bns): ModuleList()
)
)
)
(transformers): ModuleList(
(0): TransformerBlock(
(fc1): Linear(in_features=256, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=256, bias=True)
(fc_delta): Sequential(
(0): Linear(in_features=3, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(fc_gamma): Sequential(
(0): Linear(in_features=512, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(w_qs): Linear(in_features=512, out_features=512, bias=False)
(w_ks): Linear(in_features=512, out_features=512, bias=False)
(w_vs): Linear(in_features=512, out_features=512, bias=False)
)
(1): TransformerBlock(
(fc1): Linear(in_features=128, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=128, bias=True)
(fc_delta): Sequential(
(0): Linear(in_features=3, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(fc_gamma): Sequential(
(0): Linear(in_features=512, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(w_qs): Linear(in_features=512, out_features=512, bias=False)
(w_ks): Linear(in_features=512, out_features=512, bias=False)
(w_vs): Linear(in_features=512, out_features=512, bias=False)
)
(2): TransformerBlock(
(fc1): Linear(in_features=64, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=64, bias=True)
(fc_delta): Sequential(
(0): Linear(in_features=3, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(fc_gamma): Sequential(
(0): Linear(in_features=512, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(w_qs): Linear(in_features=512, out_features=512, bias=False)
(w_ks): Linear(in_features=512, out_features=512, bias=False)
(w_vs): Linear(in_features=512, out_features=512, bias=False)
)
(3): TransformerBlock(
(fc1): Linear(in_features=32, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=32, bias=True)
(fc_delta): Sequential(
(0): Linear(in_features=3, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(fc_gamma): Sequential(
(0): Linear(in_features=512, out_features=512, bias=True)
(1): ReLU()
(2): Linear(in_features=512, out_features=512, bias=True)
)
(w_qs): Linear(in_features=512, out_features=512, bias=False)
(w_ks): Linear(in_features=512, out_features=512, bias=False)
(w_vs): Linear(in_features=512, out_features=512, bias=False)
)
)
(fc3): Sequential(
(0): Linear(in_features=32, out_features=64, bias=True)
(1): ReLU()
(2): Linear(in_features=64, out_features=64, bias=True)
(3): ReLU()
(4): Linear(in_features=64, out_features=50, bias=True)
)
)
Point Transformer (PointTransformerSeg) 的 part_seg 模型详细信息记录（注：该结构为 Point Transformer，而非 PCT）
最新推荐文章于 2022-07-15 15:24:58 发布