self.backbone_net = Pointnet2MSG(input_channels =input_channels, use_xyz = use_xyz)
网络的 Backbone 由三部分组成:1. 图片特征提取;2. 点云特征提取;3. 特征融合。
1图片特征提取
图片特征提取很简单就是3*3的卷积加上激活函数和归一化。
2融合部分代码
①融合部分的卷积代码
将图片和点云特征拼接起来
应该对应这部分代码(尚未完全确认,将点云投影到二维图像平面的其余代码暂未找到)。
②特征融合
③网络主干pointNet++
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, pointcloud: torch.cuda.FloatTensor, image=None, xy=None):
    """Run the fused point-cloud / image backbone.

    Args:
        pointcloud: (B, N, 3 + C) tensor; split into xyz and features.
        image: input image tensor fed to the image branch (used only when
            cfg.LI_FUSION.ENABLED).
        xy: (B, N, 2) per-point pixel coordinates on the image plane
            (used only when cfg.LI_FUSION.ENABLED).

    Returns:
        Tuple (l_xyz[0], l_features[0]): the original point coordinates and
        the final per-point features after the FP (decoder) pass and, when
        fusion is enabled, the final point/image fusion.
    """
    xyz, features = self._break_up_pc(pointcloud)
    l_xyz, l_features = [xyz], [features]
    if cfg.LI_FUSION.ENABLED:
        #### normalize xy to [-1,1]
        # NOTE(review): hard-coded image size (width 1280, height 384) —
        # presumably the padded KITTI resolution; confirm against the dataloader.
        size_range = [1280.0, 384.0]
        # NOTE(review): these slice assignments modify the caller's `xy`
        # tensor in place; callers must not reuse the raw pixel coordinates.
        xy[:, :, 0] = xy[:, :, 0] / (size_range[0] - 1.0) * 2.0 - 1.0
        xy[:, :, 1] = xy[:, :, 1] / (size_range[1] - 1.0) * 2.0 - 1.0  # = xy / (size_range - 1.) * 2 - 1.
        l_xy_cor = [xy]
        img = [image]
    # Encoder: point-branch SA (set-abstraction) levels, optionally fused
    # with the image branch at each level.
    for i in range(len(self.SA_modules)):
        li_xyz, li_features, li_index = self.SA_modules[i](l_xyz[i], l_features[i])
        if cfg.LI_FUSION.ENABLED:
            # Keep the normalized image coordinates of the points sampled at
            # this level; li_index is expanded to (..., 2) so one gather
            # picks both the x and y columns.
            li_index = li_index.long().unsqueeze(-1).repeat(1,1,2)
            li_xy_cor = torch.gather(l_xy_cor[i],1,li_index)
            image = self.Img_Block[i](img[i])
            # First half of the LI-Fusion module: sample image features at
            # the (normalized) point locations.
            img_gather_feature = Feature_Gather(image,li_xy_cor)
            # Second half: fuse point features with point-wise image features.
            li_features = self.Fusion_Conv[i](li_features,img_gather_feature)
            l_xy_cor.append(li_xy_cor)
            img.append(image)
        l_xyz.append(li_xyz)
        l_features.append(li_features)
    # Decoder: feature-propagation modules run deepest-first (negative
    # indices), upsampling features back toward the full point set.
    for i in range(-1, -(len(self.FP_modules) + 1), -1):
        l_features[i - 1] = self.FP_modules[i](
            l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
        )
    if cfg.LI_FUSION.ENABLED:
        # Upsample every intermediate image feature map via its DeConv layer
        # and concatenate them channel-wise into one multi-scale map.
        DeConv = []
        for i in range(len(cfg.LI_FUSION.IMG_CHANNELS) - 1):
            DeConv.append(self.DeConv[i](img[i + 1]))
        de_concat = torch.cat(DeConv,dim=1)
        # Conv + BN + ReLU over the concatenated map, then sample it at the
        # original per-point coordinates and fuse with the point features.
        img_fusion = F.relu(self.image_fusion_bn(self.image_fusion_conv(de_concat)))
        img_fusion_gather_feature = Feature_Gather(img_fusion, xy)
        l_features[0] = self.final_fusion_img_point(l_features[0], img_fusion_gather_feature)
    return l_xyz[0], l_features[0]
与论文中的网络结构图一致:先通过卷积逐级提取图像特征,再用反卷积将各级特征图上采样后拼接融合。