import torch
import torch.nn as nn

# NOTE: `args` is assumed to be a parsed argument namespace available at
# module scope (e.g. from argparse), providing kernelSize, cateNum, dropRateL.

class spa_cnn_local(nn.Module):
    """Local spatial CNN: four category-specific 3D convolutions with a
    residual connection. input_dim must equal output_dim for the residual
    addition to be shape-compatible, and the four fixed branches assume
    args.cateNum == 4."""

    def __init__(self, input_dim, output_dim):
        super(spa_cnn_local, self).__init__()
        # Each Conv3d spans the full category axis (kernel depth = args.cateNum,
        # no padding there), so every branch collapses that axis to size 1;
        # the spatial padding keeps the two map dimensions unchanged.
        kernel = [args.kernelSize, args.kernelSize, args.cateNum]
        pad = [(args.kernelSize - 1) // 2, (args.kernelSize - 1) // 2, 0]
        self.spaConv1 = nn.Conv3d(input_dim, output_dim, kernel_size=kernel, stride=1, padding=pad)
        self.spaConv2 = nn.Conv3d(input_dim, output_dim, kernel_size=kernel, stride=1, padding=pad)
        self.spaConv3 = nn.Conv3d(input_dim, output_dim, kernel_size=kernel, stride=1, padding=pad)
        self.spaConv4 = nn.Conv3d(input_dim, output_dim, kernel_size=kernel, stride=1, padding=pad)
        self.drop = nn.Dropout(args.dropRateL)
        self.act_lr = nn.LeakyReLU()

    def forward(self, embeds):
        # embeds: [batch, channels, height, width, categories].
        # One convolution per category branch, each followed by dropout.
        cate_1 = self.drop(self.spaConv1(embeds))
        cate_2 = self.drop(self.spaConv2(embeds))
        cate_3 = self.drop(self.spaConv3(embeds))
        cate_4 = self.drop(self.spaConv4(embeds))
        # Each branch has category depth 1; concatenating the four branches
        # rebuilds a 4-deep category axis, matching embeds for the residual.
        spa_cate = torch.cat([cate_1, cate_2, cate_3, cate_4], dim=-1)
        return self.act_lr(spa_cate + embeds)
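
# A minimal usage sketch (not part of the source): the hyperparameter values
# below (kernelSize=3, cateNum=4, dropRateL=0.5) and the tensor sizes are
# illustrative assumptions, chosen only to show the expected input layout
# [batch, channels, height, width, categories] and that the module preserves
# the input shape.
if __name__ == "__main__":
    from types import SimpleNamespace

    # Stand-in for the real parsed arguments; assigned at module scope so
    # spa_cnn_local.__init__ can resolve it as the global `args`.
    args = SimpleNamespace(kernelSize=3, cateNum=4, dropRateL=0.5)

    model = spa_cnn_local(input_dim=16, output_dim=16)
    dummy = torch.randn(2, 16, 10, 10, args.cateNum)
    out = model(dummy)
    print(out.shape)  # torch.Size([2, 16, 10, 10, 4]), same shape as the input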