paper:Fast Fourier Convolution
1、Fourier Convolution
为了解决现有的局部感受野的局限性,即现有的 CNN 模型大多采用小尺寸卷积核,导致感受野较小,难以捕捉图像中相距较远的特征信息,这对于需要全局上下文的任务(如人体姿态估计)来说是不够的。所以,这篇论文通过分析跨尺度融合的重要性,认识到 CNN 中不同层次的特征图包含不同尺度的信息,跨尺度融合可以有效利用这些互补信息,提升模型的表达能力,进而提出一种快速傅里叶卷积(Fast Fourier Convolution),旨在解决现有卷积神经网络 (CNN) 中局部感受野和跨尺度融合的局限性,从而提升模型在各类视觉任务上的表现。
FFC 通过利用傅里叶变换的性质,将局部卷积和全局卷积融合在一个操作单元中。其中,傅里叶变换可以将图像从空间域转换到频域,而在频域中,对单个像素的更新会全局影响所有像素,这为实现全局感受野提供了理论基础。
对于输入X,Fourier Convolution 的实现过程:
-
特征图分割:首先将输入特征图 X 分割成两个部分:局部分支的特征图 Xl 和全局分支的特征图 Xg。其中,Xl 占据了 (1-αin)C 个通道,用于学习局部特征。Xg 占据了 αinC 个通道,用于捕捉长距离的上下文信息。
-
局部分支:在局部分支中,对 Xl 进行常规的小尺寸卷积操作 fl,得到局部分支的输出 Y_L。
-
全局分支:在全局分支中,论文采用了并行结构(全局+半全局),分别使用 Fourier Unit 和 Local Fourier Unit 来完成。在全局分支中,首先通过对 Xg 进行二维离散傅里叶变换 (DFT),将其转换到频域。然后为了降低计算复杂度,使用 1x1 卷积层对频域特征图进行降维。在频域进行卷积操作 fg,捕捉全局信息。然后使用 1x1 卷积层恢复特征图的通道数。最后将频域特征图进行逆傅里叶变换,将其转换回空间域。得到全局分支的输出 Y_G。以上过程构成了 Fourier Unit 操作。
-
半全局分支 (LFU):半全局分支是通过一种 Local Fourier Unit 来完成的。具体来说,首先通过将 Xg 的空间维度减半,得到四个较小的特征图,并将它们沿着通道维度拼接起来。再对拼接后的特征图进行 Fourier Unit 操作。最后将结果进行空间平移和复制,使其与 FU 的输出通道数一致。得到半全局分支的输出 Y_lG。
-
输出:在两个分支分别处理完后,Y_L作为局部分支的输出,Y_G和Y_lG相加作为整个全局分支的输出。
Fourier Convolution 结构图:
Local Fourier Unit 结构图:
2、代码实现
import torch
import torch.nn as nn
class FourierUnit(nn.Module):
    """Spectral (global) convolution: a 1x1 conv applied in the frequency domain.

    The input is mapped to the frequency domain with a 2-D FFT, real and
    imaginary planes are stacked along the channel axis and mixed by a
    pointwise convolution (+ BN + ReLU), then the result is transformed back
    to the spatial domain.  Because every frequency bin depends on all
    spatial positions, this unit has an image-wide receptive field.

    Args:
        in_channels:  channels of the (real-valued) input feature map.
        out_channels: channels of the output feature map.
        groups:       group count for the pointwise convolution.
    """

    def __init__(self, in_channels, out_channels, groups=1):
        super(FourierUnit, self).__init__()
        self.groups = groups
        # Channels are doubled because real and imaginary parts are
        # concatenated before the convolution.
        self.conv_layer = torch.nn.Conv2d(in_channels=in_channels * 2,
                                          out_channels=out_channels * 2,
                                          kernel_size=1, stride=1, padding=0,
                                          groups=self.groups, bias=False)
        self.bn = torch.nn.BatchNorm2d(out_channels * 2)
        self.relu = torch.nn.ReLU(inplace=True)

    def forward(self, x):
        batch, c, h, w = x.size()
        # Spatial -> frequency domain ("ortho" keeps the transform unitary).
        ffted = torch.fft.fft2(x, norm="ortho")
        # (batch, 2*c, h, w): real and imaginary planes stacked channel-wise.
        ffted = torch.cat([ffted.real, ffted.imag], dim=1)
        ffted = self.relu(self.bn(self.conv_layer(ffted)))
        # Re-assemble a complex spectrum and return to the spatial domain;
        # .real drops the (numerically tiny) imaginary residue.
        ffted_real, ffted_imag = torch.chunk(ffted, 2, dim=1)
        output = torch.fft.ifft2(torch.complex(ffted_real, ffted_imag),
                                 s=(h, w), norm="ortho").real
        return output
class SpectralTransform(nn.Module):
    """Global branch of FFC: 1x1 reduce -> FourierUnit (+ optional LFU) -> 1x1 expand.

    The Local Fourier Unit (LFU) folds the four spatial quadrants of a quarter
    of the channels into the channel axis, applies a FourierUnit on the small
    map, and tiles the result back to full resolution — a "semi-global" path.
    """

    def __init__(self, in_channels, out_channels, stride=1, groups=1, enable_lfu=True):
        super(SpectralTransform, self).__init__()
        self.enable_lfu = enable_lfu
        # Optional 2x spatial downsampling happens before any spectral work.
        if stride == 2:
            self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
        else:
            self.downsample = nn.Identity()
        self.stride = stride
        half = out_channels // 2
        # Channel reduction keeps the FFT path cheap.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, half, kernel_size=1, groups=groups, bias=False),
            nn.BatchNorm2d(half),
            nn.ReLU(inplace=True),
        )
        self.fu = FourierUnit(half, half, groups)
        if self.enable_lfu:
            self.lfu = FourierUnit(half, half, groups)
        # Channel expansion back to the requested output width.
        self.conv2 = torch.nn.Conv2d(half, out_channels,
                                     kernel_size=1, groups=groups, bias=False)

    def forward(self, x):
        x = self.conv1(self.downsample(x))
        spectral = self.fu(x)
        if self.enable_lfu:
            n, c, h, w = x.shape
            half_h, half_w = h // 2, w // 2
            # Stack the four spatial quadrants of the first c//4 channels
            # along the channel axis (assumes even h and w).
            folded = torch.cat(torch.split(x[:, :c // 4], half_h, dim=-2),
                               dim=1).contiguous()
            folded = torch.cat(torch.split(folded, half_w, dim=-1),
                               dim=1).contiguous()
            local = self.lfu(folded)
            # Tile the semi-global response back to full resolution.
            local = local.repeat(1, 1, 2, 2).contiguous()
        else:
            local = 0
        # Residual combination of identity, global and semi-global paths.
        return self.conv2(x + spectral + local)
class FFC(nn.Module):
    """Fast Fourier Convolution layer.

    Splits channels into a local part (regular convolution) and a global
    part (SpectralTransform), with four cross paths: local->local,
    local->global, global->local, global->global.

    Args:
        in_channels / out_channels: total channel counts.
        kernel_size, stride, padding, dilation, groups, bias: passed to the
            spatial convolutions (stride must be 1 or 2).
        ratio_gin:  fraction of the input channels routed to the global branch.
        ratio_gout: fraction of the output channels produced by the global branch.
        enable_lfu: enable the Local Fourier Unit inside SpectralTransform.
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 ratio_gin, ratio_gout, stride=1, padding=0,
                 dilation=1, groups=1, bias=False, enable_lfu=True):
        super(FFC, self).__init__()
        assert stride == 1 or stride == 2, "Stride should be 1 or 2."
        self.stride = stride
        in_cg = int(in_channels * ratio_gin)
        in_cl = in_channels - in_cg
        out_cg = int(out_channels * ratio_gout)
        out_cl = out_channels - out_cg
        self.ratio_gin = ratio_gin
        self.ratio_gout = ratio_gout
        # Any path whose input or output is empty collapses to Identity
        # (nn.Identity ignores the extra constructor arguments).
        module = nn.Identity if in_cl == 0 or out_cl == 0 else nn.Conv2d
        self.convl2l = module(in_cl, out_cl, kernel_size,
                              stride, padding, dilation, groups, bias)
        module = nn.Identity if in_cl == 0 or out_cg == 0 else nn.Conv2d
        self.convl2g = module(in_cl, out_cg, kernel_size,
                              stride, padding, dilation, groups, bias)
        module = nn.Identity if in_cg == 0 or out_cl == 0 else nn.Conv2d
        self.convg2l = module(in_cg, out_cl, kernel_size,
                              stride, padding, dilation, groups, bias)
        module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform
        self.convg2g = module(
            in_cg, out_cg, stride, 1 if groups == 1 else groups // 2, enable_lfu)

    def forward(self, x):
        # Accept either a (local, global) pair or a single tensor; a missing
        # branch is represented by the scalar 0, which Identity passes through.
        x_l, x_g = x if isinstance(x, tuple) else (x, 0)
        out_xl, out_xg = 0, 0
        if self.ratio_gout != 1:
            out_xl = self.convl2l(x_l) + self.convg2l(x_g)
        if self.ratio_gout != 0:
            out_xg = self.convl2g(x_l) + self.convg2g(x_g)
        return out_xl, out_xg
class FFC_BN_ACT(nn.Module):
    """FFC followed by per-branch BatchNorm and activation.

    Takes a single tensor, splits it channel-wise into local/global parts by
    ``ratio_gin``, applies the FFC, then normalizes and activates each branch
    and concatenates the results back into one tensor.
    """

    def __init__(self, in_channels, out_channels,
                 kernel_size=3, ratio_gin=0.5, ratio_gout=0.5,
                 stride=1, padding=1, dilation=1, groups=1, bias=False,
                 norm_layer=nn.BatchNorm2d, activation_layer=nn.Identity,
                 enable_lfu=True):
        super(FFC_BN_ACT, self).__init__()
        self.ffc = FFC(in_channels, out_channels, kernel_size,
                       ratio_gin, ratio_gout, stride, padding, dilation,
                       groups, bias, enable_lfu)
        # A branch that produces no channels gets Identity norm/activation.
        lnorm = nn.Identity if ratio_gout == 1 else norm_layer
        gnorm = nn.Identity if ratio_gout == 0 else norm_layer
        self.bn_l = lnorm(int(out_channels * (1 - ratio_gout)))
        self.bn_g = gnorm(int(out_channels * ratio_gout))
        lact = nn.Identity if ratio_gout == 1 else activation_layer
        gact = nn.Identity if ratio_gout == 0 else activation_layer
        self.act_l = lact(inplace=True)
        self.act_g = gact(inplace=True)

    def forward(self, x):
        c = x.shape[1]
        c_l = int(c * (1 - self.ffc.ratio_gin))
        x_l, x_g = x[:, :c_l, :, :], x[:, c_l:, :, :]
        x_l, x_g = self.ffc((x_l, x_g))
        # FFC returns the scalar 0 for a disabled branch (ratio_gout 0 or 1);
        # only tensor branches are normalized/activated and concatenated —
        # torch.cat on an int would raise.
        outputs = []
        if torch.is_tensor(x_l):
            outputs.append(self.act_l(self.bn_l(x_l)))
        if torch.is_tensor(x_g):
            outputs.append(self.act_g(self.bn_g(x_g)))
        return outputs[0] if len(outputs) == 1 else torch.cat(outputs, dim=1)
if __name__ == '__main__':
    # Smoke test: pick GPU when available so the demo also runs on
    # CPU-only machines (the original hard-coded .cuda()).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    x = torch.randn(1, 16, 128, 128, device=device)
    model = FFC_BN_ACT(16, 16).to(device)
    out = model(x)
    print(out.shape)