# DilatedEncoder: from YOLOF ("You Only Look One-level Feature")
# Code adapted from https://github.com/yjh0410
import torch
import torch.nn as nn
# from ..basic.conv import Conv
def get_activation1(name="lrelu", inplace=True):
    """Build an activation module by name.

    Args:
        name: one of ``"silu"``, ``"relu"``, ``"lrelu"`` (LeakyReLU with
            negative slope 0.1), or ``None`` for an identity (no activation).
        inplace: forwarded to the activation's ``inplace`` flag.

    Returns:
        The corresponding ``nn.Module`` instance.

    Raises:
        AttributeError: if ``name`` is not a supported activation.
    """
    # None explicitly means "no activation".
    if name is None:
        return nn.Identity()
    # Dispatch table of lazily-constructed activations.
    factories = {
        "silu": lambda: nn.SiLU(inplace=inplace),
        "relu": lambda: nn.ReLU(inplace=inplace),
        "lrelu": lambda: nn.LeakyReLU(0.1, inplace=inplace),
    }
    if name not in factories:
        raise AttributeError("Unsupported act type: {}".format(name))
    return factories[name]()
# Basic conv1 layer
class Conv1(nn.Module):
    """Conv2d -> BatchNorm2d -> activation block.

    Args:
        c1: input channels.
        c2: output channels.
        k: kernel size.
        p: padding.
        s: stride.
        d: dilation.
        g: groups (ignored when ``depthwise`` is set).
        act: activation name passed to ``get_activation1``.
        depthwise: if True, use a depthwise conv (groups=c1) followed by a
            1x1 pointwise conv; requires ``c1 == c2``.
        bias: whether the convolutions use a bias term.
    """

    def __init__(self, c1, c2, k=1, p=0, s=1, d=1, g=1, act='lrelu',
                 depthwise=False, bias=False):
        super(Conv1, self).__init__()
        if depthwise:
            # Depthwise-separable: per-channel conv, then 1x1 channel mixing.
            assert c1 == c2
            layers = [
                nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d,
                          groups=c1, bias=bias),
                nn.BatchNorm2d(c2),
                get_activation1(name=act),
                nn.Conv2d(c2, c2, kernel_size=1, bias=bias),
                nn.BatchNorm2d(c2),
                get_activation1(name=act),
            ]
        else:
            # Plain grouped convolution.
            layers = [
                nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d,
                          groups=g, bias=bias),
                nn.BatchNorm2d(c2),
                get_activation1(name=act),
            ]
        self.convs = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the conv-bn-act stack to ``x``."""
        return self.convs(x)
# Dilated Encoder
class DilatedBottleneck(nn.Module):
    """Residual bottleneck with a dilated 3x3 convolution (YOLOF encoder block).

    Args:
        c: number of input/output channels (preserved by the residual add).
        d: dilation (and matching padding) of the middle 3x3 conv.
        e: bottleneck expansion ratio; hidden channels = int(c * e).
        act: activation name passed to ``Conv1``.
    """

    def __init__(self, c, d=1, e=0.5, act='lrelu'):
        super(DilatedBottleneck, self).__init__()
        hidden = int(c * e)
        self.branch = nn.Sequential(
            Conv1(c, hidden, k=1, act=act),                 # reduce channels
            Conv1(hidden, hidden, k=3, p=d, d=d, act=act),  # dilated 3x3
            Conv1(hidden, c, k=1, act=act),                 # restore channels
        )

    def forward(self, x):
        """Residual connection around the bottleneck branch."""
        return x + self.branch(x)
class DilatedEncoder(nn.Module):
    """DilatedEncoder from YOLOF.

    Projects the backbone feature map to ``c2`` channels, then stacks
    residual dilated bottlenecks with increasing dilation rates to enlarge
    the receptive field at a single feature level.

    Args:
        c1: input channels from the backbone.
        c2: encoder output channels.
        act: activation name used inside the bottleneck blocks.
        dilation_list: dilation rate per bottleneck block; defaults to
            ``[2, 4, 6, 8]`` when ``None``.
    """

    def __init__(self, c1, c2, act='lrelu', dilation_list=None):
        super(DilatedEncoder, self).__init__()
        # Fix: avoid a mutable default argument — the original shared one
        # list object across every instantiation.
        if dilation_list is None:
            dilation_list = [2, 4, 6, 8]
        # Projector uses no activation (act=None), matching YOLOF.
        self.projector = nn.Sequential(
            Conv1(c1, c2, k=1, act=None),
            Conv1(c2, c2, k=3, p=1, act=None),
        )
        self.encoders = nn.Sequential(
            *[DilatedBottleneck(c=c2, d=d, act=act) for d in dilation_list]
        )

    def forward(self, x):
        """Project ``x`` to ``c2`` channels and refine with dilated blocks."""
        x = self.projector(x)
        return self.encoders(x)