import torch
import torch.nn as nn
import torch.nn.functional as F
class HSwish(nn.Module):
    """Hard-swish activation: ``x * relu6(x + 3) / 6`` (MobileNetV3 paper)."""

    def forward(self, x):
        # relu6 clamps the shifted input to [0, 6]; dividing by 6 yields a
        # piecewise-linear gate in [0, 1] that multiplies the input.
        gate = F.relu6(x + 3.0)
        return gate * x / 6.0
class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel attention.

    The feature map is squeezed to 1x1 by global average pooling, passed
    through a two-layer 1x1-conv bottleneck (ReLU then sigmoid), and the
    resulting per-channel gate rescales the input.
    """

    def __init__(self, in_channels, reduction=4):
        super(SEBlock, self).__init__()
        squeezed = in_channels // reduction
        self.fc1 = nn.Conv2d(in_channels, squeezed, kernel_size=1)
        self.fc2 = nn.Conv2d(squeezed, in_channels, kernel_size=1)

    def forward(self, x):
        # Squeeze: one value per channel.
        gate = F.adaptive_avg_pool2d(x, 1)
        # Excite: bottleneck MLP implemented as 1x1 convs.
        gate = F.relu(self.fc1(gate))
        gate = torch.sigmoid(self.fc2(gate))
        # Broadcast the (N, C, 1, 1) gate over the spatial dims.
        return x * gate
class MobileBottleneck(nn.Module):
    """MobileNetV3 inverted-residual block.

    Pipeline: optional 1x1 expansion -> depthwise conv -> optional SE ->
    1x1 linear projection, with an identity shortcut when stride is 1 and
    the channel count is unchanged. ``nl`` selects the non-linearity:
    "HS" for hard-swish, anything else for ReLU.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, expand_ratio, se=False, nl="RE"):
        super(MobileBottleneck, self).__init__()
        hidden_dim = round(in_channels * expand_ratio)
        # Residual shortcut is valid only when resolution and width are kept.
        self.use_res_connect = stride == 1 and in_channels == out_channels
        self.nl = nl
        self.se = se
        self.expand = in_channels != hidden_dim
        if self.expand:
            # Pointwise expansion up to the hidden width.
            self.conv1 = nn.Conv2d(in_channels, hidden_dim, kernel_size=1, bias=False)
            self.bn1 = nn.BatchNorm2d(hidden_dim)
        # Depthwise conv (groups == channels) carries the spatial stride.
        self.conv2 = nn.Conv2d(hidden_dim, hidden_dim, kernel_size=kernel_size,
                               stride=stride, padding=kernel_size // 2,
                               groups=hidden_dim, bias=False)
        self.bn2 = nn.BatchNorm2d(hidden_dim)
        self.se_block = SEBlock(hidden_dim) if self.se else nn.Identity()
        # Pointwise linear projection back down to out_channels (no activation after).
        self.conv3 = nn.Conv2d(hidden_dim, out_channels, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        self.hswish = HSwish()
        self.relu = nn.ReLU(inplace=True)

    def _activate(self, x):
        # Single place deciding between the two configured non-linearities.
        if self.nl == "HS":
            return self.hswish(x)
        return self.relu(x)

    def forward(self, x):
        identity = x
        out = x
        if self.expand:
            out = self._activate(self.bn1(self.conv1(out)))
        # SE is applied after the depthwise BN, before the activation,
        # matching the original statement order.
        out = self.se_block(self.bn2(self.conv2(out)))
        out = self._activate(out)
        out = self.bn3(self.conv3(out))
        if self.use_res_connect:
            out = out + identity
        return out
class MobileNetV3(nn.Module):
    """MobileNetV3 image classifier (only the "small" variant is implemented).

    Each config row is ``[kernel, expansion_width, out_channels, use_se,
    nonlinearity, stride]`` and parameterizes one MobileBottleneck stage.
    """

    def __init__(self, num_classes=1000, mode="small"):
        super(MobileNetV3, self).__init__()
        self.mode = mode
        if mode != "small":
            raise ValueError("Unsupported mode: {}".format(mode))
        # [k, exp, c, se, nl, s] for the MobileNetV3-small stack.
        self.cfgs = [
            [3, 16, 16, 0, 'RE', 2],
            [3, 72, 24, 0, 'RE', 2],
            [3, 88, 24, 0, 'RE', 1],
            [5, 96, 40, 1, 'HS', 2],
            [5, 240, 40, 1, 'HS', 1],
            [5, 240, 40, 1, 'HS', 1],
            [5, 120, 48, 1, 'HS', 1],
            [5, 144, 48, 1, 'HS', 1],
            [5, 288, 96, 1, 'HS', 2],
            [5, 576, 96, 1, 'HS', 1],
            [5, 576, 96, 1, 'HS', 1],
        ]
        self.last_channel = 576
        stem_channels = 16
        # Stem: strided 3x3 conv, BN, hard-swish.
        self.first_conv = nn.Sequential(
            nn.Conv2d(3, stem_channels, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(stem_channels),
            HSwish(),
        )
        self.blocks = self._make_layers(in_channel=stem_channels)
        # Head: 1x1 conv widening the final stage output to last_channel.
        self.last_conv = nn.Sequential(
            nn.Conv2d(self.cfgs[-1][2], self.last_channel, kernel_size=1, bias=False),
            nn.BatchNorm2d(self.last_channel),
            HSwish(),
        )
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Sequential(
            nn.Linear(self.last_channel, 1024),
            HSwish(),
            nn.Dropout(0.2),
            nn.Linear(1024, num_classes),
        )

    def _make_layers(self, in_channel):
        """Build the bottleneck stack, threading the channel count through."""
        stages = []
        for k, exp, c, se, nl, s in self.cfgs:
            # The block reconstructs exp via round(in_channel * ratio).
            ratio = exp / in_channel
            stages.append(MobileBottleneck(in_channel, c, k, s, ratio, se, nl))
            in_channel = c
        return nn.Sequential(*stages)

    def forward(self, x):
        features = self.first_conv(x)
        features = self.blocks(features)
        features = self.last_conv(features)
        pooled = self.avgpool(features)
        flat = torch.flatten(pooled, 1)
        return self.classifier(flat)
if __name__ == "__main__":
    # Smoke test: print a layer-by-layer summary of the small variant.
    # Guarded so importing this module does not build a model or hit the GPU.
    # torchsummary is an optional third-party dependency, so import it lazily.
    from torchsummary import summary

    # Fall back to CPU so the script also runs on machines without CUDA
    # (the original hard-coded cuda:0 and crashed on CPU-only hosts).
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    model = MobileNetV3(num_classes=150, mode="small").to(device)
    summary(model, input_size=(3, 224, 224), device="cuda" if use_cuda else "cpu")
# NOTE(review): removed non-code residue here — post-date stamps, view
# counts, and a "was this helpful" rating widget left over from a web-page
# scrape. The lines were not Python and made the file unparseable.