1. 啥也别说,先上代码
# 依赖项
import time
from torch import nn
import torch
import torch.nn.functional as F
from typing import TypeVar, Union, Tuple, Optional
T = TypeVar('T')
# 代码主体
class SuperConv2d(nn.Module):
    """Conv2d wrapper with optional depthwise-separable conv, BatchNorm, and activation.

    Accepts the same arguments as ``nn.Conv2d`` plus:
        use_sep_conv: use a depthwise conv followed by a 1x1 pointwise conv
            instead of a single regular convolution.
        use_bn: append a ``BatchNorm2d`` after the convolution.
        use_activate_function: apply an activation function last.
        activate_layer: an activation module instance to use directly;
            defaults to ``nn.ReLU()`` when None.
        activate_name: name of an activation class in ``torch.nn``
            (e.g. ``"ReLU"``); takes precedence over ``activate_layer``,
            but the class is constructed with no arguments.

    Forward pipeline: conv (or depthwise + pointwise) -> optional BN ->
    optional activation.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[T, Tuple[T, T]],
                 stride: Union[T, Tuple[T, T]] = 1,
                 padding: Union[str, Union[T, Tuple[T, T]]] = 0,
                 dilation: Union[T, Tuple[T, T]] = 1,
                 bias: bool = True,
                 use_sep_conv: bool = False,
                 use_bn: bool = False,
                 use_activate_function: bool = False,
                 activate_layer: Optional[nn.Module] = None,
                 activate_name: Optional[str] = None,
                 groups: int = 1,
                 padding_mode: str = 'zeros',
                 device=None,
                 dtype=None
                 ) -> None:
        super().__init__()
        self.use_sep_conv = use_sep_conv
        self.use_bn = use_bn
        self.use_activate_function = use_activate_function
        if self.use_sep_conv:
            # Depthwise: one filter per input channel (groups=in_channels).
            self.depthwise_conv = nn.Conv2d(in_channels, in_channels, kernel_size, stride,
                                            padding, dilation, groups=in_channels, bias=bias,
                                            padding_mode=padding_mode, device=device,
                                            dtype=dtype)
            # Pointwise 1x1 conv mixes channels. Bug fix: honor the caller's
            # bias/device/dtype instead of silently using the defaults.
            self.pointwise_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1,
                                            bias=bias, device=device, dtype=dtype)
        else:
            self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding,
                                  dilation, groups, bias, padding_mode, device, dtype)
        if self.use_bn:
            self.bn = nn.BatchNorm2d(out_channels)
        if self.use_activate_function:
            if activate_name is not None:
                # Bug fix: getattr yields the activation *class*; it must be
                # instantiated here, otherwise forward() would invoke the class
                # constructor on the tensor (e.g. nn.ReLU(out)).
                self.activate_layer = getattr(nn, activate_name)()
            else:
                # Avoid a shared mutable default argument: build the default
                # ReLU per instance when the caller passes nothing.
                self.activate_layer = activate_layer if activate_layer is not None else nn.ReLU()

    def forward(self, x):
        """Run conv -> optional BatchNorm -> optional activation; return the result."""
        if self.use_sep_conv:
            out = self.pointwise_conv(self.depthwise_conv(x))
        else:
            out = self.conv(x)
        if self.use_bn:
            out = self.bn(out)
        if self.use_activate_function:
            out = self.activate_layer(out)
        return out
2. 实验
3. 传入参数与nn.Conv2d一致,只不过多了use_sep_conv,use_bn,use_activate_function,activate_layer,activate_name几个额外参数
use_sep_conv: 是否使用深度可分离卷积
use_bn: 是否接BN层 - BN层在卷积层之后
use_activate_function: 是否使用激活函数 - 激活函数最后使用
activate_layer: 可以直接传入一个nn.Module的激活函数层,会直接使用
activate_name: 可以通过名称指定激活函数,只不过就不能传入特定参数了
ps: 如果使用激活函数,且不传入参数,那么默认ReLU函数