import torch
import torch.nn as nn
import torch.nn.functional as F

#-------------------------------------------------#
#   Mish activation function
#-------------------------------------------------#
class Mish(nn.Module):
    def __init__(self):
        super(Mish, self).__init__()

    def forward(self, x):
        # Mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^x))
        return x * torch.tanh(F.softplus(x))
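If your PyTorch build ships nn.Mish (added in 1.9), a quick sanity check (a minimal sketch) confirms the hand-written module matches the built-in op:

# sanity check: hand-written Mish vs. built-in nn.Mish (requires PyTorch >= 1.9)
x = torch.randn(4)
assert torch.allclose(Mish()(x), nn.Mish()(x), atol=1e-6)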
#-------------------------------------------------#
#   SiLU activation function
#-------------------------------------------------#
# SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
class SiLU(nn.Module):  # export-friendly version of nn.SiLU()
    def __init__(self):
        super(SiLU, self).__init__()

    def forward(self, x):
        # SiLU(x) = x * sigmoid(x), also known as Swish
        return x * torch.sigmoid(x)
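These modules are typically dropped into a conv block in place of nn.SiLU; a minimal sketch (ConvBNAct is a hypothetical helper for illustration, not part of the original code):

class ConvBNAct(nn.Module):
    # hypothetical helper: Conv2d -> BatchNorm2d -> activation
    def __init__(self, c1, c2, k=3, s=1):
        super(ConvBNAct, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, k // 2, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = SiLU()  # swap in Mish() or Hardswish() as needed

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))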
#-------------------------------------------------#
#   Hardswish activation function
#-------------------------------------------------#
class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
    def __init__(self):
        super(Hardswish, self).__init__()

    def forward(self, x):
        # return x * F.hardsigmoid(x)  # for TorchScript and CoreML
        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for TorchScript, CoreML and ONNX
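The decomposed form above matters mainly at export time, since older ONNX opsets cannot represent the fused op; a minimal export sketch (the file name is illustrative, and the onnx package is assumed to be installed):

# export a tiny model using the export-friendly Hardswish to ONNX
model = nn.Sequential(nn.Conv2d(3, 8, 3), Hardswish())
dummy = torch.randn(1, 3, 32, 32)
torch.onnx.export(model, dummy, "hardswish_demo.onnx", opset_version=11)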
#-------------------------------------------------#
#   SMU activation function
#-------------------------------------------------#
import torch
from torch import nn
from matplotlib import pyplot as plt

# alpha controls the slope for x < 0: the smaller alpha is, the smaller the slope of y for x < 0
# mu controls the smoothness in a small neighborhood around x = 0
# with alpha = 0, as mu -> infinity the curve approaches ReLU (see the plotting sketch after the SMU class)
class SMU(nn.Module):
    '''
    Implementation of the SMU activation.
    Shape:
        - Input: (N, *) where * means any number of additional dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - alpha: hyperparameter (0.25 by default)
        - mu: trainable smoothing parameter (1e5 by default)
    References:
        - See the related paper: https://arxiv.org/abs/2111.04682
    Examples:
        >>> smu = SMU()
        >>> x = torch.Tensor([0.6, -0.3])
        >>> x = smu(x)
    '''
    def __init__(self, alpha=0.25, mu=100000):
        '''
        Initialization.
        INPUT:
            - alpha: hyperparameter, 0.25 by default
            - mu: initial value of the trainable smoothing parameter
        '''
        super(SMU, self).__init__()
        self.alpha = alpha
        # register mu as a trainable parameter
        self.mu = torch.nn.Parameter(torch.tensor(float(mu)))

    def forward(self, x):
        # SMU(x) = ((1 + alpha) * x + (1 - alpha) * x * erf(mu * (1 - alpha) * x)) / 2
        return ((1 + self.alpha) * x + (1 - self.alpha) * x * torch.erf(self.mu * (1 - self.alpha) * x)) / 2
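The effect of alpha and mu noted in the comments above can be checked visually; a minimal plotting sketch using the matplotlib import from this snippet (with alpha = 0, larger mu pushes the curve toward ReLU):

# visualize SMU for alpha = 0 and increasing mu
x = torch.linspace(-3, 3, 200)
for mu in (1.0, 10.0, 1000.0):
    smu = SMU(alpha=0.0, mu=mu)
    plt.plot(x.numpy(), smu(x).detach().numpy(), label=f'mu={mu}')
plt.legend()
plt.title('SMU with alpha=0: larger mu approaches ReLU')
plt.show()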
class SMU1(nn.Module):
    '''
    Implementation of the SMU-1 activation.
    Shape:
        - Input: (N, *) where * means any number of additional dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - alpha: hyperparameter (0.25 by default)
    References:
        - See the related paper: https://arxiv.org/abs/2111.04682
    Examples:
        >>> smu1 = SMU1()
        >>> x = torch.Tensor([0.6, -0.3])
        >>> x = smu1(x)
    '''
    def __init__(self, alpha=0.25):
        '''
        Initialization.
        INPUT:
            - alpha: hyperparameter, 0.25 by default
        '''
        super(SMU1, self).__init__()
        self.alpha = alpha
        # register mu as a (very small) trainable parameter
        self.mu = torch.nn.Parameter(torch.tensor(4.352665993287951e-9))

    def forward(self, x):
        # SMU-1(x) = ((1 + alpha) * x + sqrt((x - alpha * x)^2 + mu^2)) / 2
        return ((1 + self.alpha) * x + torch.sqrt(torch.square(x - self.alpha * x) + torch.square(self.mu))) / 2
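As mu -> 0 the square root collapses to (1 - alpha) * |x|, so SMU-1 reduces to max(x, alpha * x), i.e. LeakyReLU with negative slope alpha; since the default mu is already ~4.35e-9, a quick check (a minimal sketch):

# SMU-1 with a tiny mu is numerically indistinguishable from LeakyReLU
x = torch.randn(8)
smu1 = SMU1(alpha=0.25)
assert torch.allclose(smu1(x), nn.functional.leaky_relu(x, negative_slope=0.25), atol=1e-6)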
def test_SMU(x):
    smu_activation = SMU()
    print(f'SMU = {smu_activation(x)}')

def test_SMU1(x):
    smu1_activation = SMU1()
    print(f'SMU1 = {smu1_activation(x)}')

def test():
    x = torch.Tensor([0.6, -0.3])
    test_SMU(x)
    test_SMU1(x)

if __name__ == '__main__':
    test()
An adaptive activation function is currently being implemented; experiments are still in progress...