9. Non-linear Activation
Non-linear activations introduce non-linearity into the neural network. The most common ones are nn.ReLU() and nn.Sigmoid().
1. nn.ReLU()
Official documentation: https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html?highlight=relu#torch.nn.ReLU
The input needs to be given a batch_size dimension (hence the reshape in the code below).
$$
\mathrm{ReLU}(x) = (x)^+ = \max(0, x)
$$
Code:
import torch
from torch import nn

# input is a (2, 2) tensor
input = torch.tensor([[1, -0.5],
                      [-1, 3]])
# reshape it into a 4-dimensional tensor (batch_size, channels, height, width)
input = torch.reshape(input, (-1, 1, 2, 2))
print(input.shape)
# prints torch.Size([1, 1, 2, 2])
# the tensor now looks like: [[[[ 1.0, -0.5],
#                               [-1.0,  3.0]]]]

# now define the network
class MyNet(nn.Module):
    def __init__(self):
        super(MyNet, self).__init__()
        # nn.ReLU takes only one argument: inplace (bool, default False)
        self.relu1 = nn.ReLU(inplace=False)

    def forward(self, input):
        output = self.relu1(input)
        return output

mynet = MyNet()
output = mynet(input)
print(output)
# output: tensor([[[[1., 0.],
#                   [0., 3.]]]]) -- negative values are clipped to 0
inplace
If input = -1 and we apply ReLU with inplace=True, then input itself becomes 0.
With inplace=False, output = ReLU(input) yields output = 0 while input stays -1.
inplace controls whether the original variable is overwritten in place; it is usually left as False to avoid losing the original data.
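To make this concrete, here is a minimal sketch of both modes (the variable names x and y are mine, not from the original example):

import torch
from torch import nn

x = torch.tensor([-1.0])
out = nn.ReLU(inplace=False)(x)
print(x, out)   # tensor([-1.]) tensor([0.])  -- x is preserved

y = torch.tensor([-1.0])
out = nn.ReLU(inplace=True)(y)
print(y, out)   # tensor([0.]) tensor([0.])   -- y itself was overwritten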
2. nn.Sigmoid()
Official documentation: https://pytorch.org/docs/stable/generated/torch.nn.Sigmoid.html?highlight=sigmoid#torch.nn.Sigmoid
$$
\mathrm{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}
$$
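As a quick sanity check of the formula (this snippet and its sample values are illustrative, not part of the original example), nn.Sigmoid() matches 1 / (1 + exp(-x)) computed directly:

import torch
from torch import nn

x = torch.tensor([-1.0, 0.0, 2.0])
sigmoid = nn.Sigmoid()
print(sigmoid(x))                # tensor([0.2689, 0.5000, 0.8808])
print(1 / (1 + torch.exp(-x)))   # same values, computed from the formula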
Code:
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("./data", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)

class MyNet(nn.Module):
    def __init__(self):
        super(MyNet, self).__init__()
        self.sigmoid1 = nn.Sigmoid()

    def forward(self, x):
        output = self.sigmoid1(x)
        return output

mynet = MyNet()
writer = SummaryWriter("logs")
step = 0
for data in dataloader:
    imgs, targets = data
    writer.add_images("input", imgs, global_step=step)
    output = mynet(imgs)
    writer.add_images("output", output, step)
    step += 1
writer.close()
# view the results with: tensorboard --logdir=logs
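Since ToTensor produces pixel values in [0, 1], the sigmoid maps them into roughly (0.5, 0.73), so in TensorBoard the "output" images should appear brighter and lower in contrast than the "input" images.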