非线性变换的主要目的就是给网络中加入一些非线性特征
非线性变化基本操作:
在进行网络初始化时,发现ReLU有一个参数inplace:
#inplace:True:input进行替换 False:input不变,返回一个output
input = -1
ReLU(input, inplace=True)
input = 0
input = -1
output = ReLU(input, inplace = False)
input = -1
output = 0
import torch
from torch import nn
from torch.nn import ReLU
# Build a 2x2 example tensor and add batch and channel dimensions.
# The -1 lets PyTorch infer the batch size automatically (here: 1),
# giving a (batch, channel, height, width) = (1, 1, 2, 2) tensor.
input = torch.reshape(
    torch.tensor([[1, -0.5],
                  [1, 3]]),
    (-1, 1, 2, 2),
)
class Test(nn.Module):
    """Minimal module that applies an element-wise ReLU to its input."""

    def __init__(self):
        super().__init__()
        # Default ReLU (inplace=False): returns a new tensor and leaves
        # the input untouched.
        self.relu1 = ReLU()

    def forward(self, input):
        # Negative entries are clamped to zero; non-negatives pass through.
        return self.relu1(input)
# Instantiate the module and run the example tensor through it.
test1 = Test()
# Forward pass: ReLU zeroes the negative entry (-0.5 -> 0).
output = test1(input)
print(output)
输出结果(负数截断):
tensor([[[[1., 0.],
[1., 3.]]]])
非线性变换的主要目的是在网络当中引入一些非线性特征,非线性越多才能训练出符合各种特征的模型
import torch
import torchvision
from torch import nn
from torch.nn import ReLU, Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# Small example tensor, reshaped to (batch, channel, H, W);
# the -1 asks PyTorch to infer the batch size (here: 1).
input = torch.tensor([[1, -0.5],
                      [1, 3]]).reshape(-1, 1, 2, 2)
# CIFAR10 test split, converted from PIL images to tensors,
# served in batches of 64 for the visualization loop below.
to_tensor = torchvision.transforms.ToTensor()
dataset = torchvision.datasets.CIFAR10(
    "../data",
    train=False,
    download=True,
    transform=to_tensor,
)
dataloader = DataLoader(dataset, batch_size=64)
class Test(nn.Module):
    """Applies a Sigmoid activation to each input tensor.

    A ReLU layer is also constructed for comparison, but ReLU barely
    changes natural images (pixel values are already >= 0), so the
    forward pass uses Sigmoid instead, which maps every real number
    into the open interval (0, 1).
    """

    def __init__(self):
        super().__init__()
        # inplace=True would overwrite the input tensor; the default
        # (inplace=False) returns a new tensor and leaves input intact.
        self.relu1 = ReLU()
        # Sigmoid squashes values into (0, 1); classically used as the
        # output activation for binary classification.
        self.sigmoid1 = Sigmoid()

    def forward(self, input):
        # Only the sigmoid is applied; relu1 is kept but unused here.
        return self.sigmoid1(input)
# Log each batch before and after the activation to TensorBoard
# (run `tensorboard --logdir logs_relu` to inspect the images).
test1 = Test()
writer = SummaryWriter("logs_relu")
try:
    # enumerate replaces the manual `step = 0; step += 1` counter.
    for step, (imgs, targets) in enumerate(dataloader):
        writer.add_images("input", imgs, global_step=step)
        # Sigmoid output lies in (0, 1), so it renders directly as an image.
        output = test1(imgs)
        writer.add_images("output", output, global_step=step)
finally:
    # Ensure the event file is flushed/closed even if a batch raises.
    writer.close()
结果: