13.1 ReLU的使用
本节中所学习的 PyTorch 官方文档地址：link
13.1.1 ReLU的直观理解
13.1.2 代码实现
"inplace"的直观理解
# 默认"inplace=False";一般建议"inplace=False",保留原始数据
import torch
from torch import nn
from torch.nn import ReLU

# Small hand-made example tensor; named `x` instead of `input`
# to avoid shadowing the Python builtin of the same name.
x = torch.tensor([[1, -0.5],
                  [-1, 3]])
# ReLU works on any shape, but reshape to (N, C, H, W) to mimic an image batch.
x = torch.reshape(x, (-1, 1, 2, 2))
print(x.shape)


class Avlon(nn.Module):
    """Minimal module that applies ReLU: negatives -> 0, non-negatives kept."""

    def __init__(self):
        super().__init__()
        # Default inplace=False keeps the original tensor untouched.
        self.relu1 = ReLU()

    def forward(self, input):
        """Return ReLU(input), same shape as the input."""
        output = self.relu1(input)
        return output


avlon = Avlon()
output = avlon(x)
print(output)
运行后
13.2 Sigmoid的使用
本节中所学习的 PyTorch 官方文档地址：link
13.2.1 Sigmoid的直观理解
13.2.2 代码实现
import torch
import torchvision
from torch import nn
from torch.nn import ReLU, Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Small hand-made example to illustrate the expected (N, C, H, W) layout.
input = torch.tensor([[1, -0.5],
                      [-1, 3]])
input = torch.reshape(input, (-1, 1, 2, 2))
print(input.shape)

# CIFAR-10 test split, converted to tensors and batched for the demo.
dataset = torchvision.datasets.CIFAR10("../data", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)


class Avlon(nn.Module):
    """Holds both ReLU and Sigmoid layers; forward applies only Sigmoid."""

    def __init__(self):
        super().__init__()
        self.relu1 = ReLU()
        self.sigmoid1 = Sigmoid()

    def forward(self, input):
        # Sigmoid squashes every element into the open interval (0, 1).
        return self.sigmoid1(input)


avlon = Avlon()

# Log each batch before and after the activation so TensorBoard can compare.
writer = SummaryWriter("../logs_relu")
for step, (imgs, targets) in enumerate(dataloader):
    writer.add_images("input", imgs, global_step=step)
    writer.add_images("output", avlon(imgs), global_step=step)
writer.close()
运行后