11.初识Pytorch激活函数,线性层及其他层(Batch Normalization)等并尽可能对其进行可视化

跟之前的卷积层类似。

  1. 激活函数
  • ReLU激活函数,上代码:
import torch.nn as nn
import torchvision
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader


class MyActivation(nn.Module):
    """Module wrapper around element-wise ReLU: out = max(0, x)."""

    def __init__(self):
        super(MyActivation, self).__init__()
        # inplace=False keeps the input tensor untouched and returns a new one.
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        # Negative entries become 0; non-negative entries pass through unchanged.
        return self.relu(x)


writer = SummaryWriter("logs")
tran_tensor = transforms.ToTensor()
# CIFAR-10 test split, each image converted to a [0, 1] float tensor.
dataset = torchvision.datasets.CIFAR10(root="../dataset", transform=tran_tensor, train=False, download=True)
dataloader = DataLoader(dataset=dataset, batch_size=64, shuffle=True, num_workers=0, drop_last=False)

# Build the activation module once; re-creating it every batch was wasteful.
activation = MyActivation()

step = 0
for data in dataloader:
    imgs, targets = data
    print("imgs.shape", imgs.shape)
    writer.add_images("original_imgs", imgs, step)
    outputs = activation(imgs)
    print("outputs.shape", outputs.shape)
    writer.add_images("outputs", outputs, step)
    # BUG FIX: step was never advanced, so every batch overwrote
    # TensorBoard step 0; now each batch gets its own slider position.
    step = step + 1
writer.close()

run之后,输入命令行:

tensorboard --logdir=logs

结果:
在这里插入图片描述


  • sigmoid激活函数,上代码:
import torch.nn as nn
import torchvision
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader


class MyActivation(nn.Module):
    """Module wrapper around the element-wise sigmoid: out = 1 / (1 + exp(-x))."""

    def __init__(self):
        super(MyActivation, self).__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Squashes every element into the open interval (0, 1).
        return self.sigmoid(x)


writer = SummaryWriter("logs")
tran_tensor = transforms.ToTensor()
# CIFAR-10 test split, each image converted to a [0, 1] float tensor.
dataset = torchvision.datasets.CIFAR10(root="../dataset", transform=tran_tensor, train=False, download=True)
dataloader = DataLoader(dataset=dataset, batch_size=64, shuffle=True, num_workers=0, drop_last=False)

# Build the activation module once; re-creating it every batch was wasteful.
activation = MyActivation()

step = 0
for data in dataloader:
    imgs, targets = data
    print("imgs.shape", imgs.shape)
    writer.add_images("original_imgs", imgs, step)
    outputs = activation(imgs)
    print("outputs.shape", outputs.shape)
    writer.add_images("outputs", outputs, step)
    # BUG FIX: step was never advanced, so every batch overwrote
    # TensorBoard step 0; now each batch gets its own slider position.
    step = step + 1
writer.close()

run之后,输入命令行:

tensorboard --logdir=logs

结果:
在这里插入图片描述

  2. 线性层
    nn.Linear,上代码:
import torch
import torchvision
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader

# 线性层类
class MyLinear(nn.Module):
    def __init__(self):
        super(MyLinear, self).__init__()
        self.linear1 = nn.Linear(196608, 10)

    def forward(self, x):
        output = self.linear1(x)
        return output


tran_tensor = transforms.ToTensor()
dataset = torchvision.datasets.CIFAR10(root="../dataset", train=False, transform=tran_tensor, download=True)
# drop_last=True guarantees every batch holds exactly 64 images, so the
# flattened batch is always 64 * 3 * 32 * 32 = 196608 values — the fixed
# input width that MyLinear expects.
dataloader = DataLoader(dataset=dataset, batch_size=64, shuffle=True, num_workers=0, drop_last=True)

mylinear = MyLinear()

for data in dataloader:
    # typo fixes: local names were "dataloder" and "targes"
    imgs, targets = data
    # torch.Size([64, 3, 32, 32])
    print(imgs.shape)
    # Flatten the entire batch into one long row: torch.Size([1, 1, 1, 196608])
    outputs = torch.reshape(imgs, (1, 1, 1, -1))
    print(outputs.shape)
    # Linear layer maps the last dimension 196608 -> 10: torch.Size([1, 1, 1, 10])
    outputs = mylinear(outputs)
    print(outputs.shape)

结果:
在这里插入图片描述

  3. Batch Normalization(粗略地、不严谨地讲:通常可以加速训练收敛,也可能提高精度)

nn.Conv2d 搭配 nn.BatchNorm2d,上代码:

import torch
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.nn as nn
import torchvision
from torch.utils.tensorboard import SummaryWriter


class MyBatchNormalization(nn.Module):
    def __init__(self):
        super(MyBatchNormalization, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0)
        self.batch_norm = nn.BatchNorm2d(6)

    def forward(self, x):
        x = self.conv1(x)
        x = self.batch_norm(x)
        return x


writer = SummaryWriter("logs")

tran_tensor = transforms.ToTensor()
dataset = torchvision.datasets.CIFAR10(root='../dataset', train=False, transform=tran_tensor, download=True)
dataloader = DataLoader(dataset=dataset, batch_size=64, shuffle=True, num_workers=0, drop_last=True)

my_batch_norm = MyBatchNormalization()
step = 0
for data in dataloader:
    imgs, targets = data
    print("imgs.shape", imgs.shape)
    writer.add_images("input", imgs, step)
    outputs = my_batch_norm(imgs)
    # The conv produces 6 channels, which add_images cannot render; regroup
    # as 3-channel images (batch doubles: 64 -> 128) purely for visualization.
    outputs = torch.reshape(outputs, (-1, 3, 30, 30))
    print("outputs.shape", outputs.shape)
    writer.add_images("outputs", outputs, step)
    step = step + 1
# BUG FIX: the writer was never closed, so buffered events could be lost
# before TensorBoard reads them (the other scripts in this post do close it).
writer.close()

run之后,运行命令:

tensorboard --logdir=logs

结果
在这里插入图片描述

上一章 10.初识Pytorch使用池化层并对其进行可视化
下一章 12.初识Pytorch搭建网络 LeNet-5复现(含nn.Sequential用法)

  • 3
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值