池化层的编程跟之前的卷积层类似。
- 最大池化层,上代码:
import torch.nn as nn
import torch
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
# A model wrapping a single max-pooling layer.
# The three essentials of a PyTorch model:
#   1. subclass nn.Module
#   2. build the layers in __init__()
#   3. define the computation in forward()
class MyPool(nn.Module):
    """Single-layer max-pooling model."""

    def __init__(self):
        super(MyPool, self).__init__()
        # 3x3 pooling window, stride 1, no padding.
        self.maxpool1 = torch.nn.MaxPool2d(kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        # Apply the max-pooling layer and hand the result straight back.
        return self.maxpool1(x)
# Convert dataset images from PIL format to Tensor format.
tran_tensor = transforms.ToTensor()
# Use the CIFAR10 dataset.
# root: dataset location; train: whether to use the training split;
# transform: per-image transform; download: whether to download the data.
dataset = torchvision.datasets.CIFAR10(root="../dataset", train=False, transform=tran_tensor, download=True)
# Wrap the dataset in a DataLoader.
# batch_size: images loaded per batch; shuffle: whether to reshuffle each epoch;
# num_workers: worker processes (0 = load in the main process);
# drop_last: whether to drop the final partial batch when the dataset size
# is not divisible by batch_size.
dataloader = DataLoader(dataset=dataset, batch_size=64, shuffle=True, num_workers=0, drop_last=False)
# Create the TensorBoard SummaryWriter (logs go to the "logs" directory).
writer = SummaryWriter("logs")
# FIX: construct the model once, before the loop — the original re-created
# MyPool() on every batch, allocating a new module per iteration for no gain.
mypool = MyPool()
step = 0
for data in dataloader:
    imgs, targets = data
    print("imgs.shape", imgs.shape)
    writer.add_images("original_imgs", imgs, step)
    # Feed the batch of images through the single max-pooling layer.
    outputs = mypool(imgs)
    print("outputs.shape", outputs.shape)
    writer.add_images("outputs", outputs, step)
    step = step + 1
writer.close()
运行脚本之后,在命令行中输入:
tensorboard --logdir=logs
结果:
- 平均池化层,上代码:
import torch.nn as nn
import torch
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
class MyPool(nn.Module):
    """Single-layer average-pooling model.

    Same three PyTorch model essentials as before: subclass nn.Module,
    build layers in __init__(), define the computation in forward().
    """

    def __init__(self):
        super(MyPool, self).__init__()
        # 3x3 averaging window, stride 1, no padding.
        self.avgpool1 = torch.nn.AvgPool2d(kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        # Apply the average-pooling layer and return the result directly.
        return self.avgpool1(x)
# Convert dataset images from PIL format to Tensor format.
tran_tensor = transforms.ToTensor()
# Use the CIFAR10 test split; download it to ../dataset if missing.
dataset = torchvision.datasets.CIFAR10(root="../dataset", train=False, transform=tran_tensor, download=True)
# Batch the dataset: 64 images per batch, shuffled, loaded in the main
# process, keeping the final partial batch.
dataloader = DataLoader(dataset=dataset, batch_size=64, shuffle=True, num_workers=0, drop_last=False)
# TensorBoard writer; logs go to the "logs" directory.
writer = SummaryWriter("logs")
# FIX: construct the model once, before the loop — the original re-created
# MyPool() on every batch, allocating a new module per iteration for no gain.
mypool = MyPool()
step = 0
for data in dataloader:
    imgs, targets = data
    print("imgs.shape", imgs.shape)
    writer.add_images("original_imgs", imgs, step)
    # Feed the batch of images through the single average-pooling layer.
    outputs = mypool(imgs)
    print("outputs.shape", outputs.shape)
    writer.add_images("outputs", outputs, step)
    step = step + 1
writer.close()
运行脚本之后,在命令行中输入:
tensorboard --logdir=logs
结果:
上一章 9.初识Pytorch使用卷积层并对其进行可视化
下一章 11.初识Pytorch激活函数,线性层及其他层(Batch Normalization)等并尽可能对其进行可视化