为什么要最大池化
保留输入的特征,同时减少数据量,加快训练速度
MaxPool2d
图1. 参数
stride:默认为池化核大小
ceil_mode:为True则保留边缘数据
注意输入输出的维度,必要时需要进行维度变换。
实例
import torch
import torchvision
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets
# Input data: a hand-written 5x5 matrix for a quick manual check of max pooling.
# NOTE(review): the name `input` shadows the builtin; kept for compatibility.
input = torch.tensor(
    [[1, 2, 0, 3, 1],
     [0, 1, 2, 3, 1],
     [1, 2, 1, 0, 0],
     [5, 2, 3, 1, 1],
     [2, 1, 0, 1, 1]],
    dtype=torch.float32,
)
# input = torch.reshape(input, (-1, 1, 5, 5))
# CIFAR10 test split, converted to tensors; downloads to ../pytorch_learn/dataset2 on first run.
dataset = torchvision.datasets.CIFAR10("../pytorch_learn/dataset2", train=False, transform=torchvision.transforms.ToTensor(), download=True)
# Batch images 64 at a time for the pooling demo below.
dataloader = DataLoader(dataset, batch_size=64)
### Build the network
class tudui(nn.Module):
    """Minimal module wrapping a single 3x3 max-pooling layer.

    ceil_mode=False discards any incomplete window at the edges, so a
    5x5 input yields a 1x1 output with kernel_size=3 (stride defaults
    to the kernel size).
    """

    def __init__(self):
        super(tudui, self).__init__()
        self.maxpool = MaxPool2d(kernel_size=3, ceil_mode=False)

    def forward(self, input):
        # Pooling preserves batch and channel dims; only spatial dims shrink.
        return self.maxpool(input)
pool = tudui()  # create an instance of the pooling network

# output = pool(input)
# print(output)

# Log each batch (before and after pooling) to TensorBoard under logs_maxpool.
writer = SummaryWriter("logs_maxpool")
step = 0
for data in dataloader:
    # Fix: the label variable was misspelled `traget`; it is unused here anyway.
    img, target = data
    writer.add_images("maxpool", img, step)
    output = pool(img)
    # Pooling keeps the channel count unchanged, so no reshape is needed
    # before logging the pooled images.
    writer.add_images("output", output, step)
    step = step + 1
writer.close()