最大池化(下采样)
目的:保留输入的主要特征,同时减少数据量。参数更少了,使得训练得更快。
参数:
kernel_size:池化窗口大小(注意是池化窗口,不是卷积核)
ceil_mode:True 时输出尺寸向上取整,保留边缘不足 kernel_size 的窗口;False 时向下取整,舍弃这些窗口
注意:输入输出张量都必须是 4 维 (N, C, H, W) 或 3 维 (C, H, W)
import torch
from torch import nn
from torch.nn import MaxPool2d
# A 5x5 example feature map; reshape adds batch and channel axes -> (N, C, H, W),
# which is the layout MaxPool2d expects.
values = [
    [1, 2, 0, 3, 1],
    [0, 1, 2, 3, 1],
    [1, 2, 1, 0, 0],
    [5, 2, 3, 1, 1],
    [2, 1, 0, 1, 1],
]
input = torch.tensor(values, dtype=torch.float32).reshape(-1, 1, 5, 5)
print(input.shape)
class Demo(nn.Module):
    """Minimal module wrapping a single 3x3 max-pooling layer."""

    def __init__(self) -> None:
        super().__init__()
        # ceil_mode=True: edge windows smaller than the kernel are still pooled,
        # so the output is rounded up to 2x2 for a 5x5 input.
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        """Down-sample spatially; the channel count is unchanged."""
        return self.maxpool1(input)
# Run the pooling module on the example input and show the 2x2 result.
demo = Demo()
output = demo(input)
print(output)
结果:
import torch
import torchvision.datasets
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# CIFAR10 test split as tensors, batched for iteration below.
to_tensor = torchvision.transforms.ToTensor()
dataset = torchvision.datasets.CIFAR10(
    "dataset_CIFAR10",
    train=False,
    transform=to_tensor,
    download=True,
)
dataloader = DataLoader(dataset, batch_size=64)
class Demo(nn.Module):
    """A single MaxPool2d layer, for visualizing pooling on CIFAR10 images."""

    def __init__(self) -> None:
        super().__init__()
        # 3x3 window; ceil_mode=True keeps partial windows at the borders.
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        """Max-pool the (N, C, H, W) batch; channels stay the same."""
        return self.maxpool1(input)
# Log each batch before and after pooling so TensorBoard shows the blur effect.
demo = Demo()
writer = SummaryWriter("logs_maxpool")
for step, (imgs, targets) in enumerate(dataloader):
    writer.add_images("input", imgs, step)
    writer.add_images("output", demo(imgs), step)
writer.close()
池化的通道数不会改变哦~池化后参数量大大减少,所以经常在卷积层后加一层池化层,再加一层非线性激活。