# Test convolution output sizes
import torch
in_channels,out_channels = 5,10
width,height = 100,100
kernel_size = 3
batch_size = 1
input = torch.randn(batch_size,
                    in_channels,
                    width,
                    height)
conv_layer = torch.nn.Conv2d(
    in_channels,
    out_channels,
    kernel_size=kernel_size
)
output = conv_layer(input)
print(input.shape)
print(output.shape)
print(conv_layer.weight.shape)  # conv layer weight shape: (out_channels, in_channels, kH, kW)
# torch.Size([1, 5, 100, 100])
# torch.Size([1, 10, 98, 98])
# torch.Size([10, 5, 3, 3])
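These shapes follow the standard size formula: each spatial dimension becomes floor((W - K + 2P) / S) + 1, so with W=100, K=3, P=0, S=1 the output is 98. A minimal sketch checking the formula against Conv2d (the helper name conv_out_size is my own):
import torch

def conv_out_size(w, k, p=0, s=1):
    # floor((W - K + 2P) / S) + 1
    return (w - k + 2 * p) // s + 1

assert conv_out_size(100, 3) == 98  # matches the 98x98 output above

# cross-check against an actual Conv2d layer
x = torch.randn(1, 5, 100, 100)
layer = torch.nn.Conv2d(5, 10, kernel_size=3)
assert layer(x).shape == (1, 10, 98, 98)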
When defining a convolutional layer, pay attention to four values: the number of input channels, the number of output channels, the kernel size, and the padding (used in the example below).
import torch
input = [3, 4, 6, 5, 7,
         2, 4, 6, 8, 2,
         1, 6, 7, 8, 4,
         9, 7, 4, 6, 2,
         3, 7, 5, 4, 1]
input = torch.Tensor(input).view(1, 1, 5, 5)  # reshape to (N, C, H, W)
conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False)
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)
conv_layer.weight.data = kernel.data  # manually set the kernel weights
output = conv_layer(input)
print(output)
# Output:
# tensor([[[[ 91., 168., 224., 215., 127.],
# [114., 211., 295., 262., 149.],
# [192., 259., 282., 214., 122.],
# [194., 251., 253., 169., 86.],
# [ 96., 112., 110., 68., 31.]]]],
# grad_fn=<MkldnnConvolutionBackward>)
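Note that padding=1 preserves the 5x5 spatial size, exactly as the formula predicts: floor((5 - 3 + 2*1) / 1) + 1 = 5.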
Changing padding=1 in the code above to stride=2 produces the following result:
tensor([[[[211., 262.],
[251., 169.]]]], grad_fn=<MkldnnConvolutionBackward>)
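The same formula explains the 2x2 result: floor((5 - 3 + 0) / 2) + 1 = 2 in each spatial dimension. A quick sketch reproducing it with the functional API torch.nn.functional.conv2d (an equivalent form, not the exact code above):
import torch
import torch.nn.functional as F

input = torch.Tensor([3, 4, 6, 5, 7,
                      2, 4, 6, 8, 2,
                      1, 6, 7, 8, 4,
                      9, 7, 4, 6, 2,
                      3, 7, 5, 4, 1]).view(1, 1, 5, 5)
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)
print(F.conv2d(input, kernel, stride=2))
# tensor([[[[211., 262.],
#           [251., 169.]]]])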
A pooling layer does not change the number of channels.
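For example, a 2x2 max pool halves each spatial dimension but leaves the channel count untouched; a minimal sketch:
import torch

x = torch.randn(1, 10, 98, 98)
pool = torch.nn.MaxPool2d(kernel_size=2)  # stride defaults to kernel_size
print(pool(x).shape)  # torch.Size([1, 10, 49, 49]) -- 10 channels unchanged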
The art of phrasing: when the error rate goes from 3% to 2%, it has not fallen by 1%; it has fallen by one percentage point, which is a relative reduction of about 33%.
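The distinction is absolute versus relative change; the arithmetic in one snippet:
old, new = 0.03, 0.02
print(f"absolute drop: {old - new:.2%}")          # 1.00% (percentage points)
print(f"relative drop: {(old - new) / old:.2%}")  # 33.33%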