一般地,在 CNN 等网络中,都是通过卷积滤波器对目标进行计算,而这些计算的输入输出都是高维数据。
最后,当项目需要对数据进行分类或识别时,就需要全连接层 Linear,这时就需要将高维数据平铺(flatten)为低维数据。
Lenet 例子
class Lenet(nn.Module):
    """LeNet-style CNN: a conv/pool feature extractor followed by a
    fully connected classifier producing 10 class scores.

    Shape comments assume 1-channel 28x28 inputs — implied by the
    Linear(400, ...) layer, since 16 * 5 * 5 = 400.
    """

    def __init__(self):
        super(Lenet, self).__init__()
        # Convolution / pooling feature extractor (high-dimensional data).
        self.conv = nn.Sequential(
            nn.Conv2d(1, 6, 3, stride=1, padding=1),   # 1 -> 6 channels, 28x28 kept by padding=1
            nn.MaxPool2d(2, 2),                        # 28x28 -> 14x14
            nn.Conv2d(6, 16, 5, stride=1, padding=0),  # 14x14 -> 10x10
            nn.MaxPool2d(2, 2),                        # 10x10 -> 5x5 => 16*5*5 = 400 features
        )
        # Fully connected classifier (low-dimensional data).
        self.fc = nn.Sequential(
            nn.Linear(400, 120),
            nn.Linear(120, 84),
            nn.Linear(84, 10),
        )

    def forward(self, x):
        out = self.conv(x)               # convolutional features (high-dimensional)
        out = out.view(out.size(0), -1)  # flatten everything except the batch dimension
        out = self.fc(out)               # fully connected layers (low-dimensional)
        return out


lenet = Lenet()
例子
# Demo: flattening a 3-D tensor with view(), keeping different leading sizes.
import torch as t

a = t.arange(24).reshape(4, 3, 2)  # shape (4, 3, 2), values 0..23
x = a.view(a.size(0), -1)          # keep dim 0 -> shape (4, 6)
y = a.view(a.size(1), -1)          # first size = 3 -> shape (3, 8); just a reshape, not a per-axis flatten
'''输出对比'''
a = tensor([[[ 0, 1],
[ 2, 3],
[ 4, 5]],
[[ 6, 7],
[ 8, 9],
[10, 11]],
[[12, 13],
[14, 15],
[16, 17]],
[[18, 19],
[20, 21],
[22, 23]]])
a.size() is torch.Size([4, 3, 2])
x = tensor([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23]])
x.size() is torch.Size([4, 6])
y = tensor([[ 0, 1, 2, 3, 4, 5, 6, 7],
[ 8, 9, 10, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 21, 22, 23]])
y.size() is torch.Size([3, 8])
ps:
x = a.view(a.size(0), -1)
等价于
x = a.view(a.size()[0], -1)
即 a.size(0) 与 a.size()[0] 返回同一个维度大小。