1. 在第一步的卷积中:padding = 2,stride = 1。
2. 在 FeatureMap 的 64@4×4 输出后,要进行 flatten 操作,操作后输出为 64×4×4 = 1024 的长度,所以要经过两个全连接层:
第一个 input_size = 1024,output_size = 64;
第二个 input_size = 64,output_size = 10;
最终输出为 10 分类结果。
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Linear, Flatten, Sequential
from torch.utils.data.dataloader import DataLoader
class Module(nn.Module):
    """CNN classifier for CIFAR-10-sized images (3x32x32 -> 10 class logits).

    Architecture: three conv(5x5, padding=2) + maxpool(2x2) stages with
    channels 3 -> 32 -> 32 -> 64, shrinking the 32x32 spatial size to 4x4;
    then Flatten (64 * 4 * 4 = 1024) and two Linear layers (1024 -> 64 -> 10).
    """

    def __init__(self):
        super().__init__()
        # padding=2 keeps the spatial size constant through each 5x5 conv;
        # each MaxPool2d(2, 2) halves it: 32 -> 16 -> 8 -> 4.
        self.model1 = Sequential(
            Conv2d(3, 32, kernel_size=(5, 5), padding=2, stride=1, dilation=1),
            MaxPool2d(kernel_size=(2, 2)),
            Conv2d(32, 32, kernel_size=(5, 5), padding=2),
            MaxPool2d(kernel_size=(2, 2)),
            Conv2d(32, 64, kernel_size=(5, 5), padding=2),
            MaxPool2d(kernel_size=(2, 2)),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, input):
        """Return logits of shape (batch, 10) for input of shape (batch, 3, 32, 32).

        NOTE: the parameter name `input` shadows the builtin; it is kept for
        backward compatibility with existing keyword callers.
        """
        output = self.model1(input)
        return output
# Download CIFAR-10 train/test splits into ./dataset (fetched on first run),
# converting each PIL image to a tensor.
train_data = torchvision.datasets.CIFAR10('dataset',transform=torchvision.transforms.ToTensor(),train=True,download=True)
test_data = torchvision.datasets.CIFAR10('dataset',transform=torchvision.transforms.ToTensor(),train=False,download=True)
# data_loader = DataLoader(test_data,batch_size=)
module =Module()
# Smoke test with a dummy batch: 64 is the batch size (64 "images"), each 3x32x32.
# NOTE(review): `input` shadows the builtin of the same name.
input = torch.ones((64,3,32,32))
output = module(input)
print(output)
如果你也碰到 Flatten 类不能调用的情况,可以参考:
《Pytorch 1.0.0 版本问题之 module 'torch.nn' has no attribute 'Flatten'》(可基大萌萌哒的马鹿的博客,CSDN 博客)
下面先用一个随机构造的矩阵进行测试。