卷积池化的小网络
最开始 in-channel 为 3,out-channel 为 32,kernel size 是给定的(5×5)。Conv2d 所需的 padding 和 stride 由输出尺寸公式 H_out = (H_in + 2·padding − kernel_size) / stride + 1 反推得到:要让 32×32 的输入保持 32×32 输出,取 stride=1、padding=2。
整个网络代码如下:
import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear


class Tudui(nn.Module):
    """Small CNN for 32x32 RGB inputs (e.g. CIFAR-10).

    Three conv(5x5, padding=2) + maxpool(2) stages, then flatten and two
    linear layers producing 10 class logits.
    """

    def __init__(self):
        super(Tudui, self).__init__()
        # padding=2 keeps spatial size: (32 - 5 + 2*2)/1 + 1 = 32
        self.conv1 = Conv2d(3, 32, 5, padding=2)
        self.maxpool1 = MaxPool2d(2)   # 32x32 -> 16x16
        self.conv2 = Conv2d(32, 32, 5, padding=2)
        self.maxpool2 = MaxPool2d(2)   # 16x16 -> 8x8
        self.conv3 = Conv2d(32, 64, 5, padding=2)
        self.maxpool3 = MaxPool2d(2)   # 8x8 -> 4x4
        self.flatten = Flatten()       # 64 channels * 4 * 4 = 1024 features
        self.linear1 = Linear(1024, 64)
        self.linear2 = Linear(64, 10)

    def forward(self, x):
        # Conv/pool stages, then the classifier head.
        x = self.maxpool1(self.conv1(x))
        x = self.maxpool2(self.conv2(x))
        x = self.maxpool3(self.conv3(x))
        x = self.flatten(x)
        x = self.linear1(x)
        x = self.linear2(x)
        return x


tudui = Tudui()
print(tudui)
# NOTE(review): `input` shadows the Python builtin; kept because the
# TensorBoard snippet below refers to this name.
input = torch.ones((64, 3, 32, 32))
output = tudui(input)
print(output.shape)  # torch.Size([64, 10]); 64 is the batch size
输出:print(tudui) 打印出网络的层结构;print(output.shape) 输出 torch.Size([64, 10])。
用 Sequential简化代码:
可视化模型结构:
# Record the model graph for TensorBoard.
# Requires `from torch.utils.tensorboard import SummaryWriter`;
# `tudui` and `input` come from the script above.
writer=SummaryWriter("../logs_seq")
# Trace the model once with the example input to capture its graph.
writer.add_graph(tudui,input)
# Flush and close the event file so TensorBoard can read it.
writer.close()
在 terminal(终端)中运行 `tensorboard --logdir=logs_seq`(按实际日志路径调整)即可查看可视化的模型结构。