本文根据此图搭建的神经网络模型:
这种神经网络模型中,大部分参数都已经给出,但有些数据需要根据已有的数据进行计算,例如每次进行卷积操作时padding的值就需要自行计算,最后进行线性层时的输入特征数也要自行计算
计算padding的公式:由卷积输出尺寸公式 H_out = (H_in + 2×padding − kernel_size) / stride + 1 反推,可得 padding = ((H_out − 1)×stride − H_in + kernel_size) / 2
图中H、W、C分别代表高度(Height)、宽度(Width)、通道数(Channels)
代码展示:
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
# 开始搭神经网络
class Mymodule(nn.Module):
    """CNN for CIFAR-10-sized inputs: maps (N, 3, 32, 32) images to (N, 10) class logits.

    The layers are wrapped in a single ``nn.Sequential`` pipeline, which runs
    them in order — equivalent to defining each layer as an attribute and
    chaining the calls by hand in ``forward``, but more readable.
    """

    def __init__(self):
        super().__init__()
        # Spatial sizes (H = W) as data flows through, starting from 32x32:
        self.module = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2),   # 32 -> 32
            nn.MaxPool2d(2),                                                       # 32 -> 16
            # NOTE(review): kernel_size=2 gives 16 -> 19 here (19 -> 9 after the
            # pool), which looks like a typo for kernel_size=5 in the original
            # diagram; kept as-is because either value still flattens to 1024.
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=2, padding=2),  # 16 -> 19
            nn.MaxPool2d(2),                                                       # 19 -> 9
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2),  # 9 -> 9
            nn.MaxPool2d(2),                                                       # 9 -> 4
            nn.Flatten(),                                  # 64 channels * 4 * 4 = 1024 features
            nn.Linear(in_features=1024, out_features=64),
            nn.Linear(in_features=64, out_features=10),
        )

    def forward(self, imgs):
        """Run the full pipeline on a batch of images.

        Args:
            imgs: float tensor of shape (N, 3, 32, 32).

        Returns:
            Tensor of shape (N, 10) — one logit per class.
        """
        return self.module(imgs)
# Smoke-test the network and export its graph for TensorBoard.
model = Mymodule()
writer = SummaryWriter("glo")
batch = torch.ones([64, 3, 32, 32])
print(batch.shape)
# add_graph traces the model on a sample input so TensorBoard can render
# every layer of the network (double-click a node to expand its internals).
writer.add_graph(model, batch)
batch = model(batch)
print(batch.shape)
writer.close()
当进入可视化界面后:
(双击小方块可以看到每一步的细致操作)