利用tensorboard显示模型示意图使用到的函数add_graph中的参数
add_graph(model, input_to_model=None, verbose=False, **kwargs)
参数
model (torch.nn.Module): 待可视化的网络模型
input_to_model (torch.Tensor or list of torch.Tensor, optional): 待输入神经网络的变量或一组变量
verbose 表示是否输出详细信息;verbose=False(注意 Python 中是 False,不是 FALSE)表示运行时不显示详细的图结构信息
1.AlexNet
import torch
from torch import nn
from torchstat import stat
class AlexNet(nn.Module):
    """AlexNet (Krizhevsky et al., 2012) for 224x224 RGB images.

    Args:
        num_classes (int): number of output classes of the final
            fully-connected layer.

    Input:  tensor of shape (b, 3, 224, 224)
    Output: logits of shape (b, num_classes)
    """

    def __init__(self, num_classes):
        super(AlexNet, self).__init__()
        # Convolutional feature extractor.
        # Shape comments assume a (b, 3, 224, 224) input.
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),  # b, 64, 55, 55
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=3, stride=2),                  # b, 64, 27, 27
            nn.Conv2d(64, 192, kernel_size=5, padding=2),           # b, 192, 27, 27
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=3, stride=2),                  # b, 192, 13, 13
            nn.Conv2d(192, 384, kernel_size=3, padding=1),          # b, 384, 13, 13
            nn.ReLU(True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),          # b, 256, 13, 13
            nn.ReLU(True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),          # b, 256, 13, 13
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=3, stride=2),                  # b, 256, 6, 6
        )
        # Fully-connected classifier head (Dropout only active in train mode).
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        """Compute class logits for a batch of (b, 3, 224, 224) images."""
        x = self.features(x)
        # Flatten conv features to (b, 256*6*6); -1 avoids hard-coding
        # the spatial size. (Removed a leftover debug print of x.size().)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
# Build an AlexNet with 10 output classes, then print a per-layer
# parameter / FLOPs summary for a single 3x224x224 input using torchstat.
model = AlexNet(num_classes=10)
stat(model, (3, 224, 224))
2. VGG-16
# VGG-16模型
from torch import nn
from torchstat import stat
class VGG(nn.Module):
def __init__(self, num_classes):
super(VGG, self).__init__() # b, 3, 224, 224
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, padding=1), # b, 64, 224, 224
nn.ReLU(True),
nn.Conv2d(64, 64, kernel_size=3, padding=1), # b, 64, 224, 224
nn.ReLU(True),
nn.MaxPool2d(kernel_size=2, stride=2), # b, 64, 112, 112
nn.Conv2d(64, 128, kernel_size=3, padding=1), # b, 128, 112, 112
nn.ReLU(True),
nn.Conv2d(128, 128, kernel_size=3, padding=1), # b, 128, 112, 112
nn.ReLU(True),
nn.MaxPool2d(kernel_size=2, stride=2), # b, 128, 56, 56
nn.Conv2d(128, 256, kernel_size=3, padding=1), # b, 256, 56, 56
nn.ReLU(True),
nn.Conv2d(256, 256, kernel_size=3, padding=1), # b, 256, 56, 56
nn.ReLU(True),
nn.Conv2d(256, 256, kernel_size=3, padding=1), # b, 256, 56, 56
nn.ReLU(True),
nn.MaxPool2d(kernel_size=2, stride=2), # b, 256, 28, 28
nn.Conv2d(256, 512, kernel_size=3, padding=1), # b, 512, 28, 28
nn.ReLU(True),
nn.Conv2d(512, 512, kernel_size=3, padding=1), # b, 512, 28, 28
nn.ReLU(True),
nn.Conv2d(512, 512, kernel_size=3, padding=1), # b, 512, 28, 28
nn.ReLU(True),
nn.MaxPool2d(kernel_size=2, stride=2), # b, 512, 14, 14
nn.Conv2d(512, 512, kernel_size=3, padding=1), # b, 512, 14, 14