[Network Visualization] The Graphviz + torchviz modules


I. Installation

A complete installation tutorial.
Another very detailed tutorial!
The official Graphviz site is here as well; you most likely won't need it.
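The setup usually has two parts: the Python packages (pip install graphviz torchviz) and the Graphviz program itself, whose bin/ directory must be on your PATH, as the tutorials above walk through. Here is a minimal check of my own (assuming that setup, which the post itself doesn't spell out) to confirm everything is wired up; it fails at render() if the dot executable cannot be found:

import graphviz

# Assumed setup (not covered in this post): pip install graphviz torchviz,
# plus the Graphviz binaries installed with bin/ on PATH.
g = graphviz.Digraph(comment='install check')
g.edge('PyTorch', 'torchviz')
g.edge('torchviz', 'graphviz')
# render() invokes the external `dot` executable; it raises if Graphviz
# itself is missing or not on PATH, the most common install problem.
g.render('graphviz_check', format='png', cleanup=True)
print('Graphviz backend version:', graphviz.version())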

II. Example

1. Network structure

I happened to be reading about SE (Squeeze-and-Excitation) networks, so I used that here, built on ResNet-18.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchinfo import summary


class SE_Block(nn.Module):                         # Squeeze-and-Excitation block
    def __init__(self, in_planes):
        super(SE_Block, self).__init__()
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.conv1 = nn.Conv2d(in_planes, in_planes // 16, kernel_size=1)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2d(in_planes // 16, in_planes, kernel_size=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.avgpool(x)
        x = self.conv1(x)
        x = self.relu(x)
        x = self.conv2(x)
        out = self.sigmoid(x)
        return out


class BasicBlock(nn.Module):      # the left-hand residual block structure (18-layer, 34-layer)
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):      # two Conv2d layers + shortcut
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.SE = SE_Block(planes)           # Squeeze-and-Excitation block

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:      # the shortcut builds both the Conv Block and the Identity Block
            self.shortcut = nn.Sequential(            # a sequential container: modules are added in the order they are passed to the nn.Sequential() constructor
                nn.Conv2d(in_planes, self.expansion*planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        SE_out = self.SE(out)
        out = out * SE_out
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class Bottleneck(nn.Module):      # the right-hand residual block structure (50-layer, 101-layer, 152-layer)
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):      # three Conv2d layers + shortcut
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)

        self.SE = SE_Block(self.expansion*planes)           # Squeeze-and-Excitation block

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:      # the shortcut builds both the Conv Block and the Identity Block
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        SE_out = self.SE(out)
        out = out * SE_out
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class SE_ResNet(nn.Module):
    def __init__(self, block, num_blocks, num_classes=1000):
        super(SE_ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)                  # conv1
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)       # conv2_x
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)      # conv3_x
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)      # conv4_x
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)      # conv5_x
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        out = self.linear(x)
        return out


def SE_ResNet18():
    return SE_ResNet(BasicBlock, [2, 2, 2, 2])


def SE_ResNet34():
    return SE_ResNet(BasicBlock, [3, 4, 6, 3])


def SE_ResNet50():
    return SE_ResNet(Bottleneck, [3, 4, 6, 3])


def SE_ResNet101():
    return SE_ResNet(Bottleneck, [3, 4, 23, 3])


def SE_ResNet152():
    return SE_ResNet(Bottleneck, [3, 8, 36, 3])


def test():
    net = SE_ResNet18()
    y = net(torch.randn(1, 3, 224, 224))
    print(y.size())
    print(summary(net, (1, 3, 224, 224)))
    


if __name__ == '__main__':
    test()

torch.Size([1, 1000])
===============================================================================================
Layer (type:depth-idx)                        Output Shape              Param #
===============================================================================================
SE_ResNet                                     [1, 1000]                 --
├─Conv2d: 1-1                                 [1, 64, 224, 224]         1,728
├─BatchNorm2d: 1-2                            [1, 64, 224, 224]         128
├─Sequential: 1-3                             [1, 64, 224, 224]         --
│    └─BasicBlock: 2-1                        [1, 64, 224, 224]         --
│    │    └─Conv2d: 3-1                       [1, 64, 224, 224]         36,864
│    │    └─BatchNorm2d: 3-2                  [1, 64, 224, 224]         128
│    │    └─Conv2d: 3-3                       [1, 64, 224, 224]         36,864
│    │    └─BatchNorm2d: 3-4                  [1, 64, 224, 224]         128
│    │    └─SE_Block: 3-5                     [1, 64, 1, 1]             580
│    │    └─Sequential: 3-6                   [1, 64, 224, 224]         --
│    └─BasicBlock: 2-2                        [1, 64, 224, 224]         --
│    │    └─Conv2d: 3-7                       [1, 64, 224, 224]         36,864
│    │    └─BatchNorm2d: 3-8                  [1, 64, 224, 224]         128
│    │    └─Conv2d: 3-9                       [1, 64, 224, 224]         36,864
│    │    └─BatchNorm2d: 3-10                 [1, 64, 224, 224]         128
│    │    └─SE_Block: 3-11                    [1, 64, 1, 1]             580
│    │    └─Sequential: 3-12                  [1, 64, 224, 224]         --
├─Sequential: 1-4                             [1, 128, 112, 112]        --
│    └─BasicBlock: 2-3                        [1, 128, 112, 112]        --
│    │    └─Conv2d: 3-13                      [1, 128, 112, 112]        73,728
│    │    └─BatchNorm2d: 3-14                 [1, 128, 112, 112]        256
│    │    └─Conv2d: 3-15                      [1, 128, 112, 112]        147,456
│    │    └─BatchNorm2d: 3-16                 [1, 128, 112, 112]        256
│    │    └─SE_Block: 3-17                    [1, 128, 1, 1]            2,184
│    │    └─Sequential: 3-18                  [1, 128, 112, 112]        8,448
│    └─BasicBlock: 2-4                        [1, 128, 112, 112]        --
│    │    └─Conv2d: 3-19                      [1, 128, 112, 112]        147,456
│    │    └─BatchNorm2d: 3-20                 [1, 128, 112, 112]        256
│    │    └─Conv2d: 3-21                      [1, 128, 112, 112]        147,456
│    │    └─BatchNorm2d: 3-22                 [1, 128, 112, 112]        256
│    │    └─SE_Block: 3-23                    [1, 128, 1, 1]            2,184
│    │    └─Sequential: 3-24                  [1, 128, 112, 112]        --
├─Sequential: 1-5                             [1, 256, 56, 56]          --
│    └─BasicBlock: 2-5                        [1, 256, 56, 56]          --
│    │    └─Conv2d: 3-25                      [1, 256, 56, 56]          294,912
│    │    └─BatchNorm2d: 3-26                 [1, 256, 56, 56]          512
│    │    └─Conv2d: 3-27                      [1, 256, 56, 56]          589,824
│    │    └─BatchNorm2d: 3-28                 [1, 256, 56, 56]          512
│    │    └─SE_Block: 3-29                    [1, 256, 1, 1]            8,464
│    │    └─Sequential: 3-30                  [1, 256, 56, 56]          33,280
│    └─BasicBlock: 2-6                        [1, 256, 56, 56]          --
│    │    └─Conv2d: 3-31                      [1, 256, 56, 56]          589,824
│    │    └─BatchNorm2d: 3-32                 [1, 256, 56, 56]          512
│    │    └─Conv2d: 3-33                      [1, 256, 56, 56]          589,824
│    │    └─BatchNorm2d: 3-34                 [1, 256, 56, 56]          512
│    │    └─SE_Block: 3-35                    [1, 256, 1, 1]            8,464
│    │    └─Sequential: 3-36                  [1, 256, 56, 56]          --
├─Sequential: 1-6                             [1, 512, 28, 28]          --
│    └─BasicBlock: 2-7                        [1, 512, 28, 28]          --
│    │    └─Conv2d: 3-37                      [1, 512, 28, 28]          1,179,648
│    │    └─BatchNorm2d: 3-38                 [1, 512, 28, 28]          1,024
│    │    └─Conv2d: 3-39                      [1, 512, 28, 28]          2,359,296
│    │    └─BatchNorm2d: 3-40                 [1, 512, 28, 28]          1,024
│    │    └─SE_Block: 3-41                    [1, 512, 1, 1]            33,312
│    │    └─Sequential: 3-42                  [1, 512, 28, 28]          132,096
│    └─BasicBlock: 2-8                        [1, 512, 28, 28]          --
│    │    └─Conv2d: 3-43                      [1, 512, 28, 28]          2,359,296
│    │    └─BatchNorm2d: 3-44                 [1, 512, 28, 28]          1,024
│    │    └─Conv2d: 3-45                      [1, 512, 28, 28]          2,359,296
│    │    └─BatchNorm2d: 3-46                 [1, 512, 28, 28]          1,024
│    │    └─SE_Block: 3-47                    [1, 512, 1, 1]            33,312
│    │    └─Sequential: 3-48                  [1, 512, 28, 28]          --
├─AdaptiveAvgPool2d: 1-7                      [1, 512, 1, 1]            --
├─Linear: 1-8                                 [1, 1000]                 513,000
===============================================================================================
Total params: 11,770,912
Trainable params: 11,770,912
Non-trainable params: 0
Total mult-adds (G): 27.22
===============================================================================================
Input size (MB): 0.60
Forward/backward pass size (MB): 481.71
Params size (MB): 47.08
Estimated Total Size (MB): 529.40
===============================================================================================
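Note that this implementation keeps a CIFAR-style stem (3×3 conv, stride 1, no max-pool), so the feature maps stay at 224×224 through conv2_x; that is why the forward/backward pass size above is close to 0.5 GB.

As a quick sanity check (my own addition, not part of the original example), here is what SE_Block actually produces: per-channel gates in (0, 1) with spatial size 1×1, which broadcast over H and W in the blocks' out = out * SE_out:

feat = torch.randn(1, 64, 56, 56)
gates = SE_Block(64)(feat)        # per-channel attention weights
print(gates.shape)                # torch.Size([1, 64, 1, 1])
print(float(gates.min()), float(gates.max()))  # both inside (0, 1) after the sigmoid
recalibrated = feat * gates       # broadcasts the gates over the 56x56 spatial dims
print(recalibrated.shape)         # torch.Size([1, 64, 56, 56])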

2. Visualization

from torchviz import make_dot

model = SE_ResNet18()
x = torch.randn(1, 3, 224, 224).requires_grad_(True)
y = model(x)
M = make_dot(y, params=dict(list(model.named_parameters()) + [('x', x)]))  # instantiate make_dot
M.format = "png"
M.directory = "C:/Users/ting/Desktop/SE"   # change the path to your own
M.view()

This writes an image to the directory you set.

(Figure: the torchviz computation graph rendered for SE_ResNet18.)
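If you don't want a viewer window to pop up, make_dot returns a graphviz.Digraph, so render() can write the file directly (a small variation on the code above; the filename is my own choice):

M.render("se_resnet18", directory="C:/Users/ting/Desktop/SE", format="png", cleanup=True)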


Summary

It works reasonably well, but the resulting graph is not very compact; I plan to keep trying other visualization methods.
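One alternative worth trying (a sketch of my own, not something this post covers) is TensorBoard's graph view, which groups submodules hierarchically and tends to look more compact than the torchviz graph:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("runs/se_resnet18")   # log directory is my own choice
writer.add_graph(SE_ResNet18(), torch.randn(1, 3, 224, 224))
writer.close()
# then launch: tensorboard --logdir runs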
