使用后发现 torchsummary 还挺好用,在这里自己整理一下。
和 torchinfo 进行了对比:torchsummary 能更直观地显示每一层的结构,但不会像 torchinfo 一样按模块分组显示,各有优劣。
# torchsummary is a third-party package (pip install torchsummary).
from torchsummary import summary
# MobileNetV2 is assumed to be defined/imported elsewhere in the project
# — confirm it is in scope before running this snippet.
# .cuda() moves the model to the GPU; torchsummary performs a forward
# pass, so it creates its dummy input on the same device.
model1 = MobileNetV2(n_class=2).cuda()
# Print a per-layer summary for a 3x256x256 input
# (input_size excludes the batch dimension, shown as -1 in the output).
summary(model1, input_size=(3, 256, 256))
输出显示如下:
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 32, 128, 128] 864
BatchNorm2d-2 [-1, 32, 128, 128] 64
ReLU6-3 [-1, 32, 128, 128] 0
Conv2d-4 [-1, 32, 128, 128] 288
BatchNorm2d-5 [-1, 32, 128, 128] 64
ReLU6-6 [-1, 32, 128, 128] 0
Conv2d-7 [-1, 16, 128, 128] 512
BatchNorm2d-8 [-1, 16, 128, 128] 32
InvertedResidual-9 [-1, 16, 128, 128] 0
Conv2d-10 [-1, 96, 128, 128] 1,536
BatchNorm2d-11 [-1, 96, 128, 128] 192
ReLU6-12 [-1, 96, 128, 128] 0
Conv2d-13 [-1, 96, 64, 64] 864
BatchNorm2d-14 [-1, 96, 64, 64] 192
ReLU6-15 [-1, 96, 64, 64] 0
Conv2d-16 [-1, 24, 64, 64] 2,304
BatchNorm2d-17 [-1, 24, 64, 64] 48
InvertedResidual-18 [-1, 24, 64, 64] 0
Conv2d-19 [-1, 144, 64, 64] 3,456
BatchNorm2d-20 [-1, 144, 64, 64] 288
ReLU6-21 [-1, 144, 64, 64] 0
Conv2d-22 [-1, 144, 64, 64] 1,296
BatchNorm2d-23 [-1, 144, 64, 64] 288
ReLU6-24 [-1, 144, 64, 64] 0
Conv2d-25 [-1, 24, 64, 64] 3,456
BatchNorm2d-26 [-1, 24, 64, 64] 48
InvertedResidual-27 [-1, 24, 64, 64] 0
Conv2d-28 [-1, 144, 64, 64] 3,456
BatchNorm2d-29 [-1, 144, 64, 64] 288
ReLU6-30 [-1, 144, 64, 64] 0
Conv2d-31 [-1, 144, 32, 32] 1,296
BatchNorm2d-32 [-1, 144, 32, 32] 288
ReLU6-33 [-1, 144, 32, 32] 0
Conv2d-34 [-1, 32, 32, 32] 4,608
BatchNorm2d-35 [-1, 32, 32, 32] 64
InvertedResidual-36 [-1, 32, 32, 32] 0
Conv2d-37 [-1, 192, 32, 32] 6,144
BatchNorm2d-38 [-1, 192, 32, 32] 384
ReLU6-39 [-1, 192, 32, 32] 0
Conv2d-40 [-1, 192, 32, 32] 1,728
BatchNorm2d-41 [-1, 192, 32, 32] 384
ReLU6-42 [-1, 192, 32, 32] 0
Conv2d-43 [