PyTorch: reducing GPU memory consumption, optimizing memory usage, and computing the model's intermediate variables

1. The model is as follows:
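The dump below is produced by enumerating every submodule of the network. A minimal sketch of the printing loop, assuming the instantiated network object is named net (nn.Module.modules() yields the module itself first and then every nested submodule, which is why i is 0 is the whole RFBNet and i is 1 is its base ModuleList):

# Enumerate the model itself, its containers, and all leaf layers.
# net is assumed to be an already-constructed RFBNet instance.
for i, m in enumerate(net.modules()):
    print('i is {}, m is {}'.format(i, m))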

i is 0, m is RFBNet(
  (base): ModuleList(
    (0): Conv2d(1, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace)
    (2): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(24, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace)
    (7): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(48, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace)
    (12): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace)
    (14): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(96, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace)
    (19): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace)
    (21): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace)
  )
  (Norm): BasicRFB_a(
    (branch0): Sequential(
      (0): BasicConv(
        (conv): Conv2d(192, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn): BatchNorm2d(48, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
        (relu): ReLU(inplace)
      )
      (1): BasicConv(
        (conv): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn): BatchNorm2d(48, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
      )
    )
    (branch1): Sequential(
      (0): BasicConv(
        (conv): Conv2d(192, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn): BatchNorm2d(48, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
        (relu): ReLU(inplace)
      )
      (1): BasicConv(
        (conv): Conv2d(48, 48, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0), bias=False)
        (bn): BatchNorm2d(48, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
        (relu): ReLU(inplace)
      )
      (2): BasicConv(
        (conv): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), bias=False)
        (bn): BatchNorm2d(48, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
      )
    )
    (branch2): Sequential(
      (0): BasicConv(
        (conv): Conv2d(192, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn): BatchNorm2d(48, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
        (relu): ReLU(inplace)
      )
      (1): BasicConv(
        (conv): Conv2d(48, 48, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1), bias=False)
        (bn): BatchNorm2d(48, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
        (relu): ReLU(inplace)
      )
      (2): BasicConv(
        (conv): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), bias=False)
        (bn): BatchNorm2d(48, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
      )
    )
    (branch3): Sequential(
      (0): BasicConv(
        (conv): Conv2d(192, 24, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn): BatchNorm2d(24, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
        (relu): ReLU(inplace)
      )
      (1): BasicConv(
        (conv): Conv2d(24, 36, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1), bias=False)
        (bn): BatchNorm2d(36, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
        (relu): ReLU(inplace)
      )
      (2): BasicConv(
        (conv): Conv2d(36, 48, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0), bias=False)
        (bn): BatchNorm2d(48, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
        (relu): ReLU(inplace)
      )
      (3): BasicConv(
        (conv): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(5, 5), dilation=(5, 5), bias=False)
        (bn): BatchNorm2d(48, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
      )
    )
    (ConvLinear): BasicConv(
      (conv): Conv2d(192, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(192, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
    )
    (shortcut): BasicConv(
      (conv): Conv2d(192, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)
      (bn): BatchNorm2d(192, eps=1e-05, momentum=0.01, affine=True, track_running_stats=True)
    )
    (relu): ReLU()
  )
  (loc): ModuleList(
    (0): Conv2d(192, 20, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  )
  (conf): ModuleList(
    (0): Conv2d(192, 25, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  )
  (softmax): Softmax()
)


i is 1, m is ModuleList(
  (0): Conv2d(1, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (1): ReLU(inplace)
  (2): Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (3): ReLU(inplace)
  (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (5): Conv2d(24, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (6): ReLU(inplace)
  (7): Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (8): ReLU(inplace)
  (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (10): Conv2d(48, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (11): ReLU(inplace)
  (12): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (13): ReLU(inplace)
  (14): Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (15): ReLU(inplace)
  (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (17): Conv2d(96, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (18): ReLU(inplace)
  (19): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (20): ReLU(inplace)
  (21): Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (22): ReLU(inplace)
)


i is 2, m is Conv2d(1, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
i is 3, m is ReLU(inplace)
i is 4, m is Conv2d(24, 24, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
i is 5, m is ReLU(inplace)
i is 6, m is MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
i is 7, m is Conv2d(24, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
i is 8, m is ReLU(inplace)
i is 9, m is Conv2d(48, 48, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
i is 10, m is ReLU(inplace)
i is 11, m is MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
i is 12, m is Conv2d(48, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
i is 13, m is ReLU(inplace)
i is 14, m is Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
i is 15, m is ReLU(inplace)
i is 16, m is Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
...
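The enumeration shows the structure, but not how much memory the intermediate variables (feature maps) occupy. One way to measure them is to register a forward hook on every leaf module and sum the byte sizes of the outputs. This is a minimal sketch rather than the original code; the names net and x and the input shape (1, 1, 128, 128) are assumptions (the first conv expects a single input channel):

import torch

acts = []  # (module name, bytes held by that module's output)

def make_hook(name):
    def hook(module, inputs, output):
        if torch.is_tensor(output):
            # numel * element_size = bytes of this intermediate tensor.
            # Note: ReLU(inplace) outputs alias their inputs, so in-place
            # layers are slightly over-counted by this estimate.
            acts.append((name, output.numel() * output.element_size()))
    return hook

handles = [m.register_forward_hook(make_hook(name))
           for name, m in net.named_modules()
           if len(list(m.children())) == 0]  # leaf modules only

x = torch.randn(1, 1, 128, 128)
with torch.no_grad():  # no autograd graph is needed just to measure sizes
    net(x)

for h in handles:
    h.remove()

total = sum(b for _, b in acts)
print('intermediate activations: {:.2f} MB'.format(total / 1024 ** 2))

Note that the ReLU(inplace) entries in the dump above are already one memory optimization: an in-place ReLU overwrites its input instead of allocating a new output tensor. To cut activation memory further during training, gradient checkpointing trades compute for memory by discarding activations in the forward pass and recomputing them in the backward pass. A hedged sketch over the VGG-style base ModuleList shown above (torch.utils.checkpoint.checkpoint_sequential is standard PyTorch API; wrapping net.base in an nn.Sequential is an assumption about how it is called):

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint_sequential

# Checkpoint the 23 base layers in 3 segments: only the activations at
# segment boundaries are kept; the rest are recomputed during backward.
base = nn.Sequential(*net.base)
x = torch.randn(1, 1, 128, 128, requires_grad=True)
features = checkpoint_sequential(base, 3, x)
features.sum().backward()  # recomputation happens here, inside each segment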
