How to view layer names in PyTorch

    # net is any torch.nn.Module instance; named_modules() walks it recursively
    for idx, m in enumerate(net.named_modules()):
        print(idx, '->', m)

The printed output contains each layer's dotted name, e.g. "conv.1", "fc.3", and so on. Note that named_modules() yields (name, module) tuples and traverses the network recursively, so the first entry, with the empty name '', is the network itself:

0 -> ('', Net(
  (static_conv): Sequential(
    (0): Conv2d(1, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
    (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  )
  (conv): Sequential(
    (0): IFNode(
      v_threshold=1.0, v_reset=0.0, detach_reset=False
      (surrogate_function): ATan(alpha=2.0, spiking=True)
    )
    (1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
    (3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (4): IFNode(
      v_threshold=1.0, v_reset=0.0, detach_reset=False
      (surrogate_function): ATan(alpha=2.0, spiking=True)
    )
    (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (fc): Sequential(
    (0): Flatten(start_dim=1, end_dim=-1)
    (1): Dropout(p=0.7)
    (2): Linear(in_features=6272, out_features=1152, bias=False)
    (3): LIFNode(
      v_threshold=1.0, v_reset=0.0, tau=2.0
      (surrogate_function): ATan(alpha=2.0, spiking=True)
    )
    (4): Dropout(p=0.7)
    (5): Linear(in_features=1152, out_features=128, bias=False)
    (6): LIFNode(
      v_threshold=1.0, v_reset=0.0, tau=2.0
      (surrogate_function): ATan(alpha=2.0, spiking=True)
    )
    (7): Linear(in_features=128, out_features=5, bias=False)
    (8): LIFNode(
      v_threshold=1.0, v_reset=0.0, tau=2.0
      (surrogate_function): ATan(alpha=2.0, spiking=True)
    )
  )
))
1 -> ('static_conv', Sequential(
  (0): Conv2d(1, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
))
2 -> ('static_conv.0', Conv2d(1, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False))
3 -> ('static_conv.1', BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True))
4 -> ('conv', Sequential(
  (0): IFNode(
    v_threshold=1.0, v_reset=0.0, detach_reset=False
    (surrogate_function): ATan(alpha=2.0, spiking=True)
  )
  (1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
  (3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (4): IFNode(
    v_threshold=1.0, v_reset=0.0, detach_reset=False
    (surrogate_function): ATan(alpha=2.0, spiking=True)
  )
  (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
))
5 -> ('conv.0', IFNode(
  v_threshold=1.0, v_reset=0.0, detach_reset=False
  (surrogate_function): ATan(alpha=2.0, spiking=True)
))
6 -> ('conv.0.surrogate_function', ATan(alpha=2.0, spiking=True))
7 -> ('conv.1', MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False))
8 -> ('conv.2', Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False))
9 -> ('conv.3', BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True))
10 -> ('conv.4', IFNode(
  v_threshold=1.0, v_reset=0.0, detach_reset=False
  (surrogate_function): ATan(alpha=2.0, spiking=True)
))
11 -> ('conv.4.surrogate_function', ATan(alpha=2.0, spiking=True))
12 -> ('conv.5', MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False))
13 -> ('fc', Sequential(
  (0): Flatten(start_dim=1, end_dim=-1)
  (1): Dropout(p=0.7)
  (2): Linear(in_features=6272, out_features=1152, bias=False)
  (3): LIFNode(
    v_threshold=1.0, v_reset=0.0, tau=2.0
    (surrogate_function): ATan(alpha=2.0, spiking=True)
  )
  (4): Dropout(p=0.7)
  (5): Linear(in_features=1152, out_features=128, bias=False)
  (6): LIFNode(
    v_threshold=1.0, v_reset=0.0, tau=2.0
    (surrogate_function): ATan(alpha=2.0, spiking=True)
  )
  (7): Linear(in_features=128, out_features=5, bias=False)
  (8): LIFNode(
    v_threshold=1.0, v_reset=0.0, tau=2.0
    (surrogate_function): ATan(alpha=2.0, spiking=True)
  )
))
14 -> ('fc.0', Flatten(start_dim=1, end_dim=-1))
15 -> ('fc.1', Dropout(p=0.7))
16 -> ('fc.2', Linear(in_features=6272, out_features=1152, bias=False))
17 -> ('fc.3', LIFNode(
  v_threshold=1.0, v_reset=0.0, tau=2.0
  (surrogate_function): ATan(alpha=2.0, spiking=True)
))
18 -> ('fc.3.surrogate_function', ATan(alpha=2.0, spiking=True))
19 -> ('fc.4', Dropout(p=0.7))
20 -> ('fc.5', Linear(in_features=1152, out_features=128, bias=False))
21 -> ('fc.6', LIFNode(
  v_threshold=1.0, v_reset=0.0, tau=2.0
  (surrogate_function): ATan(alpha=2.0, spiking=True)
))
22 -> ('fc.6.surrogate_function', ATan(alpha=2.0, spiking=True))
23 -> ('fc.7', Linear(in_features=128, out_features=5, bias=False))
24 -> ('fc.8', LIFNode(
  v_threshold=1.0, v_reset=0.0, tau=2.0
  (surrogate_function): ATan(alpha=2.0, spiking=True)
))
25 -> ('fc.8.surrogate_function', ATan(alpha=2.0, spiking=True))
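
Once a layer's name is known, the corresponding module can be fetched directly, which is handy for inspecting weights or attaching hooks. Below is a minimal sketch assuming the net printed above; get_submodule requires PyTorch 1.9 or later, while the dict lookup works on any version:

    # Fetch a submodule by the dotted name printed above.
    layer = net.get_submodule('conv.2')               # PyTorch 1.9+
    same_layer = dict(net.named_modules())['conv.2']  # any version
    assert layer is same_layer
    print(layer.weight.shape)  # torch.Size([128, 128, 3, 3])

    # A named layer is also a convenient hook target, e.g. for
    # inspecting intermediate outputs during a forward pass:
    def print_shape(module, inputs, output):
        print(type(module).__name__, output.shape)

    handle = layer.register_forward_hook(print_shape)
    # ... run a forward pass here, then detach the hook:
    handle.remove()

These dotted names also match the keys in net.state_dict(); likewise, net.named_parameters() yields parameter names like 'conv.2.weight'.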