named_modules vs. named_parameters

As the names suggest, the difference is what gets iterated: `named_modules` yields (name, module) pairs for the network and every submodule, recursively, while `named_parameters` yields (name, tensor) pairs for every learnable parameter. Textual explanations of this are easy to find, but I wanted something more hands-on.

The network model shown here comes from https://github.com/Yanqi-Chen/Gradient-Rewiring
The only modification made is to print out the two different kinds of information:

import torch
import torch.nn as nn
from spikingjelly.clock_driven import layer, surrogate, neuron

class Cifar10Net(nn.Module):
    def __init__(self, T=8, v_threshold=1.0, v_reset=0.0, tau=2.0, surrogate_function=surrogate.ATan()):
        super().__init__()

        self.train_times = 0
        self.epochs = 0
        self.max_test_accuracy = 0
        self.T = T  # number of simulation time steps

        self.static_conv = nn.Sequential(  # stateless encoding layers, applied once per input
            nn.Conv2d(3, 256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(256),
        )

        self.conv = nn.Sequential(
            neuron.LIFNode(v_threshold=v_threshold, v_reset=v_reset, tau=tau, surrogate_function=surrogate_function, detach_reset=True),

            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            neuron.LIFNode(v_threshold=v_threshold, v_reset=v_reset, tau=tau, surrogate_function=surrogate_function, detach_reset=True),

            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            neuron.LIFNode(v_threshold=v_threshold, v_reset=v_reset, tau=tau, surrogate_function=surrogate_function, detach_reset=True),

            nn.MaxPool2d(2, 2),  # 16 * 16

            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            neuron.LIFNode(v_threshold=v_threshold, v_reset=v_reset, tau=tau, surrogate_function=surrogate_function, detach_reset=True),

            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            neuron.LIFNode(v_threshold=v_threshold, v_reset=v_reset, tau=tau, surrogate_function=surrogate_function, detach_reset=True),

            nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            neuron.LIFNode(v_threshold=v_threshold, v_reset=v_reset, tau=tau, surrogate_function=surrogate_function, detach_reset=True),

            nn.MaxPool2d(2, 2)  # 8 * 8
        )
        self.fc = nn.Sequential(
            nn.Flatten(),
            layer.Dropout(0.5),
            
            nn.Linear(256 * 8 * 8, 128 * 4 * 4, bias=False),
            neuron.LIFNode(v_threshold=v_threshold, v_reset=v_reset, tau=tau, surrogate_function=surrogate_function, detach_reset=True),

            nn.Linear(128 * 4 * 4, 100, bias=False),
            neuron.LIFNode(v_threshold=v_threshold, v_reset=v_reset, tau=tau, surrogate_function=surrogate_function, detach_reset=True)
        )
        self.boost = nn.AvgPool1d(10, 10)  # average every 10 of the 100 outputs into one of the 10 class scores

    def forward(self, x):
        x = self.static_conv(x)  # the static encoder runs once; its output feeds every time step
        # accumulate the output spikes over T simulation time steps
        out_spikes_counter = self.boost(self.fc(self.conv(x)).unsqueeze(1)).squeeze(1)
        for _ in range(1, self.T):
            out_spikes_counter += self.boost(self.fc(self.conv(x)).unsqueeze(1)).squeeze(1)

        return out_spikes_counter

if __name__ == "__main__":
    net = Cifar10Net()
    # print(net)

    print('named_modules:')
    for name, module in net.named_modules():
        print('name:{}, module {}'.format(name, module))
    print('#####################################################')
    print('named_parameters:')
    for name, param in net.named_parameters():
        print('name:{}, param {}'.format(name, param))

The printout has two distinct parts: the first shows detailed information about each module, and the second shows the weight tensors themselves.
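For reference, the names follow PyTorch's dotted-path convention: children of an `nn.Sequential` are numbered, and the root module itself comes first with an empty name. Heavily abridged, the output looks roughly like this (exact tensor values will of course differ):

    named_modules:
    name:, module Cifar10Net(
      (static_conv): Sequential(...)
      ...
    )
    name:static_conv, module Sequential(
      (0): Conv2d(3, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    name:static_conv.0, module Conv2d(3, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
    ...
    #####################################################
    named_parameters:
    name:static_conv.0.weight, param Parameter containing:
    tensor([[[[...]]]], requires_grad=True)
    name:static_conv.1.weight, param Parameter containing:
    tensor([...], requires_grad=True)
    name:static_conv.1.bias, param Parameter containing:
    tensor([...], requires_grad=True)
    ...

Note that the LIF neuron layers show up in `named_modules` but contribute nothing to `named_parameters`, since they hold no learnable weights.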


Iterating over the modules lets you inspect each one, and any intermediate state stored on a module can be read or written along the way. For example, checking whether a module has a certain attribute:

    spike_times = {}
    for name, module in net.named_modules():
        if hasattr(module, 'monitor'):
            spike_times[name] = 0
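
The same iteration also works for selecting modules by type rather than by attribute. A minimal sketch, assuming you want to gather every LIF neuron layer of the model above (the `lif_layers` dict is just an illustrative name):

    # Collect all LIF neuron layers, e.g. to attach monitors or hooks to them later.
    lif_layers = {}
    for name, module in net.named_modules():
        if isinstance(module, neuron.LIFNode):
            lif_layers[name] = module
    print(list(lif_layers.keys()))  # e.g. ['conv.0', 'conv.3', 'conv.6', ...]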

The parameters, in turn, can be pulled out and treated differently, for example when setting up the optimizer:

    BN_list = ['static_conv.1', 'conv.2', 'conv.5', 'conv.9', 'conv.12', 'conv.15']  # names of the BatchNorm layers
    bn_params, weight_params = [], []
    ttl_cnt, w_cnt = 0, 0  # total / non-BN parameter counts
    for name, param in net.named_parameters():
        if any(BN_name in name for BN_name in BN_list):
            bn_params += [param]
            ttl_cnt += param.numel()
        else:
            weight_params += [param]
            w_cnt += param.numel()
            ttl_cnt += param.numel()
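
Grouping the parameters like this usually feeds into per-group optimizer settings, for instance exempting BatchNorm parameters from weight decay. A minimal sketch, assuming SGD; the learning rate and decay values below are placeholders, not taken from the repository:

    optimizer = torch.optim.SGD(
        [
            {'params': bn_params, 'weight_decay': 0.0},       # no decay on BatchNorm parameters
            {'params': weight_params, 'weight_decay': 5e-4},  # placeholder decay value
        ],
        lr=0.1, momentum=0.9)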