YOLOv8 Model Sparsification and Pruning

1. Sparsity training (add a weight-sparsification term to the training loop)
--ultralytics/engine/trainer.py
  Around line 345:
    # Backward
    self.scaler.scale(self.loss).backward()

    # ========== Added: L1 sparsity penalty on BatchNorm parameters ==========
    # Since d|w|/dw = sign(w), adding l1_lambda * sign(w) to the gradient is
    # equivalent to an L1 penalty on the BN scale factors (gamma), pushing
    # unimportant channels toward zero; the coefficient decays over training.
    l1_lambda = 1e-2 * (1 - 0.9 * epoch / self.epochs)
    for k, m in self.model.named_modules():
        if isinstance(m, nn.BatchNorm2d):
            m.weight.grad.data.add_(l1_lambda * torch.sign(m.weight.data))
            m.bias.grad.data.add_(1e-2 * torch.sign(m.bias.data))
    # ========== End added ==========

    # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
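
For intuition: the extra gradient term above is exactly the L1 subgradient, so it is equivalent to training with an added penalty l1_lambda * |w| on every BN weight. A minimal, self-contained check of that equivalence on a toy BatchNorm2d layer (this sketch is only illustrative and is not part of the repository changes):

    import torch
    import torch.nn as nn

    torch.manual_seed(0)
    bn = nn.BatchNorm2d(4)
    x = torch.randn(2, 4, 8, 8)
    l1_lambda = 1e-2

    # Route A: plain loss, then add the subgradient by hand (as in trainer.py)
    bn.zero_grad()
    bn(x).pow(2).mean().backward()
    grad_manual = bn.weight.grad + l1_lambda * torch.sign(bn.weight.detach())

    # Route B: put the explicit L1 penalty into the loss and let autograd handle it
    bn.zero_grad()
    (bn(x).pow(2).mean() + l1_lambda * bn.weight.abs().sum()).backward()

    print(torch.allclose(grad_manual, bn.weight.grad))  # True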
2. Start training
--ultralytics-main/train.py
    Training code:
        from ultralytics import YOLO
        import torch

        model = YOLO('yolov8s.yaml')  # or a .pt checkpoint

        yaml_path = 'my_datasets.yaml'

        device = 'cuda' if torch.cuda.is_available() else 'cpu'

        model.train(data=yaml_path, imgsz=640, batch=8, device=device)
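
After this sparsity-training run finishes, it is worth confirming that the L1 penalty actually pushed a noticeable fraction of the BN scale factors toward zero before moving on to pruning. A rough sketch (the checkpoint path is assumed to match the one used in step 3; adjust it to your own run directory):

    import torch
    from ultralytics import YOLO

    yolo = YOLO("runs/detect/train24/weights/last.pt")
    gammas = torch.cat([m.weight.detach().abs().flatten()
                        for m in yolo.model.modules()
                        if isinstance(m, torch.nn.BatchNorm2d)])
    for t in (1e-3, 1e-2, 1e-1):
        frac = (gammas < t).float().mean().item()
        print(f"|gamma| < {t:g}: {frac * 100:.1f}% of channels")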
        

3. Pruning: run the following script
    ---ultralytics-main/prune_new.py
    ---Load last.pt, the checkpoint produced by the training in step 2
    prune_new.py:
    from ultralytics import YOLO
    import torch
    from ultralytics.nn.modules import Bottleneck, Conv, C2f, SPPF, Detect

    # Load a model
    yolo = YOLO("runs/detect/train24/weights/last.pt")  # build a new model from scratch
    model = yolo.model

    # Collect the absolute BN scale factors (gamma) and biases (beta) of every layer
    ws = []
    bs = []

    for name, m in model.named_modules():
        if isinstance(m, torch.nn.BatchNorm2d):
            w = m.weight.abs().detach()
            b = m.bias.abs().detach()
            ws.append(w)
            bs.append(b)
            print(name, w.max().item(), w.min().item(), b.max().item(), b.min().item())
    # Global threshold: keep roughly the top `factor` fraction of channels,
    # ranked by |gamma| across the whole network.
    factor = 0.8
    ws = torch.cat(ws)
    threshold = torch.sort(ws, descending=True)[0][int(len(ws) * factor)]
    print(threshold)


    def prune_conv(conv1: Conv, conv2: Conv):
        # Prune the output channels of conv1 according to its BN |gamma|, then
        # trim the matching input channels of every module that consumes it.
        gamma = conv1.bn.weight.data.detach()
        beta  = conv1.bn.bias.data.detach()

        # Keep the channels whose |gamma| clears the global threshold; if fewer
        # than 8 survive, halve the threshold until at least 8 are kept.
        keep_idxs = []
        local_threshold = threshold
        while len(keep_idxs) < 8:
            keep_idxs = torch.where(gamma.abs() >= local_threshold)[0]
            local_threshold = local_threshold * 0.5
        n = len(keep_idxs)
        print(n / len(gamma) * 100)  # percentage of channels kept in this layer
        conv1.bn.weight.data = gamma[keep_idxs]
        conv1.bn.bias.data   = beta[keep_idxs]
        conv1.bn.running_var.data = conv1.bn.running_var.data[keep_idxs]
        conv1.bn.running_mean.data = conv1.bn.running_mean.data[keep_idxs]
        conv1.bn.num_features = n
        conv1.conv.weight.data = conv1.conv.weight.data[keep_idxs]
        conv1.conv.out_channels = n
        
        if conv1.conv.bias is not None:
            conv1.conv.bias.data = conv1.conv.bias.data[keep_idxs]

        if not isinstance(conv2, list):
            conv2 = [conv2]
            
        for item in conv2:
            if item is not None:
                if isinstance(item, Conv):
                    conv = item.conv
                else:
                    conv = item
                conv.in_channels = n
                conv.weight.data = conv.weight.data[:, keep_idxs]
        
    def prune(m1, m2):
        if isinstance(m1, C2f):      # C2f as a top conv
            m1 = m1.cv2
        
        if not isinstance(m2, list): # m2 is just one module
            m2 = [m2]
            
        for i, item in enumerate(m2):
            if isinstance(item, C2f) or isinstance(item, SPPF):
                m2[i] = item.cv1
        
        prune_conv(m1, m2)

    for name, m in model.named_modules():
        if isinstance(m, Bottleneck):
            prune_conv(m.cv1, m.cv2)
            
    seq = model.model
    # Prune backbone layers 3-8 into their immediate successors; layers 4 and 6
    # are skipped because their outputs also feed Concat layers in the head.
    for i in range(3, 9):
        if i in [6, 4, 9]: continue
        prune(seq[i], seq[i+1])
        
    # Prune the three feature maps that feed the Detect head, then the internal
    # convs of each cv2/cv3 branch.
    detect: Detect = seq[-1]
    last_inputs   = [seq[15], seq[18], seq[21]]
    colasts       = [seq[16], seq[19], None]
    for last_input, colast, cv2, cv3 in zip(last_inputs, colasts, detect.cv2, detect.cv3):
        prune(last_input, [colast, cv2[0], cv3[0]])
        prune(cv2[0], cv2[1])
        prune(cv2[1], cv2[2])
        prune(cv3[0], cv3[1])
        prune(cv3[1], cv3[2])

    for name, p in yolo.model.named_parameters():
        p.requires_grad = True

    # Save the pruned weights
    torch.save(yolo.ckpt, "prune.pt")
    print("done")

    
4. Training the pruned model
    --- 1) Comment out the code added in step 1
    --- 2) In ultralytics/engine/model.py, around line 335, modify as follows (rebuilding the model via get_model from its YAML would restore the original, un-pruned channel counts, so the trainer has to reuse the pruned model object directly):
        self.trainer = (trainer or self._smart_load('trainer'))(overrides=args, _callbacks=self.callbacks)
        if not args.get('resume'):  # manually set model only if not resuming
            # For retraining after pruning, comment out the two original lines
            # below and assign the pruned model to the trainer directly:
            # self.trainer.model = self.trainer.get_model(weights=self.model if self.ckpt else None, cfg=self.model.yaml)
            # self.model = self.trainer.model
            # ========== Added ==========
            self.trainer.model = self.model.train()
            # ========== Added ==========

        self.trainer.hub_session = self.session  # attach optional HUB session

     --- 3) Fine-tune the pruned model

--ultralytics-main/train.py
    Training code:
        from ultralytics import YOLO
        import torch

        model = YOLO('prune.pt')  # the pruned weights saved in step 3

        yaml_path = 'my_datasets.yaml'

        device = 'cuda' if torch.cuda.is_available() else 'cpu'

        model.train(data=yaml_path, imgsz=640, batch=8, device=device)

        Note: if multiple GPUs are available, pin training to a single one (e.g. device=0); otherwise tensors can end up on different devices and operations between them will fail.

        Note: this model.py modification is not in place during the first (sparsity) training round; when you add the sparsification code again for another pruning round (i.e. when repeating step 1), revert the change made in this step.
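
To quantify what pruning cost in accuracy, the original and the pruned + fine-tuned weights can be validated on the same dataset. A sketch (the second run directory is hypothetical; point it at the actual output folder of this fine-tuning run):

    from ultralytics import YOLO

    for tag, weights in [
        ("baseline", "runs/detect/train24/weights/last.pt"),  # step 2 result
        ("pruned",   "runs/detect/train25/weights/best.pt"),  # hypothetical fine-tune run
    ]:
        metrics = YOLO(weights).val(data="my_datasets.yaml", imgsz=640)
        print(tag, "mAP50-95:", metrics.box.map)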
  
======================================================================
Notes:
1. Steps 3 and 4 go together: finish the code changes of step 4 before running prune_new.py.

2. Running prune_new.py may fail with an error saying (roughly) that tensors are on two different devices, CPU and GPU, and cannot be used in the same operation. To fix it, open ultralytics/utils/loss.py, find (around line 160) the line
    pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype))
and add the following two lines directly above it:
    pred_dist = pred_dist.to(self.device)
    self.proj = self.proj.to(self.device)

This resolves the problem.
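
The underlying issue is a plain device mismatch: one of the two operands of the matmul ends up on a different device from the other. A minimal, self-contained illustration of the error and the fix (the tensor shapes are placeholders, not the real ones from loss.py; here the projection vector is deliberately left on the CPU):

    import torch

    if torch.cuda.is_available():
        pred_dist = torch.randn(2, 8400, 4, 16, device="cuda")
        proj = torch.arange(16, dtype=torch.float)  # deliberately left on the CPU

        try:
            pred_dist.softmax(3).matmul(proj)
        except RuntimeError as e:
            print("without the fix:", e)

        # The fix: move both operands onto the same device before the matmul
        proj = proj.to(pred_dist.device)
        print("with the fix:", pred_dist.softmax(3).matmul(proj).shape)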
