ONNX export for Ultralytics YOLOv8.0.225

ONNX export code for Ultralytics YOLOv8.0.225 🚀:

import argparse
import os
import torch
from onnxsim import simplify
from ultralytics.nn import SegmentationModel
from ultralytics.nn.modules import C2f
from ultralytics.nn.tasks import attempt_load_weights

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='../yolov8n-cls.pt', help='weights path')
    parser.add_argument('--official_weights_onnx', type=str, default="official_weights_onnx",
                        help='official_weights_onnx')
    parser.add_argument('--img_size', nargs='+', type=int, default=[640, 640], help='image size')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    opt = parser.parse_args()
    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand a single value, e.g. [640] -> [640, 640]
    save_p = opt.official_weights_onnx
    if not os.path.exists(save_p):
        os.makedirs(save_p)
    img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # dummy input used for tracing
    model = attempt_load_weights(opt.weights,
                                 device=torch.device('cpu'),
                                 inplace=True,
                                 fuse=True)
    model.model[-1].export = True  # put the model head into export mode (read by Detect/Segment heads)
    for k, m in model.named_modules():
        if isinstance(m, C2f):
            # EdgeTPU does not support FlexSplitV; forward_split uses split() and gives a cleaner ONNX graph
            m.forward = m.forward_split
        else:
            m.dynamic = False  # export with static shapes
            m.export = True
            m.format = "onnx"

    model.eval()
    y = model(img)  # dry run before export; output directory was already created above
    # ONNX export
    try:
        import onnx

        print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
        f = os.path.join(save_p, "BS_" + str(opt.batch_size) + "_" + str(opt.img_size[0]) + "_"
                         + os.path.basename(opt.weights).replace(".pt", ".onnx"))  # output filename
        model.fuse()  # no-op here: the model was already fused by attempt_load_weights(fuse=True)

        print("===========  onnx =========== ")

        torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
                          output_names=['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0'])

        # Checks
        onnx_model = onnx.load(f)  # load the exported onnx model
        onnx.checker.check_model(onnx_model)  # validate the graph
        print(onnx.helper.printable_graph(onnx_model.graph))  # print a human-readable graph
        print('ONNX export success, saved as %s' % f)

        # Simplify with onnx-simplifier
        model_simp, check = simplify(onnx_model)
        assert check, "Simplified ONNX model could not be validated"
        sim_f = f.replace("BS_", "sim_BS_", 1)  # e.g. .../BS_1_640_x.onnx -> .../sim_BS_1_640_x.onnx
        onnx.save(model_simp, sim_f)
        print('finished exporting simplified onnx as', sim_f)
    except Exception as e:
        print('ONNX export failure: %s' % e)
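The script above can be run as, for example, python export_onnx.py --weights ../yolov8n-cls.pt --img_size 640 --batch-size 1 (export_onnx.py is only a placeholder name for the file containing the code).

After the export, a quick check with onnxruntime confirms that the simplified model loads and produces outputs of the expected shape. This is a minimal sketch, assuming onnxruntime is installed and that the file name follows the pattern produced above:

import numpy as np
import onnxruntime as ort

# Path follows the naming pattern of the export script; adjust to the actual file produced
onnx_path = "official_weights_onnx/sim_BS_1_640_yolov8n-cls.onnx"
session = ort.InferenceSession(onnx_path, providers=["CPUExecutionProvider"])

# Dummy input matching the exported shape (batch, 3, H, W)
dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)
input_name = session.get_inputs()[0].name  # 'images', as set in torch.onnx.export
outputs = session.run(None, {input_name: dummy})
for out in outputs:
    print(out.shape)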