A detailed code tutorial: defining a custom PyTorch operator and exporting the ONNX graph

(torch_env) root@f123:/mnt/d/work/learning-cuda-trt/learning-cuda-trt-main/tensorrt-basic-1.8-integrate-easyplugin# python gen-onnx.py
The corresponding opset folder code is here: /root/anaconda3/envs/torch_env/lib/python3.9/site-packages/torch/onnx
inference output =
tensor([[[[3.9281, 5.9852, 3.9281],
          [5.9852, 8.9989, 5.9852],
          [3.9281, 5.9852, 3.9281]]],


        [[[0.7311, 2.8577, 2.8577],
          [2.8577, 3.9281, 2.8577],
          [2.8577, 2.8577, 0.7311]]]], grad_fn=<MYSELUImplBackward>)
/root/anaconda3/envs/torch_env/lib/python3.9/site-packages/torch/onnx/utils.py:90: UserWarning: 'enable_onnx_checker' is deprecated and ignored. It will be removed in the next PyTorch release. To proceed despite ONNX checker failures, catch torch.onnx.ONNXCheckerError.
  warnings.warn("'enable_onnx_checker' is deprecated and ignored. It will be removed in "
==================================call symbolic
Warning: Unsupported operator Plugin. No schema registered for this operator.
Warning: Unsupported operator Plugin. No schema registered for this operator.
Warning: Unsupported operator Plugin. No schema registered for this operator.
graph(%image : Float(*, 1, *, *, strides=[9, 9, 3, 1], requires_grad=0, device=cpu),
      %conv.weight : Float(1, 1, 3, 3, strides=[9, 9, 3, 1], requires_grad=1, device=cpu),
      %conv.bias : Float(1, strides=[1], requires_grad=1, device=cpu),
      %myselu.param : Float(3, strides=[1], requires_grad=1, device=cpu)):
  %4 : Float(*, 1, *, *, strides=[9, 9, 3, 1], requires_grad=1, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[1, 1]](%image, %conv.weight, %conv.bias) # /root/anaconda3/envs/torch_env/lib/python3.9/site-packages/torch/nn/modules/conv.py:442:0
  %5 : Float(3, strides=[1], device=cpu) = onnx::Constant[value= 3  2  1 [ CPUFloatType{3} ]]()
  %output : Float(1, 1, 3, 3, strides=[9, 9, 3, 1], requires_grad=1, device=cpu) = onnx::Plugin[info="{\"attr1_s\": \"\0-0\/0/\/,)\0-.\/+0\/./\0--\/.-\/+/\0-/\/.,\/-.\0-,\//0\//*\0--\//)\/,.\0-.\.00\/-/\", \"attr2_i\": [1, 2, 3], \"attr3_f\": 222}", name="MYSELU"](%4, %myselu.param, %5) # /mnt/d/work/learning-cuda-trt/learning-cuda-trt-main/tensorrt-basic-1.8-integrate-easyplugin/gen-onnx.py:38:0
  return (%output)

Traceback (most recent call last):
  File "/root/anaconda3/envs/torch_env/lib/python3.9/site-packages/torch/onnx/utils.py", line 785, in _export
    _check_onnx_proto(proto)
RuntimeError: No Op registered for Plugin with domain_version of 11

==> Context: Bad node spec for node. Name: Plugin_2 OpType: Plugin

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/mnt/d/work/learning-cuda-trt/learning-cuda-trt-main/tensorrt-basic-1.8-integrate-easyplugin/gen-onnx.py", line 80, in <module>
    torch.onnx.export(
  File "/root/anaconda3/envs/torch_env/lib/python3.9/site-packages/torch/onnx/__init__.py", line 316, in export
    return utils.export(model, args, f, export_params, verbose, training,
  File "/root/anaconda3/envs/torch_env/lib/python3.9/site-packages/torch/onnx/utils.py", line 107, in export
    _export(model, args, f, export_params, verbose, training, input_names, output_names,
  File "/root/anaconda3/envs/torch_env/lib/python3.9/site-packages/torch/onnx/utils.py", line 787, in _export
    raise ONNXCheckerError(e)
torch.onnx.utils.ONNXCheckerError: No Op registered for Plugin with domain_version of 11

==> Context: Bad node spec for node. Name: Plugin_2 OpType: Plugin
(torch_env) root@f123:/mnt/d/work/learning-cuda-trt/learning-cuda-trt-main/tensorrt-basic-1.8-integrate-easyplugin#
(torch_env) root@f123:/mnt/d/work/learning-cuda-trt/learning-cuda-trt-main/tensorrt-basic-1.8-integrate-easyplugin# python gen-onnx.py
The corresponding opset folder code is here: /root/anaconda3/envs/torch_env/lib/python3.9/site-packages/torch/onnx
inference output =
tensor([[[[3.9281, 5.9852, 3.9281],
          [5.9852, 8.9989, 5.9852],
          [3.9281, 5.9852, 3.9281]]],


        [[[0.7311, 2.8577, 2.8577],
          [2.8577, 3.9281, 2.8577],
          [2.8577, 2.8577, 0.7311]]]], grad_fn=<MYSELUImplBackward>)
/root/anaconda3/envs/torch_env/lib/python3.9/site-packages/torch/onnx/utils.py:267: UserWarning: `add_node_names' can be set to True only when 'operator_export_type' is `ONNX`. Since 'operator_export_type' is not set to 'ONNX', `add_node_names` argument will be ignored.
  warnings.warn("`{}' can be set to True only when 'operator_export_type' is "
/root/anaconda3/envs/torch_env/lib/python3.9/site-packages/torch/onnx/utils.py:267: UserWarning: `do_constant_folding' can be set to True only when 'operator_export_type' is `ONNX`. Since 'operator_export_type' is not set to 'ONNX', `do_constant_folding` argument will be ignored.
  warnings.warn("`{}' can be set to True only when 'operator_export_type' is "
==================================call symbolic
Warning: Unsupported operator Plugin. No schema registered for this operator.
Warning: Unsupported operator Plugin. No schema registered for this operator.
Warning: Unsupported operator Plugin. No schema registered for this operator.
graph(%image : Float(*, 1, *, *, strides=[9, 9, 3, 1], requires_grad=0, device=cpu),
      %conv.weight : Float(1, 1, 3, 3, strides=[9, 9, 3, 1], requires_grad=1, device=cpu),
      %conv.bias : Float(1, strides=[1], requires_grad=1, device=cpu),
      %myselu.param : Float(3, strides=[1], requires_grad=1, device=cpu)):
  %4 : Float(*, 1, *, *, strides=[9, 9, 3, 1], requires_grad=1, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[1, 1]](%image, %conv.weight, %conv.bias) # /root/anaconda3/envs/torch_env/lib/python3.9/site-packages/torch/nn/modules/conv.py:442:0
  %5 : Float(3, strides=[1], device=cpu) = onnx::Constant[value= 3  2  1 [ CPUFloatType{3} ]]()
  %output : Float(1, 1, 3, 3, strides=[9, 9, 3, 1], requires_grad=1, device=cpu) = onnx::Plugin[info="{\"attr1_s\": \"\0-0\/0/\/,)\0-.\/+0\/./\0--\/.-\/+/\0-/\/.,\/-.\0-,\//0\//*\0--\//)\/,.\0-.\.00\/-/\", \"attr2_i\": [1, 2, 3], \"attr3_f\": 222}", name="MYSELU"](%4, %myselu.param, %5) # /mnt/d/work/learning-cuda-trt/learning-cuda-trt-main/tensorrt-basic-1.8-integrate-easyplugin/gen-onnx.py:38:0
  return (%output)

Done.!
(torch_env)

Solution:

In the verbose graph above, the custom symbolic is serialized as an onnx::Plugin node (name="MYSELU") whose attributes are packed into a JSON "info" string, and the ONNX checker has no schema registered for that Plugin op, so the export fails at the checking step. The old workaround of disabling the checker,

    # enable_onnx_checker=False

is deprecated and ignored in recent PyTorch (see the warning in the first run). Change it to:

    operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK

With this operator export type the exporter no longer enforces the strict ONNX check that rejected the unregistered Plugin node, so the graph is still written out; only the "Unsupported operator Plugin" warnings remain, as in the second run above.

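Concretely, only the checker-related keyword argument of the torch.onnx.export(...) call changes. A minimal sketch of that call, with model and dummy defined as in the full script below:

    torch.onnx.export(
        model,
        (dummy,),
        "workspace/demo.onnx",
        input_names=["image"],
        output_names=["output"],
        opset_version=11,
        # enable_onnx_checker=False,  # deprecated and ignored in newer PyTorch
        operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
    )

The complete gen-onnx.py follows.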

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
import torch.autograd
import os

class MYSELUImpl(torch.autograd.Function):

    # reference: https://pytorch.org/docs/1.10/onnx.html#torch-autograd-functions
    @staticmethod
    def symbolic(g, x, p):
        print("==================================call symbolic")
        return g.op("MYSELU", x, p, 
            g.op("Constant", value_t=torch.tensor([3, 2, 1], dtype=torch.float32)),
            attr1_s="this is a string attribute",
            attr2_i=[1, 2, 3], 
            attr3_f=222
        )

    @staticmethod
    def forward(ctx, x, p):
        # x * sigmoid(x); p is only consumed by the ONNX symbolic above
        return x * 1 / (1 + torch.exp(-x))


class MYSELU(nn.Module):
    def __init__(self, n):
        super().__init__()
        self.param = nn.parameter.Parameter(torch.arange(n).float())

    def forward(self, x):
        return MYSELUImpl.apply(x, self.param)


class Model(nn.Module):
    def __init__(self):
        super().__init__()

        self.conv = nn.Conv2d(1, 1, 3, padding=1)
        self.myselu = MYSELU(3)
        self.conv.weight.data.fill_(1)
        self.conv.bias.data.fill_(0)
    
    def forward(self, x):
        x = self.conv(x)
        x = self.myselu(x)
        return x


# This package holds the opset-11 export code; if you want to change export details, this is where to edit
# import torch.onnx.symbolic_opset11
print("The corresponding opset folder code is here:", os.path.dirname(torch.onnx.__file__))

model = Model().eval()
input = torch.tensor([
    # batch 0
    [
        [1,   1,   1],
        [1,   1,   1],
        [1,   1,   1],
    ],
    # batch 1
    [
        [-1,   1,   1],
        [1,   0,   1],
        [1,   1,   -1]
    ]
], dtype=torch.float32).view(2, 1, 3, 3)

output = model(input)
print(f"inference output = \n{output}")

dummy = torch.zeros(1, 1, 3, 3)
torch.onnx.export(
    model, 

    # args: the inputs passed to the model; must be a tuple, hence the parentheses
    (dummy,),

    # path of the file to write
    "workspace/demo.onnx",

    # print verbose export information
    verbose=True,

    # name the input and output nodes so they are easier to inspect and manipulate later
    input_names=["image"],
    output_names=["output"],

    # opset_version controls how each operator is exported; 11 corresponds to symbolic_opset11
    opset_version=11,

    # mark batch, height and width as dynamic dimensions (they appear as -1 in the ONNX graph)
    # usually only batch is made dynamic; avoid dynamic shapes for the other dimensions
    dynamic_axes={
        "image": {0: "batch", 2: "height", 3: "width"},
        "output": {0: "batch", 2: "height", 3: "width"},
    },

    # for the plugin node, the strict ONNX check must not be enforced
    # enable_onnx_checker=False  # deprecated and ignored in newer PyTorch
    operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK

)

print("Done.!")

 
