Run an ONNX model with the TensorFlow backend (onnx-tf) and onnxruntime-gpu


import onnx
onnx.__version__
'1.8.0'

1. Prepare the model

import torchvision.models as models
resnet18 = models.resnet18(pretrained=True)
Downloading: "https://download.pytorch.org/models/resnet18-5c106cde.pth" to C:\Users\KangningCAI/.cache\torch\hub\checkpoints\resnet18-5c106cde.pth
100.0%
import torch
import torchvision

dummy_input = torch.randn(10, 3, 224, 224, device='cuda')
model = torchvision.models.resnet18(pretrained=True).cuda()
print(model(dummy_input))
# Providing input and output names sets the display names for values in the
# model's graph. This does not change the semantics of the graph; it only
# makes it more readable.
# The inputs to the network consist of the flat list of inputs (i.e. the
# values passed to forward()) followed by the flat list of parameters. You
# can partially specify names, i.e. provide a list shorter than the number
# of model inputs, and names will be assigned starting from the beginning.
input_names = [ "actual_input_1" ] + [ "learned_%d" % i for i in range(16) ]
output_names = [ "output1" ]

torch.onnx.export(model, dummy_input, "resnet18.onnx", verbose=True,
                  input_names=input_names, output_names=output_names)
tensor([[-1.4664, -1.2065, -0.2031,  ..., -0.3920,  5.7189,  3.1503],
        [-1.5115, -0.8432,  0.7770,  ...,  1.0732, -1.6643,  0.2798],
        [ 0.3694, -1.5528, -1.0725,  ...,  0.9766,  4.3036,  0.3204],
        ...,
        [ 1.1199,  0.8067,  2.0376,  ...,  0.2713, -0.2034,  0.0839],
        [-0.0470,  0.7559, -1.8203,  ..., -0.0480,  1.7802,  1.0056],
        [-1.1093, -2.6424, -1.1345,  ..., -1.4737,  0.6720,  0.4368]],
       device='cuda:0', grad_fn=<AddmmBackward>)
graph(%actual_input_1 : Float(10:150528, 3:50176, 224:224, 224:1, requires_grad=0, device=cuda:0),
      %fc.weight : Float(1000:512, 512:1, requires_grad=1, device=cuda:0),
      %fc.bias : Float(1000:1, requires_grad=1, device=cuda:0),
      %193 : Float(64:147, 3:49, 7:7, 7:1, requires_grad=0, device=cuda:0),
      %194 : Float(64:1, requires_grad=0, device=cuda:0),
      %196 : Float(64:576, 64:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %197 : Float(64:1, requires_grad=0, device=cuda:0),
      %199 : Float(64:576, 64:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %200 : Float(64:1, requires_grad=0, device=cuda:0),
      %202 : Float(64:576, 64:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %203 : Float(64:1, requires_grad=0, device=cuda:0),
      %205 : Float(64:576, 64:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %206 : Float(64:1, requires_grad=0, device=cuda:0),
      %208 : Float(128:576, 64:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %209 : Float(128:1, requires_grad=0, device=cuda:0),
      %211 : Float(128:1152, 128:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %212 : Float(128:1, requires_grad=0, device=cuda:0),
      %214 : Float(128:64, 64:1, 1:1, 1:1, requires_grad=0, device=cuda:0),
      %215 : Float(128:1, requires_grad=0, device=cuda:0),
      %217 : Float(128:1152, 128:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %218 : Float(128:1, requires_grad=0, device=cuda:0),
      %220 : Float(128:1152, 128:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %221 : Float(128:1, requires_grad=0, device=cuda:0),
      %223 : Float(256:1152, 128:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %224 : Float(256:1, requires_grad=0, device=cuda:0),
      %226 : Float(256:2304, 256:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %227 : Float(256:1, requires_grad=0, device=cuda:0),
      %229 : Float(256:128, 128:1, 1:1, 1:1, requires_grad=0, device=cuda:0),
      %230 : Float(256:1, requires_grad=0, device=cuda:0),
      %232 : Float(256:2304, 256:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %233 : Float(256:1, requires_grad=0, device=cuda:0),
      %235 : Float(256:2304, 256:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %236 : Float(256:1, requires_grad=0, device=cuda:0),
      %238 : Float(512:2304, 256:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %239 : Float(512:1, requires_grad=0, device=cuda:0),
      %241 : Float(512:4608, 512:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %242 : Float(512:1, requires_grad=0, device=cuda:0),
      %244 : Float(512:256, 256:1, 1:1, 1:1, requires_grad=0, device=cuda:0),
      %245 : Float(512:1, requires_grad=0, device=cuda:0),
      %247 : Float(512:4608, 512:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %248 : Float(512:1, requires_grad=0, device=cuda:0),
      %250 : Float(512:4608, 512:9, 3:3, 3:1, requires_grad=0, device=cuda:0),
      %251 : Float(512:1, requires_grad=0, device=cuda:0)):
  %192 : Float(10:802816, 64:12544, 112:112, 112:1, requires_grad=1, device=cuda:0) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[7, 7], pads=[3, 3, 3, 3], strides=[2, 2]](%actual_input_1, %193, %194)
  %125 : Float(10:802816, 64:12544, 112:112, 112:1, requires_grad=1, device=cuda:0) = onnx::Relu(%192) # D:\Anaconda3\Anaconda3_201910_64\envs\AI_gpu\lib\site-packages\torch\nn\functional.py:1134:0
  %126 : Float(10:200704, 64:3136, 56:56, 56:1, requires_grad=1, device=cuda:0) = onnx::MaxPool[kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[2, 2]](%125) # D:\Anaconda3\Anaconda3_201910_64\envs\AI_gpu\lib\site-packages\torch\nn\functional.py:586:0
  %195 : Float(10:200704, 64:3136, 56:56, 56:1, requires_grad=1, device=cuda:0) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[1, 1]](%126, %196, %197)
  %129 : Float(10:200704, 64:3136, 56:56, 56:1, requires_grad=1, device=cuda:0) = onnx::Relu(%195) # D:\Anaconda3\Anaconda3_201910_64\envs\AI_gpu\lib\site-packages\torch\nn\functional.py:1134:0
  %198 : Float(10:200704, 64:3136, 56:56, 56:1, requires_grad=1, device=cuda:0) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[1, 1]](%129, %199, %200)
  %132 : Float(10:200704, 64:3136, 56:56, 56:1, requires_grad=1, device=cuda:0) = onnx::Add(%198, %126)
  %133 : Float(10:200704, 64:3136, 56:56, 56:1, requires_grad=1, device=cuda:0) = onnx::Relu(%132) # D:\Anaconda3\Anaconda3_201910_64\envs\AI_gpu\lib\site-packages\torch\nn\functional.py:1134:0
  %201 : Float(10:200704, 64:3136, 56:56, 56:1, requires_grad=1, device=cuda:0) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[3, 3], pads=[1, 1, 1, 1], strides=[1, 1]](%133, %202, %203)
  %136 : Float(10:200704, 64:3136, 56:56, 56:1, requires_grad=1, device=cuda:0) = onnx::Relu(%201) # D:\Anaconda3\Anaconda3_201910_64\envs\AI_gpu\lib\site-packages\torch\nn\functional.py:1134:0
  %204 : Float(10:200704, 64:3136, 56:56, 56:1, requires_grad
  ... (verbose graph output truncated)
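
2. Run the model with onnxruntime-gpu

With resnet18.onnx written to disk, a quick sanity check confirms that the file is a structurally valid ONNX graph and that onnxruntime-gpu can execute it. This is a minimal sketch, assuming onnxruntime-gpu is installed with a working CUDA setup; the input name "actual_input_1" and output name "output1" come from the export call above.

import numpy as np
import onnx
import onnxruntime as ort

# Structural validation of the exported file.
onnx.checker.check_model(onnx.load("resnet18.onnx"))

# Run the model on the GPU through the CUDA execution provider.
sess = ort.InferenceSession("resnet18.onnx",
                            providers=["CUDAExecutionProvider"])
x = np.random.randn(10, 3, 224, 224).astype(np.float32)
(y,) = sess.run(["output1"], {"actual_input_1": x})
print(y.shape)  # (10, 1000)

The outputs can be compared against the PyTorch results printed above (e.g. with np.allclose at a loose tolerance) to confirm the export preserved the network's behavior.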
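3. Run the model with the onnx-tf backend

The same file can also be executed through the TensorFlow backend. A minimal sketch, assuming the onnx-tf package is installed in a version compatible with onnx 1.8.0:

import numpy as np
import onnx
from onnx_tf.backend import prepare

# Convert the ONNX graph to a TensorFlow representation.
tf_rep = prepare(onnx.load("resnet18.onnx"))

x = np.random.randn(10, 3, 224, 224).astype(np.float32)
out = tf_rep.run(x)        # outputs are keyed by the ONNX output names
print(out.output1.shape)   # (10, 1000)

Note that prepare() runs the conversion eagerly, so any operator in the graph that onnx-tf does not support will fail here rather than at run() time.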