Converting an ONNX model to TensorRT and running inference

References:
TensorRT official documentation
CookBook

Converting ONNX to TensorRT

# TensorRT 8.5.3.1
# CUDA 11.6

import tensorrt as trt
logger = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(logger)
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(EXPLICIT_BATCH)
parser = trt.OnnxParser(network, logger)
success = parser.parse_from_file("alexnet_axes1.onnx")

if not success:
    for idx in range(parser.num_errors):
        print(parser.get_error(idx))
    raise RuntimeError("failed to parse the ONNX file")

config = builder.create_builder_config()
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 20)  # 1 MiB; increase for larger models

# Dynamic input: if the batch size is greater than 1 or the image size is not
# fixed, you must define an optimization profile with min, opt, and max shapes.
# Mind whether the layout is NCHW or NHWC; the input name must match the ONNX model.
profile = builder.create_optimization_profile()
profile.set_shape("input1", (1, 3, 224, 224), (3, 3, 224, 224), (16, 3, 224, 224))
#profile.set_shape("input1", (1, 224, 224, 3), (3, 448, 448, 3), (16, 800, 800, 3))  # NHWC variant
config.add_optimization_profile(profile)

serialized_engine = builder.build_serialized_network(network, config)
with open("alexnet_axes1.engine", 'wb') as ff:
    ff.write(serialized_engine)
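If the GPU supports fast FP16, the build can optionally use mixed precision for a faster engine. A minimal sketch; place it before the build_serialized_network call above, and verify output accuracy afterwards:

# Optional: enable FP16 before building, if the hardware supports it
if builder.platform_has_fast_fp16:
    config.set_flag(trt.BuilderFlag.FP16)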

Inspecting the ONNX model's inputs and outputs

import onnx

onnx_model = onnx.load_model("a.onnx")
print(onnx_model.graph.input)
print(onnx_model.graph.output)
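For completeness, a dynamic-batch ONNX file like the one used above can be exported from PyTorch via dynamic_axes. A minimal sketch, assuming torchvision's AlexNet and the input name input1 from this post (the output name output1 is an assumption for illustration):

import torch
import torchvision

model = torchvision.models.alexnet(pretrained=True).eval()
dummy_input = torch.randn(1, 3, 224, 224)
torch.onnx.export(model, dummy_input, "alexnet_axes1.onnx",
                  input_names=["input1"], output_names=["output1"],  # output name is hypothetical
                  dynamic_axes={"input1": {0: "batch"}, "output1": {0: "batch"}})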

Common errors

Both of the errors below are caused by the profile's input name or shape not matching the ONNX model; again, mind whether the layout is NCHW or NHWC.

Error Code 4: Internal Error (Network has dynamic or shape inputs, but no optimization profile has been defined.)
Error Code 4: Internal Error (input_1: dynamic input is missing dimensions in profile 0.)
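To track these down, print the parsed network's input names and shapes right after parsing, and make sure the profile uses exactly those names and the same layout. A minimal sketch:

# Run right after parser.parse_from_file(...); dynamic dimensions print as -1
for i in range(network.num_inputs):
    inp = network.get_input(i)
    print(inp.name, inp.shape)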

Inference with the TensorRT engine

import time
import numpy as np
import tensorrt as trt
# pip install cuda-python
from cuda import cudart

logger = trt.Logger(trt.Logger.WARNING)
runtime = trt.Runtime(logger)

with open("alexnet_axes1.engine", 'rb') as ff:
    engine = runtime.deserialize_cuda_engine(ff.read())

nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    mode = "Input " if i < nInput else "Output"
    print("[%2d]%s->" % (i, mode),
          engine.get_tensor_dtype(lTensorName[i]),
          engine.get_tensor_shape(lTensorName[i]),
          context.get_tensor_shape(lTensorName[i]),
          lTensorName[i])

def predict(dummy_input):
    # Bind the actual input shape for this call (required with dynamic shapes)
    context.set_input_shape(lTensorName[0], dummy_input.shape)
    bufferH = []
    bufferH.append(np.ascontiguousarray(dummy_input))
    # Allocate host buffers for the outputs, using the shapes the context resolved
    for i in range(nInput, nIO):
        bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
    
    # Allocate matching device buffers
    bufferD = []
    for i in range(nIO):
        bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

    # Copy inputs host -> device
    for i in range(nInput):
        cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

    # Tell the context where every tensor lives on the device
    for i in range(nIO):
        context.set_tensor_address(lTensorName[i], int(bufferD[i]))

    context.execute_async_v3(0)  # 0 = default CUDA stream

    # Copy outputs device -> host
    for i in range(nInput, nIO):
        cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
    
    # Optional: print every host buffer for debugging
    # for i in range(nIO):
    #     print(lTensorName[i])
    #     print(bufferH[i])

    # Free the device buffers and return the first output tensor
    for b in bufferD:
        cudart.cudaFree(b)
    return bufferH[nInput]

if __name__ == "__main__":
    dummy_input = np.random.randn(1, 3, 224, 224).astype(np.float32)
    result = predict(dummy_input)
    print(np.argmax(result, axis=1))
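Because the profile built earlier covers batch sizes 1 to 16, the same predict function also accepts batched input, for example:

batch_input = np.random.randn(8, 3, 224, 224).astype(np.float32)
batch_result = predict(batch_input)  # one prediction per image in the batch
print(np.argmax(batch_result, axis=1))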

TensorRT inference returns NaN values

This is caused by feeding input of the wrong data type. A PyTorch model's default
dtype is float32, so the exported ONNX model (and the engine built from it) also
expects float32, while images read with OpenCV or PIL are uint8. Convert them first:

image.astype(np.float32)
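For real images the full preprocessing chain matters as well. A minimal sketch with OpenCV, assuming the NCHW AlexNet input above (the file name is a placeholder, and the mean/std are the usual ImageNet values, which may not match your training setup):

import cv2
import numpy as np

image = cv2.imread("test.jpg")                  # uint8, HWC, BGR
image = cv2.resize(image, (224, 224))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image.astype(np.float32) / 255.0        # uint8 -> float32
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
image = (image - mean) / std                    # broadcast over HWC
image = image.transpose(2, 0, 1)[np.newaxis]    # HWC -> NCHW with batch dim

result = predict(np.ascontiguousarray(image))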