TensorRT sample: yolov3_onnx

This post describes how to optimize a YOLOv3 model with TensorRT 5.0.2.6 and ONNX 1.2.1 in a Python environment. First, yolov3_to_onnx.py converts the YOLOv3 model to ONNX format; then onnx_to_tensorrt.py turns the ONNX model into a TensorRT engine to speed up inference. The data_processing.py file handles the input pre-processing.
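To make that second step concrete, here is a minimal sketch of what the ONNX-to-TensorRT conversion boils down to with the TensorRT 5 Python API. The helper name build_engine and the yolov3.onnx path are illustrative, not taken from the sample itself:

import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

def build_engine(onnx_file_path):
    # Parse the ONNX graph and build a CUDA engine (TensorRT 5.x implicit-batch API).
    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network()
    parser = trt.OnnxParser(network, TRT_LOGGER)
    builder.max_workspace_size = 1 << 30   # workspace for layer tactic selection
    builder.max_batch_size = 1
    with open(onnx_file_path, 'rb') as model:
        parser.parse(model.read())         # populate 'network' from the ONNX file
    return builder.build_cuda_engine(network)

engine = build_engine('yolov3.onnx')       # file produced by yolov3_to_onnx.py

The actual script wraps this with more handling (saving the engine, running inference on a test image), and data_processing.py provides the pre- and post-processing mentioned above.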


Gripe: the official samples ( https://github.com/NVIDIA/TensorRT/samples/python) still have to be run with Python 2, and they contain quite a few errors…
Reference: https://www.cnblogs.com/shouhuxianjian/p/10550262.html, with minor modifications.

Environment:

onnx 1.2.1
TensorRT-5.0.2.6
CUDA 10.0

Results:

[result screenshot]

yolov3_to_onnx.py

from __future__ import print_function
from collections import OrderedDict
import hashlib
import os.path
import wget
import onnx
from onnx import helper
from onnx import TensorProto
import numpy as np
 
import sys
 
class DarkNetParser(object):
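    """Parses a DarkNet .cfg file into an OrderedDict mapping layer names to parameter dicts."""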
    def __init__(self, supported_layers):
        self.layer_configs = OrderedDict()
        self.supported_layers = supported_layers
        self.layer_counter = 0
 
    def parse_cfg_file(self, cfg_file_path):
        with open(cfg_file_path, 'rb') as cfg_file:
            remainder = cfg_file.read()
            remainder = remainder.decode('utf-8') 
            while remainder:
                layer_dict, layer_name, remainder = self._next_layer(remainder)
                if layer_dict:
                    self.layer_configs[layer_name] = layer_dict
 
        return self.layer_configs
 
    def _next_layer(self, remainder):
        remainder = remainder.split('[', 1)
        if len(remainder) == 2:
            remainder = remainder[1]
        else:
            return None, None, None
        remainder = remainder.split(']', 1)
        if len(remainder) == 2:
            layer_type, remainder = remainder
        else:
            return None, None, None
        if remainder.replace(' ', '')[0] == '#':
            remainder = remainder.split('\n', 1)[1]
        layer_param_block, remainder = remainder.split('\n\n', 1)
        layer_param_lines = layer_param_block.split('\n')[1:]
 
        layer_name = str(self.layer_counter).zfill(3) + '_' + layer_type 
        layer_dict = dict(type=layer_type)
 
        if layer_type in self.supported_layers:
            for param_line in layer_param_lines:
                if param_line[0] == '#':
                    continue
                param_type, param_value = self._parse_params(param_line)
                layer_dict[param_type] = param_value
 
        self.layer_counter += 1
 
        return layer_dict, layer_name, remainder
 
    def _parse_params(self, param_line):
        param_line = param_line.replace(' ', '') 
        param_type, param_value_raw = param_line.split('=')
        param_value = None
        if param_type == 'layers':
            layer_indexes = list()
            for index in param_value_raw.split(','):
                layer_indexes.append(int(index))
            param_value = layer_indexes
        elif isinstance(param_value_raw, str) and not param_value_raw.isalpha():
            condition_param_value_positive = param_value_raw.isdigit()
            condition_param_value_negative = param_value_raw[0] == '-' and \
                param_value_raw[1:].isdigit()
            if condition_param_value_positive or condition_param_value_negative:
                param_value = int(param_value_raw)
            else:
                param_value = float(param_value_raw)
        else:
            param_value = str(param_value_raw)
 
        return param_type, param_value
 
class MajorNodeSpecs(object):
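    """Stores the output name and channel count of a major ONNX node, and whether it was created."""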
 
    def __init__(self, name, channels):
        self.name = name
        self.channels = channels
        self.created_onnx_node = False
        if name is not None and isinstance(channels, int) and channels > 0:
            self.created_onnx_node = True
class ConvParams(object):
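    """Holds a conv layer's node name, batch-norm flag and weight dims, and builds its parameter tensor names."""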
    def __init__(self, node_name, batch_normalize, conv_weight_dims):
        self.node_name = node_name
        self.batch_normalize = batch_normalize
        assert len(conv_weight_dims) == 4
        self.conv_weight_dims = conv_weight_dims
 
    def generate_param_name(self, param_category, suffix):
        assert suffix
        assert param_category in ['bn', 'conv']
        assert(suffix in ['scale', 'mean', 'var', 'weights', 'bias'])
        if param_category == 'bn':
            assert self.batch_normalize
            assert suffix in ['scale', 'bias', 'mean', 'var']
        elif param_category == 'conv':
            assert suffix in ['weights', 'bias']
            if suffix == 'bias':
                assert not self.batch_normalize
        param_name = self.node_name + '_' + param_category + '_' + suffix
        return param_name
class WeightLoader(object):
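    """Reads the DarkNet .weights file and emits ONNX initializer/input tensors for conv and BN parameters."""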
 
    def __init__(self, weights_file_path):
        self.weights_file = self._open_weights_file(weights_file_path)
 
    def load_conv_weights(self, conv_params):
        initializer = list()
        inputs = list()
        if conv_params.batch_normalize:
            bias_init, bias_input = self._create_param_tensors(
                conv_params, 'bn', 'bias')
            bn_scale_init, bn_scale_input = self._create_param_tensors(
                conv_params, 'bn', 'scale')
            bn_mean_init, bn_mean_input = self._create_param_tensors(
                conv_params, 'bn', 'mean')
            bn_var_init, bn_var_input = self._create_param_tensors(
                conv_params, 'bn', 'var')
            initializer.extend(
                [bn_scale_init, bias_init, bn_mean_init, bn_var_init])
            inputs.extend([bn_scale_input, bias_input,
                           bn_mean_input, bn_var_input])
        else:
            bias_init, bias_input = self._create_param_tensors(
                conv_params, 'conv', 'bias')
            initializer.append(bias_init)
            inputs.append(bias_input)
        conv_init, conv_input = self._create_param_tensors(
            conv_params, 'conv', 'weights')
        initializer.append(conv_init)
        inputs.append(conv_input)
 
        return initializer, inputs
 
    def _open_weights_file(self, weights_file_path):
        weights_file = open(weights_file_path, 'rb')
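        # Skip the 5 int32 values that form the DarkNet weights-file header.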
        length_header = 5
        np.ndarray(
            shape=(length_header, ), dtype='int32', buffer=weights_file.read(
                length_header * 4))
        return weights_file
 
    def _create_param_tensors(self, conv_params, param_category, suffix):
        param_name, param_data, param_data_shape = self._load_one_param_type(
            conv_params, param_category, suffix)
        initializer_tensor = helper.make_tensor(
            param_name, TensorProto.FLOAT, param_data_shape, param_data)
        input_tensor = helper.make_tensor_value_info(
            param_name, TensorProto.FLOAT, param_data_shape)
 
        return initializer_tensor, input_tensor
 
    def _load_one_param_type(self, conv_params, param_category, suffix):
        param_name = conv_params.generate_param_name(param_category, suffix)
        channels_out, channels_in, filter_h, filter_w = conv_params.conv_weight_dims
 
        if param_category == 'bn':
            param_shape = [channels_out]
        elif param_category == 'conv':
            if suffix == 'weights':
                param_shape = [channels_out, channels_in, filter_h, filter_w]
            elif suffix == 'bias':
                param_shape = [channels_out]
        # Read the raw float32 data for this parameter from the weights stream.
        param_size = np.product(np.array(param_shape))
        param_data = np.ndarray(
            shape=param_shape,
            dtype='float32',
            buffer=self.weights_file.read(param_size * 4))
        param_data = param_data.flatten().astype(float)
        return param_name, param_data, param_shape
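For a quick sanity check, the cfg parser above can be driven on its own. The layer-type whitelist and the yolov3.cfg path below are assumptions matching the standard Darknet config, not something fixed by the code itself:

# Hypothetical standalone use of DarkNetParser; assumes yolov3.cfg has already
# been downloaded (e.g. from the official Darknet repository) into the working dir.
supported_layers = ['net', 'convolutional', 'shortcut', 'route', 'upsample']
parser = DarkNetParser(supported_layers)
layer_configs = parser.parse_cfg_file('yolov3.cfg')
print('parsed %d layer sections' % len(layer_configs))   # one entry per [section] block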