TensorRT Optimization and Deployment (2) -- Dissecting the ONNX Architecture

Series Contents

Chapter 1: TensorRT Optimization and Deployment (1) -- TensorRT and ONNX Basics
Chapter 2: TensorRT Optimization and Deployment (2) -- Dissecting the ONNX Architecture
Chapter 3: TensorRT Optimization and Deployment (3) -- Registering ONNX Operators
Chapter 4: TensorRT Model Optimization and Deployment (4) -- The Roofline Model
Chapter 5: TensorRT Model Optimization and Deployment (5) -- Key Points for Model Optimization and Deployment
Chapter 6: TensorRT Model Optimization and Deployment (6) -- Quantization Basics (1)
Chapter 7: TensorRT Model Optimization and Deployment (7) -- Quantization (PTQ and QAT) (2)
Chapter 8: TensorRT Model Optimization and Deployment (8) -- Model Pruning



Preface

These are self-study notes from a video course, organized by topic; more material will be added over time.


1. What is Protobuf?

Protobuf (Protocol Buffers) is a data serialization format and protocol developed by Google: a mechanism for representing and serializing structured data.
Recommended reading: a detailed introduction to Protobuf
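An .onnx file is nothing more than a serialized ModelProto protobuf message, so the standard protobuf serialize/parse calls apply to it directly. Below is a minimal sketch (assuming only that the onnx Python package is installed) that builds a tiny model and round-trips it through bytes, which is essentially what onnx.save and onnx.load do:

import onnx
from onnx import helper, TensorProto

x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])
node  = helper.make_node('Identity', ['x'], ['y'])
graph = helper.make_graph([node], 'tiny', [x], [y])
model = helper.make_model(graph)           # a ModelProto protobuf message

data = model.SerializeToString()           # protobuf message -> bytes (what onnx.save writes)
restored = onnx.ModelProto()
restored.ParseFromString(data)             # bytes -> protobuf message (what onnx.load parses)
print(restored.graph.name)                 # 'tiny'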

2. Dissecting the ONNX Architecture

2.1 The ONNX architecture

The definitions of the various ONNX protos can be found in the official documentation.
For details on the ONNX IR (intermediate representation), see the IR documentation.

Whole-model information ---- ModelProto
Whole-network (graph) information ---- GraphProto
Compute nodes such as Conv and Linear ---- NodeProto
Tensor information, mainly the weights ---- TensorProto
Input/output information ---- ValueInfoProto
Recommended reading: how the related protos are used
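To make the hierarchy concrete, here is a small sketch of how these protos are reached from a loaded model (the file path is just a placeholder for any exported .onnx file):

import onnx

model = onnx.load("model.onnx")                      # ModelProto
graph = model.graph                                  # GraphProto
for node in graph.node:                              # NodeProto: op_type, inputs, outputs
    print(node.op_type, list(node.input), list(node.output))
for init in graph.initializer:                       # TensorProto: weights
    print(init.name, list(init.dims))
for vi in list(graph.input) + list(graph.output):    # ValueInfoProto: input/output info
    print(vi.name, vi.type.tensor_type.elem_type)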

2.2 Creating an ONNX model

ONNX provides a set of convenient helper APIs for building models programmatically; the most commonly used ones are listed below, and a short sketch of make_attribute follows the list.

  • onnx.helper.make_tensor
  • onnx.helper.make_tensor_value_info
  • onnx.helper.make_attribute
  • onnx.helper.make_node
  • onnx.helper.make_graph
  • onnx.helper.make_model
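Most of these helpers appear in the examples below; make_attribute is the one that usually stays out of sight, because attributes passed to make_node as keyword arguments are converted into AttributeProto entries internally. A quick sketch of that equivalence:

from onnx import helper

attr = helper.make_attribute("kernel_shape", [3, 3])   # builds an AttributeProto explicitly
print(attr)

# Keyword arguments to make_node become the same AttributeProto entries on the node
node = helper.make_node("Conv", ["x", "w"], ["y"], kernel_shape=[3, 3], pads=[1, 1, 1, 1])
print(node.attribute)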

2.2.1 Creating a simple ONNX model

import onnx
from onnx import helper
from onnx import TensorProto

# The organizational structure of an ONNX model:
#   - ModelProto (describes the whole model)
#   --- GraphProto (describes the whole network)
#   ------ NodeProto (describes each compute node, e.g. Conv, Linear)
#   ------ TensorProto (describes tensor data, mainly the weights)
#   ------ ValueInfoProto (describes input/output information)
#   ------ AttributeProto (describes the attributes of a node)


def create_onnx():
    # Create the ValueInfoProtos (graph inputs/outputs)
    a = helper.make_tensor_value_info('a', TensorProto.FLOAT, [10, 10])
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 10])
    b = helper.make_tensor_value_info('b', TensorProto.FLOAT, [10, 10])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10, 10])

    # Create the NodeProtos (inputs and outputs are lists of tensor names; the last argument is the node name)
    mul = helper.make_node('Mul', ['a', 'x'], ['c'], "multiply")
    add = helper.make_node('Add', ['c', 'b'], ['y'], "add")

    # Build the GraphProto
    graph = helper.make_graph([mul, add], 'sample-linear', [a, x, b], [y])

    # Build the ModelProto
    model = helper.make_model(graph)

    # Check the model for errors
    onnx.checker.check_model(model)
    # print(model)

    # Save the model
    onnx.save(model, "../models/sample-linear.onnx")

    return model


if __name__ == "__main__":
    model = create_onnx()

Loading and printing the result:

import onnx

def main(): 

    model = onnx.load("../models/sample-linear.onnx")
    onnx.checker.check_model(model)
    print(model)
    graph        = model.graph
    nodes        = graph.node
    inputs       = graph.input
    outputs      = graph.output

    print("\n**************parse input/output*****************")
    for input in inputs:
        input_shape = []
        for d in input.type.tensor_type.shape.dim:
            if d.dim_value == 0:
                input_shape.append(None)
            else:
                input_shape.append(d.dim_value)
        print("Input info: \
                \n\tname:      {} \
                \n\tdata Type: {} \
                \n\tshape:     {}".format(input.name, input.type.tensor_type.elem_type, input_shape))

    for output in outputs:
        output_shape = []
        for d in output.type.tensor_type.shape.dim:
            if d.dim_value == 0:
                output_shape.append(None)
            else:
                output_shape.append(d.dim_value)
        print("Output info: \
                \n\tname:      {} \
                \n\tdata Type: {} \
                \n\tshape:     {}".format(output.name, output.type.tensor_type.elem_type, output_shape))

    print("\n**************parse node************************")
    for node in nodes:
        print("node info: \
                \n\tname:      {} \
                \n\top_type:   {} \
                \n\tinputs:    {} \
                \n\toutputs:   {}".format(node.name, node.op_type, node.input, node.output))


if __name__ == "__main__":
    main()
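As a sanity check, the exported graph can also be executed with ONNX Runtime (a sketch, assuming the onnxruntime package is installed and sample-linear.onnx was saved by the script above); the result should match y = a * x + b computed directly in NumPy:

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("../models/sample-linear.onnx",
                            providers=["CPUExecutionProvider"])
a = np.random.rand(10, 10).astype(np.float32)
x = np.random.rand(10, 10).astype(np.float32)
b = np.random.rand(10, 10).astype(np.float32)

y = sess.run(["y"], {"a": a, "x": x, "b": b})[0]
print(np.allclose(y, a * x + b))   # expected: True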



2.2.2 Creating a simple ONNX model -- CBR

import numpy as np
import onnx
from onnx import numpy_helper


def create_initializer_tensor(
        name: str,
        tensor_array: np.ndarray,
        data_type: int = onnx.TensorProto.FLOAT
) -> onnx.TensorProto:

    initializer = onnx.helper.make_tensor(
        name      = name,
        data_type = data_type,
        dims      = tensor_array.shape,
        vals      = tensor_array.flatten().tolist())

    return initializer


def main():
    
    input_batch    = 1
    input_channel  = 3
    input_height   = 64
    input_width    = 64
    output_channel = 16

    input_shape    = [input_batch, input_channel, input_height, input_width]
    output_shape   = [input_batch, output_channel, 1, 1]

    ########################## Create input/output ################################
    model_input_name  = "input0"
    model_output_name = "output0"

    input = onnx.helper.make_tensor_value_info(
            model_input_name,
            onnx.TensorProto.FLOAT,
            input_shape)

    output = onnx.helper.make_tensor_value_info(
            model_output_name, 
            onnx.TensorProto.FLOAT, 
            output_shape)
    

    ########################## Create the first conv node ##############################
    conv1_output_name = "conv2d_1.output"
    conv1_in_ch       = input_channel
    conv1_out_ch      = 32
    conv1_kernel      = 3
    conv1_pads        = 1

    # Create the weights for the conv node
    conv1_weight    = np.random.rand(conv1_out_ch, conv1_in_ch, conv1_kernel, conv1_kernel)
    conv1_bias      = np.random.rand(conv1_out_ch)

    conv1_weight_name = "conv2d_1.weight"
    conv1_weight_initializer = create_initializer_tensor(
        name         = conv1_weight_name,
        tensor_array = conv1_weight,
        data_type    = onnx.TensorProto.FLOAT)


    conv1_bias_name  = "conv2d_1.bias"
    conv1_bias_initializer = create_initializer_tensor(
        name         = conv1_bias_name,
        tensor_array = conv1_bias,
        data_type    = onnx.TensorProto.FLOAT)

    # Create the conv node; note that a Conv node takes 3 inputs: input, weight, bias
    conv1_node = onnx.helper.make_node(
        name         = "conv2d_1",
        op_type      = "Conv",
        inputs       = [
            model_input_name, 
            conv1_weight_name,
            conv1_bias_name
        ],
        outputs      = [conv1_output_name],
        kernel_shape = [conv1_kernel, conv1_kernel],
        pads         = [conv1_pads, conv1_pads, conv1_pads, conv1_pads],
    )

    ########################## Create a BatchNorm node ###########################
    bn1_output_name = "batchNorm1.output"

    # Add the weight information for the BN node
    bn1_scale = np.random.rand(conv1_out_ch)
    bn1_bias  = np.random.rand(conv1_out_ch)
    bn1_mean  = np.random.rand(conv1_out_ch)
    bn1_var   = np.random.rand(conv1_out_ch)

    # Create the weight initializers with create_initializer_tensor, the same way as for the conv node
    bn1_scale_name = "batchNorm1.scale"
    bn1_bias_name  = "batchNorm1.bias"
    bn1_mean_name  = "batchNorm1.mean"
    bn1_var_name   = "batchNorm1.var"

    bn1_scale_initializer = create_initializer_tensor(
        name         = bn1_scale_name,
        tensor_array = bn1_scale,
        data_type    = onnx.TensorProto.FLOAT)
    bn1_bias_initializer = create_initializer_tensor(
        name         = bn1_bias_name,
        tensor_array = bn1_bias,
        data_type    = onnx.TensorProto.FLOAT)
    bn1_mean_initializer = create_initializer_tensor(
        name         = bn1_mean_name,
        tensor_array = bn1_mean,
        data_type    = onnx.TensorProto.FLOAT)
    bn1_var_initializer  = create_initializer_tensor(
        name         = bn1_var_name,
        tensor_array = bn1_var,
        data_type    = onnx.TensorProto.FLOAT)

    # Create the BN node; note that a BatchNormalization node takes 5 inputs: input, scale, bias, mean, var
    bn1_node = onnx.helper.make_node(
        name    = "batchNorm1",
        op_type = "BatchNormalization",
        inputs  = [
            conv1_output_name,
            bn1_scale_name,
            bn1_bias_name,
            bn1_mean_name,
            bn1_var_name
        ],
        outputs=[bn1_output_name],
    )

    ########################## Create a ReLU node ###########################
    relu1_output_name = "relu1.output"

    # Create the ReLU node; ReLU has no weights, so make_node alone is enough
    relu1_node = onnx.helper.make_node(
        name    = "relu1",
        op_type = "Relu",
        inputs  = [bn1_output_name],
        outputs = [relu1_output_name],
    )

    ########################## Create an AveragePool node ####################
    avg_pool1_output_name = "avg_pool1.output"

    # Create the pooling node; GlobalAveragePool has no weights, so make_node alone is enough
    avg_pool1_node = onnx.helper.make_node(
        name    = "avg_pool1",
        op_type = "GlobalAveragePool",
        inputs  = [relu1_output_name],
        outputs = [avg_pool1_output_name],
    )

    ########################## Create the second conv node ##############################

    # Attributes for the second conv node
    conv2_in_ch  = conv1_out_ch
    conv2_out_ch = output_channel
    conv2_kernel = 1
    conv2_pads   = 0

    # Create the weights for the conv node
    conv2_weight    = np.random.rand(conv2_out_ch, conv2_in_ch, conv2_kernel, conv2_kernel)
    conv2_bias      = np.random.rand(conv2_out_ch)
    
    conv2_weight_name = "conv2d_2.weight"
    conv2_weight_initializer = create_initializer_tensor(
        name         = conv2_weight_name,
        tensor_array = conv2_weight,
        data_type    = onnx.TensorProto.FLOAT)

    conv2_bias_name  = "conv2d_2.bias"
    conv2_bias_initializer = create_initializer_tensor(
        name         = conv2_bias_name,
        tensor_array = conv2_bias,
        data_type    = onnx.TensorProto.FLOAT)

    # Create the conv node; note that a Conv node takes 3 inputs: input, weight, bias
    conv2_node = onnx.helper.make_node(
        name         = "conv2d_2",
        op_type      = "Conv",
        inputs       = [
            avg_pool1_output_name,
            conv2_weight_name,
            conv2_bias_name
        ],
        outputs      = [model_output_name],
        kernel_shape = [conv2_kernel, conv2_kernel],
        pads         = [conv2_pads, conv2_pads, conv2_pads, conv2_pads],
    )

    ########################## Create the graph ##############################
    graph = onnx.helper.make_graph(
        name    = "sample-convnet",
        inputs  = [input],
        outputs = [output],
        nodes   = [
            conv1_node, 
            bn1_node, 
            relu1_node, 
            avg_pool1_node, 
            conv2_node],
        initializer =[
            conv1_weight_initializer, 
            conv1_bias_initializer,
            bn1_scale_initializer, 
            bn1_bias_initializer,
            bn1_mean_initializer, 
            bn1_var_initializer,
            conv2_weight_initializer, 
            conv2_bias_initializer
        ],
    )

    ########################## Create the model ##############################
    model = onnx.helper.make_model(graph, producer_name="onnx-sample")
    model.opset_import[0].version = 12
    
    ########################## Validate & save the model ##############################
    model = onnx.shape_inference.infer_shapes(model)
    onnx.checker.check_model(model)
    print("Congratulations!! Succeed in creating {}.onnx".format(graph.name))
    onnx.save(model, "../models/sample-convnet.onnx")


# Use onnx.helper to build a minimal ConvNet:
#         input (ch=3, h=64, w=64)
#           |
#          Conv (in_ch=3, out_ch=32, kernel=3, pads=1)
#           |
#        BatchNorm
#           |
#          ReLU
#           |
#         AvgPool
#           |
#          Conv (in_ch=32, out_ch=16, kernel=1, pads=0)
#           |
#         output (ch=16, h=1, w=1)

if __name__ == "__main__":
    main()
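Because the script runs onnx.shape_inference.infer_shapes before saving, the intermediate tensor shapes are recorded in graph.value_info. A short sketch (assuming the sample-convnet.onnx saved above exists) to inspect them:

import onnx

model = onnx.load("../models/sample-convnet.onnx")
for vi in model.graph.value_info:            # one ValueInfoProto per internal edge
    dims = [d.dim_value for d in vi.type.tensor_type.shape.dim]
    print(vi.name, dims)
# Should print something like:
#   conv2d_1.output   [1, 32, 64, 64]
#   batchNorm1.output [1, 32, 64, 64]
#   relu1.output      [1, 32, 64, 64]
#   avg_pool1.output  [1, 32, 1, 1]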


The parsing script:
parser.py

import onnx
import numpy as np

# Note: the weights are stored as raw bytes (raw_data), so to read them the buffer has to be reinterpreted as float32
def read_weight(initializer: onnx.TensorProto):
    shape = initializer.dims
    data  = np.frombuffer(initializer.raw_data, dtype=np.float32).reshape(shape)
    print("\n**************parse weight data******************")
    print("initializer info: \
            \n\tname:      {} \
            \n\tdata:    \n{}".format(initializer.name, data))
    

def parse_onnx(model: onnx.ModelProto):
    graph        = model.graph
    initializers = graph.initializer
    nodes        = graph.node
    inputs       = graph.input
    outputs      = graph.output

    print("\n**************parse input/output*****************")
    for input in inputs:
        input_shape = []
        for d in input.type.tensor_type.shape.dim:
            if d.dim_value == 0:
                input_shape.append(None)
            else:
                input_shape.append(d.dim_value)
        print("Input info: \
                \n\tname:      {} \
                \n\tdata Type: {} \
                \n\tshape:     {}".format(input.name, input.type.tensor_type.elem_type, input_shape))

    for output in outputs:
        output_shape = []
        for d in output.type.tensor_type.shape.dim:
            if d.dim_value == 0:
                output_shape.append(None)
            else:
                output_shape.append(d.dim_value)
        print("Output info: \
                \n\tname:      {} \
                \n\tdata Type: {} \
                \n\tshape:     {}".format(output.name, output.type.tensor_type.elem_type, output_shape))

    print("\n**************parse node************************")
    for node in nodes:
        print("node info: \
                \n\tname:      {} \
                \n\top_type:   {} \
                \n\tinputs:    {} \
                \n\toutputs:   {}".format(node.name, node.op_type, node.input, node.output))

    print("\n**************parse initializer*****************")
    for initializer in initializers:
        print("initializer info: \
                \n\tname:      {} \
                \n\tdata_type: {} \
                \n\tshape:     {}".format(initializer.name, initializer.data_type, initializer.dims))
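The read_weight function above decodes raw_data manually with np.frombuffer; onnx.numpy_helper.to_array is an alternative worth knowing, since it picks the dtype from the TensorProto and also handles tensors whose values are stored in float_data rather than raw_data (as in the hand-built sample-convnet.onnx). A minimal sketch:

import onnx
from onnx import numpy_helper

model = onnx.load("../models/sample-convnet.onnx")
for initializer in model.graph.initializer:
    arr = numpy_helper.to_array(initializer)   # numpy array with the declared dtype/shape
    print(initializer.name, arr.dtype, arr.shape)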


Calling the parser on a CBR model exported from PyTorch:
parse_onnx_cbr.py

import torch
import torch.nn as nn
import torch.onnx
import onnx
from parser import parse_onnx
from parser import read_weight

class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3)
        self.bn1   = nn.BatchNorm2d(num_features=16)
        self.act1  = nn.LeakyReLU()
    
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.act1(x)
        return x


def export_norm_onnx():
    input   = torch.rand(1, 3, 5, 5)
    model   = Model()
    model.eval()

    file    = "../models/sample-cbr.onnx"
    torch.onnx.export(
        model         = model, 
        args          = (input,),
        f             = file,
        input_names   = ["input0"],
        output_names  = ["output0"],
        opset_version = 15)
    print("Finished normal onnx export")

def main():
    export_norm_onnx()
    model = onnx.load_model("../models/sample-cbr.onnx")
    parse_onnx(model)

    initializers = model.graph.initializer
    for item in initializers:
        read_weight(item)


if __name__ == "__main__":
    main()

Summary

This chapter covered the basics of building ONNX models by hand; later chapters will cover registering ONNX operators and the ONNX inference engine, ONNX Runtime.
Recommended reading: an introductory tutorial on model deployment
Recommended reading: reading, writing, defining, and debugging ONNX models
