TensorRT Advanced (1): A Complete Classifier Implementation

1 Exporting the Model to an ONNX File

The script that generates the ONNX file is gen-onnx.py; the code consists of the following steps.

1.1 Implementing the Classifier

import torch
import torchvision
import cv2
import numpy as np


class Classifier(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Use a pretrained model shipped with torchvision; for more models see: https://tensorvision.readthedocs.io/en/master/
        self.backbone = torchvision.models.resnet18(pretrained=True)  
        
    def forward(self, x):
        feature     = self.backbone(x)
        probability = torch.softmax(feature, dim=1)
        return probability
  • torchvision.models.resnet18 outputs logits rather than probabilities, so a softmax has to be added to obtain probability outputs. That is why the Classifier class applies torch.softmax in its forward function: the model then outputs probabilities directly, which keeps post-processing simple.
  • This is also a useful trick: whenever post-processing can be folded into the ONNX graph, fold it in. The TensorRT inference result is then the final answer, and no hand-written softmax is needed (see the sketch after this list).
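For contrast, if softmax were left out of the exported graph, TensorRT would return raw logits and the C++ post-processing would have to apply softmax itself. A minimal sketch of what that would look like; the logits, prob, and num_classes names here are illustrative, not taken from the code below:

// Hypothetical manual softmax over raw logits (only needed when softmax
// is NOT part of the exported ONNX graph). Requires <cmath> and <algorithm>.
void softmax(const float* logits, float* prob, int num_classes){
    // subtract the maximum logit for numerical stability
    float max_logit = *std::max_element(logits, logits + num_classes);
    float sum = 0.0f;
    for(int i = 0; i < num_classes; ++i){
        prob[i] = std::exp(logits[i] - max_logit);
        sum += prob[i];
    }
    for(int i = 0; i < num_classes; ++i)
        prob[i] /= sum;
}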

1.2 Image Preprocessing

With the classification model defined, the input image has to be preprocessed before inference. The code is as follows:

# Per-channel ImageNet normalization statistics (defined once in the full script)
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std  = [0.229, 0.224, 0.225]

image = cv2.imread("workspace/dog.jpg")
image = cv2.resize(image, (224, 224))            # resize
image = image[..., ::-1]                         # BGR -> RGB
image = image / 255.0
image = (image - imagenet_mean) / imagenet_std   # normalize
image = image.astype(np.float32)                 # float64 -> float32
image = image.transpose(2, 0, 1)                 # HWC -> CHW
image = np.ascontiguousarray(image)              # contiguous array memory
image = image[None, ...]                         # CHW -> 1CHW
image = torch.from_numpy(image)                  # numpy -> torch

1.3 Model Inference

model = Classifier().eval()

with torch.no_grad():
    probability   = model(image)
    
predict_class = probability.argmax(dim=1).item()
confidence    = probability[0, predict_class]

labels = open("workspace/labels.imagenet.txt").readlines()
labels = [item.strip() for item in labels]

print(f"Predict: {predict_class}, {confidence}, {labels[predict_class]}")

1.4 Exporting the Model as an ONNX File

dummy = torch.zeros(1, 3, 224, 224)
torch.onnx.export(
    model, (dummy,), "workspace/classifier.onnx", 
    input_names=["image"], 
    output_names=["prob"], 
    dynamic_axes={"image": {0: "batch"}, "prob": {0: "batch"}},
    opset_version=11
)
  • Two details here: 1. the batch dimension of both the input and the output is declared dynamic via dynamic_axes; 2. opset_version is set to 11.

  • Run python gen-onnx.py to generate the classifier.onnx file.

The complete gen-onnx.py code is as follows:

import torch
import torchvision
import cv2
import numpy as np


class Classifier(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Use a pretrained model shipped with torchvision; for more models see: https://tensorvision.readthedocs.io/en/master/
        self.backbone = torchvision.models.resnet18(pretrained=True)  
        
    def forward(self, x):
        feature     = self.backbone(x)
        probability = torch.softmax(feature, dim=1)
        return probability
        
# Normalizing each channel (ImageNet mean/std) helps the model
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std  = [0.229, 0.224, 0.225]

image = cv2.imread("workspace/dog.jpg")
image = cv2.resize(image, (224, 224))            # resize
image = image[..., ::-1]                         # BGR -> RGB
image = image / 255.0
image = (image - imagenet_mean) / imagenet_std   # normalize
image = image.astype(np.float32)                 # float64 -> float32
image = image.transpose(2, 0, 1)                 # HWC -> CHW
image = np.ascontiguousarray(image)              # contiguous array memory
image = image[None, ...]                         # CHW -> 1CHW
image = torch.from_numpy(image)                  # numpy -> torch
model = Classifier().eval()

with torch.no_grad():
    probability   = model(image)
    
predict_class = probability.argmax(dim=1).item()
confidence    = probability[0, predict_class]

labels = open("workspace/labels.imagenet.txt").readlines()
labels = [item.strip() for item in labels]

print(f"Predict: {predict_class}, {confidence}, {labels[predict_class]}")

dummy = torch.zeros(1, 3, 224, 224)
torch.onnx.export(
    model, (dummy,), "workspace/classifier.onnx", 
    input_names=["image"], 
    output_names=["prob"], 
    dynamic_axes={"image": {0: "batch"}, "prob": {0: "batch"}},
    opset_version=11
)

2 Inference with TensorRT

With the ONNX file in hand, TensorRT can be used to run inference for the classification model. Before inference, the NvOnnxParser parser must be set up; it is usually built from source. Once built, include the NvOnnxParser.h header in the C++ file.

2.1 Including the Required Headers

// tensorRT include
// headers used to build the engine
#include <NvInfer.h>

// ONNX parser header
#include <onnx-tensorrt/NvOnnxParser.h>

// runtime header used for inference
#include <NvInferRuntime.h>

// cuda include
#include <cuda_runtime.h>

// system include
#include <stdio.h>
#include <math.h>

#include <iostream>
#include <fstream>
#include <vector>
#include <memory>
#include <functional>
#include <unistd.h>

#include <opencv2/opencv.hpp>

2.2 Building the TensorRT Engine File

  • The TensorRT engine file here uses the .trtmodel extension
  • Define the function build_model(); the code is as follows:
bool build_model(){

    if(exists("engine.trtmodel")){
        printf("Engine.trtmodel has exists.\n");
        return true;
    }

    TRTLogger logger;

    // The basic components that are required
    auto builder = make_nvshared(nvinfer1::createInferBuilder(logger));
    auto config = make_nvshared(builder->createBuilderConfig());
    auto network = make_nvshared(builder->createNetworkV2(1));

    // The results parsed by onnxparser are populated into the network, much like adding layers with addConv
    auto parser = make_nvshared(nvonnxparser::createParser(*network, logger));
    if(!parser->parseFromFile("classifier.onnx", 1)){
        printf("Failed to parse classifier.onnx\n");

        // Note: the pointers created above have not been released here, so this path leaks memory; a cleaner solution is left for later
        return false;
    }
    
    int maxBatchSize = 10;
    printf("Workspace Size = %.2f MB\n", (1 << 28) / 1024.0f / 1024.0f);
    config->setMaxWorkspaceSize(1 << 28);

    // If the model has multiple inputs, multiple profiles are required
    auto profile = builder->createOptimizationProfile();
    auto input_tensor = network->getInput(0);
    auto input_dims = input_tensor->getDimensions();
    
    // Configure the minimum, optimal, and maximum ranges
    input_dims.d[0] = 1;
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims);
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims);
    input_dims.d[0] = maxBatchSize;
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims);
    config->addOptimizationProfile(profile);

    auto engine = make_nvshared(builder->buildEngineWithConfig(*network, *config));
    if(engine == nullptr){
        printf("Build engine failed.\n");
        return false;
    }

    // Serialize the model and save it to a file
    auto model_data = make_nvshared(engine->serialize());
    FILE* f = fopen("engine.trtmodel", "wb");
    fwrite(model_data->data(), 1, model_data->size(), f);
    fclose(f);

    // Release order is the reverse of the construction order
    printf("Done.\n");
    return true;
}

make_nvshared is a small smart-pointer wrapper that releases the underlying memory automatically.
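Its definition, as it appears in the full main.cpp further down, wraps the raw pointer returned by TensorRT in a shared_ptr whose deleter calls the object's destroy() method:

// Manage pointers returned by TensorRT with a shared_ptr that calls destroy() on release
template<typename _T>
shared_ptr<_T> make_nvshared(_T* ptr){
    return shared_ptr<_T>(ptr, [](_T* p){p->destroy();});
}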

  • 1. Before parsing the ONNX file, build the builder, config, and network components, then create the model parser with nvonnxparser::createParser(*network, logger)
  • 2. Parse the ONNX file with parser->parseFromFile("classifier.onnx", 1)
  • 3. Define the input batch size and set the minimum, optimal, and maximum batch sizes
  // Configure the minimum, optimal, and maximum ranges
    input_dims.d[0] = 1;
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims);
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims);
    input_dims.d[0] = maxBatchSize;
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims);
    config->addOptimizationProfile(profile);
  • 4. Build the engine used for inference
 auto engine = make_nvshared(builder->buildEngineWithConfig(*network, *config));
    if(engine == nullptr){
        printf("Build engine failed.\n");
        return false;
    }
  • 5. Serialize the model and save it as a .trtmodel file
    // Serialize the model and save it to a file
    auto model_data = make_nvshared(engine->serialize());
    FILE* f = fopen("engine.trtmodel", "wb");
    fwrite(model_data->data(), 1, model_data->size(), f);
    fclose(f);

2.3 Running Model Inference with TensorRT

Preprocess the input image.

1) Allocate buffers for the input image, on both the host and the device.

int input_batch = 1;
int input_channel = 3;
int input_height = 224;
int input_width = 224;
int input_numel = input_batch * input_channel * input_height * input_width;
float* input_data_host = nullptr;
float* input_data_device = nullptr;
checkRuntime(cudaMallocHost(&input_data_host, input_numel * sizeof(float)));
checkRuntime(cudaMalloc(&input_data_device, input_numel * sizeof(float)));

  • Here input_batch, input_channel, input_height, and input_width could be obtained from the engine; they are hard-coded for convenience (a sketch of the lookup follows).
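A minimal sketch of that lookup, assuming the binding-based API (getBindingDimensions) used elsewhere in this code:

// Query the input shape from the engine instead of hard-coding it.
// With a dynamic batch axis dims.d[0] is -1, so the batch size still
// has to be chosen by the caller.
auto dims = engine->getBindingDimensions(0);
int input_channel = dims.d[1];
int input_height  = dims.d[2];
int input_width   = dims.d[3];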

2) Read an image, resize it to (224, 224), then run the preprocessing: 1. convert BGR to RGB, 2. normalize, 3. convert to tensor layout (CHW).

 // image to float
 auto image = cv::imread("dog.jpg");
 float mean[] = {0.406, 0.456, 0.485};
 float std[]  = {0.225, 0.224, 0.229};

 // corresponds to the PyTorch preprocessing code
 cv::resize(image, image, cv::Size(input_width, input_height));
 int image_area = image.cols * image.rows;
 unsigned char* pimage = image.data;
 float* phost_b = input_data_host + image_area * 0;
 float* phost_g = input_data_host + image_area * 1;
 float* phost_r = input_data_host + image_area * 2;
 for(int i = 0; i < image_area; ++i, pimage += 3){
     // note that the channel order is swapped to RGB here
     *phost_r++ = (pimage[0] / 255.0f - mean[0]) / std[0];
     *phost_g++ = (pimage[1] / 255.0f - mean[1]) / std[1];
     *phost_b++ = (pimage[2] / 255.0f - mean[2]) / std[2];
 }
///////////////////////////////////////////////////
  • Copy the data to the GPU:
checkRuntime(cudaMemcpyAsync(input_data_device, input_data_host, input_numel * sizeof(float), cudaMemcpyHostToDevice, stream));

3) With the input device buffer ready, allocate the output device buffer as well.

 // allocate host and device buffers for the 1000-class output
const int num_classes = 1000;
float output_data_host[num_classes];
float* output_data_device = nullptr;
checkRuntime(cudaMalloc(&output_data_device, sizeof(output_data_host)));

4) Set the input batch to 1 and apply it with execution_context->setBindingDimensions(0, input_dims);.

5) Define the bindings array float* bindings[] = {input_data_device, output_data_device}; to hold the input and output pointers, then pass it to execution_context->enqueueV2((void**)bindings, stream, nullptr); to run inference.

6) After inference, copy output_data_device to output_data_host, then synchronize the stream and wait for all operations to finish.

// set the input size for this inference
execution_context->setBindingDimensions(0, input_dims);
float* bindings[] = {input_data_device, output_data_device};
bool success      = execution_context->enqueueV2((void**)bindings, stream, nullptr);
checkRuntime(cudaMemcpyAsync(output_data_host, output_data_device, sizeof(output_data_host), cudaMemcpyDeviceToHost, stream));
checkRuntime(cudaStreamSynchronize(stream));

7) At this point output_data_host holds the classification probabilities; look up the label and the corresponding confidence.

 float* prob = output_data_host;
 int predict_label = std::max_element(prob, prob + num_classes) - prob;  // index of the predicted class
 auto labels = load_labels("labels.imagenet.txt");
 auto predict_name = labels[predict_label];
 float confidence  = prob[predict_label];    // confidence of the prediction
 printf("Predict: %s, confidence = %f, label = %d\n", predict_name.c_str(), confidence, predict_label);

8) Finally, release the resources.

checkRuntime(cudaStreamDestroy(stream));
checkRuntime(cudaFreeHost(input_data_host));
checkRuntime(cudaFree(input_data_device));
checkRuntime(cudaFree(output_data_device));

The complete inference code, main.cpp, is as follows:


// tensorRT include
// headers used to build the engine
#include <NvInfer.h>

// ONNX parser header
#include <onnx-tensorrt/NvOnnxParser.h>

// runtime header used for inference
#include <NvInferRuntime.h>

// cuda include
#include <cuda_runtime.h>

// system include
#include <stdio.h>
#include <math.h>

#include <iostream>
#include <fstream>
#include <vector>
#include <memory>
#include <functional>
#include <unistd.h>

#include <opencv2/opencv.hpp>

using namespace std;

#define checkRuntime(op)  __check_cuda_runtime((op), #op, __FILE__, __LINE__)

bool __check_cuda_runtime(cudaError_t code, const char* op, const char* file, int line){
    if(code != cudaSuccess){    
        const char* err_name = cudaGetErrorName(code);    
        const char* err_message = cudaGetErrorString(code);  
        printf("runtime error %s:%d  %s failed. \n  code = %s, message = %s\n", file, line, op, err_name, err_message);   
        return false;
    }
    return true;
}

inline const char* severity_string(nvinfer1::ILogger::Severity t){
    switch(t){
        case nvinfer1::ILogger::Severity::kINTERNAL_ERROR: return "internal_error";
        case nvinfer1::ILogger::Severity::kERROR:   return "error";
        case nvinfer1::ILogger::Severity::kWARNING: return "warning";
        case nvinfer1::ILogger::Severity::kINFO:    return "info";
        case nvinfer1::ILogger::Severity::kVERBOSE: return "verbose";
        default: return "unknow";
    }
}

class TRTLogger : public nvinfer1::ILogger{
public:
    virtual void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override{
        if(severity <= Severity::kINFO){
            // Print colored text; the format is:
            // printf("\033[47;33mtext to print\033[0m");
            // where \033[ is the start marker
            //       47    is the background color
            //       ;     is the separator
            //       33    is the text color
            //       m     ends the start marker
            //       \033[0m is the end marker
            // Either the background color or the text color may be omitted
            // Some color codes: https://blog.csdn.net/ericbar/article/details/79652086
            if(severity == Severity::kWARNING){
                printf("\033[33m%s: %s\033[0m\n", severity_string(severity), msg);
            }
            else if(severity <= Severity::kERROR){
                printf("\033[31m%s: %s\033[0m\n", severity_string(severity), msg);
            }
            else{
                printf("%s: %s\n", severity_string(severity), msg);
            }
        }
    }
} logger;

// Manage the pointers returned by TensorRT with a smart pointer
// so that memory is released automatically and leaks are avoided
template<typename _T>
shared_ptr<_T> make_nvshared(_T* ptr){
    return shared_ptr<_T>(ptr, [](_T* p){p->destroy();});
}

bool exists(const string& path){

#ifdef _WIN32
    return ::PathFileExistsA(path.c_str());
#else
    return access(path.c_str(), R_OK) == 0;
#endif
}

// the code from the previous section
bool build_model(){

    if(exists("engine.trtmodel")){
        printf("Engine.trtmodel has exists.\n");
        return true;
    }

    TRTLogger logger;

    // The basic components that are required
    auto builder = make_nvshared(nvinfer1::createInferBuilder(logger));
    auto config = make_nvshared(builder->createBuilderConfig());
    auto network = make_nvshared(builder->createNetworkV2(1));

    // The results parsed by onnxparser are populated into the network, much like adding layers with addConv
    auto parser = make_nvshared(nvonnxparser::createParser(*network, logger));
    if(!parser->parseFromFile("classifier.onnx", 1)){
        printf("Failed to parse classifier.onnx\n");

        // Note: the pointers created above have not been released here, so this path leaks memory; a cleaner solution is left for later
        return false;
    }
    
    int maxBatchSize = 10;
    printf("Workspace Size = %.2f MB\n", (1 << 28) / 1024.0f / 1024.0f);
    config->setMaxWorkspaceSize(1 << 28);

    // If the model has multiple inputs, multiple profiles are required
    auto profile = builder->createOptimizationProfile();
    auto input_tensor = network->getInput(0);
    auto input_dims = input_tensor->getDimensions();
    
    // Configure the minimum, optimal, and maximum ranges
    input_dims.d[0] = 1;
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims);
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims);
    input_dims.d[0] = maxBatchSize;
    profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims);
    config->addOptimizationProfile(profile);

    auto engine = make_nvshared(builder->buildEngineWithConfig(*network, *config));
    if(engine == nullptr){
        printf("Build engine failed.\n");
        return false;
    }

    // Serialize the model and save it to a file
    auto model_data = make_nvshared(engine->serialize());
    FILE* f = fopen("engine.trtmodel", "wb");
    fwrite(model_data->data(), 1, model_data->size(), f);
    fclose(f);

    // Release order is the reverse of the construction order
    printf("Done.\n");
    return true;
}

///

vector<unsigned char> load_file(const string& file){
    ifstream in(file, ios::in | ios::binary);
    if (!in.is_open())
        return {};

    in.seekg(0, ios::end);
    size_t length = in.tellg();

    std::vector<uint8_t> data;
    if (length > 0){
        in.seekg(0, ios::beg);
        data.resize(length);

        in.read((char*)&data[0], length);
    }
    in.close();
    return data;
}

vector<string> load_labels(const char* file){
    vector<string> lines;

    ifstream in(file, ios::in | ios::binary);
    if (!in.is_open()){
        printf("open %d failed.\n", file);
        return lines;
    }
    
    string line;
    while(getline(in, line)){
        lines.push_back(line);
    }
    in.close();
    return lines;
}

void inference(){

    TRTLogger logger;
    auto engine_data = load_file("engine.trtmodel");
    auto runtime   = make_nvshared(nvinfer1::createInferRuntime(logger));
    auto engine = make_nvshared(runtime->deserializeCudaEngine(engine_data.data(), engine_data.size()));
    if(engine == nullptr){
        printf("Deserialize cuda engine failed.\n");
        runtime->destroy();
        return;
    }

    cudaStream_t stream = nullptr;
    checkRuntime(cudaStreamCreate(&stream));
    auto execution_context = make_nvshared(engine->createExecutionContext());

    int input_batch = 1;
    int input_channel = 3;
    int input_height = 224;
    int input_width = 224;
    int input_numel = input_batch * input_channel * input_height * input_width;
    float* input_data_host = nullptr;
    float* input_data_device = nullptr;
    checkRuntime(cudaMallocHost(&input_data_host, input_numel * sizeof(float)));
    checkRuntime(cudaMalloc(&input_data_device, input_numel * sizeof(float)));

    ///
    // image to float
    auto image = cv::imread("dog.jpg");
    float mean[] = {0.406, 0.456, 0.485};
    float std[]  = {0.225, 0.224, 0.229};

    // corresponds to the PyTorch preprocessing code
    cv::resize(image, image, cv::Size(input_width, input_height));
    int image_area = image.cols * image.rows;
    unsigned char* pimage = image.data;
    float* phost_b = input_data_host + image_area * 0;
    float* phost_g = input_data_host + image_area * 1;
    float* phost_r = input_data_host + image_area * 2;
    for(int i = 0; i < image_area; ++i, pimage += 3){
        // note that the channel order is swapped to RGB here
        *phost_r++ = (pimage[0] / 255.0f - mean[0]) / std[0];
        *phost_g++ = (pimage[1] / 255.0f - mean[1]) / std[1];
        *phost_b++ = (pimage[2] / 255.0f - mean[2]) / std[2];
    }
    ///
    checkRuntime(cudaMemcpyAsync(input_data_device, input_data_host, input_numel * sizeof(float), cudaMemcpyHostToDevice, stream));

    // allocate host and device buffers for the 1000-class output
    const int num_classes = 1000;
    float output_data_host[num_classes];
    float* output_data_device = nullptr;
    checkRuntime(cudaMalloc(&output_data_device, sizeof(output_data_host)));

    // determine the input dimensions used for this inference
    auto input_dims = execution_context->getBindingDimensions(0);
    input_dims.d[0] = input_batch;

    // set the input size for this inference
    execution_context->setBindingDimensions(0, input_dims);
    float* bindings[] = {input_data_device, output_data_device};
    bool success      = execution_context->enqueueV2((void**)bindings, stream, nullptr);
    checkRuntime(cudaMemcpyAsync(output_data_host, output_data_device, sizeof(output_data_host), cudaMemcpyDeviceToHost, stream));
    checkRuntime(cudaStreamSynchronize(stream));

    float* prob = output_data_host;
    int predict_label = std::max_element(prob, prob + num_classes) - prob;  // index of the predicted class
    auto labels = load_labels("labels.imagenet.txt");
    auto predict_name = labels[predict_label];
    float confidence  = prob[predict_label];    // confidence of the prediction
    printf("Predict: %s, confidence = %f, label = %d\n", predict_name.c_str(), confidence, predict_label);

    checkRuntime(cudaStreamDestroy(stream));
    checkRuntime(cudaFreeHost(input_data_host));
    checkRuntime(cudaFree(input_data_device));
    checkRuntime(cudaFree(output_data_device));
}

int main(){
    if(!build_model()){
        return -1;
    }
    inference();
    return 0;
}
