首先是py端的模型定义和onnx导出
import torch
import torchvision
import cv2
import numpy as np
class Classifier(torch.nn.Module):
    """ResNet-18 classifier that emits probabilities rather than raw logits.

    Wrapping the backbone lets the softmax live inside the exported ONNX
    graph ("put as much as possible into onnx"), so consumers of the model
    get probabilities directly.
    """

    def __init__(self):
        super().__init__()
        # ImageNet-pretrained weights; the fc head outputs 1000-class logits.
        self.backbone = torchvision.models.resnet18(pretrained=True)

    def forward(self, x):
        logits = self.backbone(x)
        # Softmax over the class dimension turns logits into probabilities.
        return torch.softmax(logits, dim=1)
# ImageNet per-channel statistics: normalizing each channel the same way the
# backbone was trained is required for correct inference results.
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
image = cv2.imread("workspace/dog.jpg")
image = cv2.resize(image, (224, 224)) # resize to the network's fixed 224x224 input
image = image[..., ::-1] # BGR -> RGB (OpenCV loads images as BGR)
image = image / 255.0 # scale pixels to [0, 1]; promotes to float64
image = (image - imagenet_mean) / imagenet_std # per-channel normalize
image = image.astype(np.float32) # float64 -> float32 (what the model expects)
image = image.transpose(2, 0, 1) # HWC -> CHW
image = np.ascontiguousarray(image) # make memory contiguous after the views above
image = image[None, ...] # CHW -> 1CHW (add batch dimension)
image = torch.from_numpy(image) # numpy -> torch tensor
# Run the classifier once on the preprocessed image and print the top class.
model = Classifier().eval()  # eval() freezes dropout/batch-norm behavior
with torch.no_grad():        # inference only: skip autograd bookkeeping
    probability = model(image)
    predict_class = probability.argmax(dim=1).item()
    confidence = probability[0, predict_class]
labels = [item.strip() for item in open("workspace/labels.imagenet.txt").readlines()]
print(f"Predict: {predict_class}, {confidence}, {labels[predict_class]}")
# Export the model to ONNX. The dummy tensor only fixes the input shape;
# its values are irrelevant. Batch dimension 0 is marked dynamic on both
# the input and the output so the engine can accept variable batch sizes.
dummy = torch.zeros(1, 3, 224, 224)
torch.onnx.export(
    model,
    (dummy,),
    "workspace/classifier.onnx",
    input_names=["image"],
    output_names=["prob"],
    dynamic_axes={"image": {0: "batch"}, "prob": {0: "batch"}},
    opset_version=11,
)
拆解:
1. 模型的定义(或将pth保存转化onnx)
class Classifier(torch.nn.Module):
def __init__(self):
super().__init__()
self.backbone = torchvision.models.resnet18(pretrained=True)
def forward(self, x):
feature = self.backbone(x)
probability = torch.softmax(feature, dim=1)
return probability
这里将ImageNet上训练过的resnet18作为分类器,之所以重新封装,是贯彻“能放进onnx的就尽量放”的原则,因为resnet输出并非概率,softmax是必做的,放进onnx中可以提升效率。
2. 预处理
我们知道为了提升模型的泛化性,模型在训练端通常采用很多数据增强手段(随机裁剪、旋转等),其中推理端也要用的一般是resize到固定尺寸(如resnet一般为224),并做Normalize归一化(减均值除以方差)。
# 对每个通道进行归一化有助于模型的训练
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]
image = cv2.imread("workspace/dog.jpg")
image = cv2.resize(image, (224, 224)) # resize
image = image[..., ::-1] # BGR -> RGB
image = image / 255.0
image = (image - imagenet_mean) / imagenet_std # normalize
image = image.astype(np.float32) # float64 -> float32
image = image.transpose(2, 0, 1) # HWC -> CHW
image = np.ascontiguousarray(image) # contiguous array memory
image = image[None, ...] # CHW -> 1CHW
image = torch.from_numpy(image) # numpy -> torch
3. 用torch.onnx.export将pytorch模型导出为onnx
dummy = torch.zeros(1, 3, 224, 224)
torch.onnx.export(
model, (dummy,), "workspace/classifier.onnx",
input_names=["image"],
output_names=["prob"],
dynamic_axes={"image": {0: "batch"}, "prob": {0: "batch"}},
opset_version=11
)
1. 指定输入为dummy,其具体的值在这里不重要,定义好它的形状后,除非后面指定维度为动态,否则推理时严格按照该输入形状。
2. export函数各参数说明
注意点:
model转化前指定eval()模式;
args传入形式tuple或者tensor,tuple加括号;
dynamic_axes指定动态维度,一般只指定batch_size为动态;
opset_version一般指定>=11;
C端编译推理
onnx-tensorrt源代码官方网址:https://github.com/onnx/onnx-tensorrt
整体代码:
// tensorRT include
// 编译用的头文件
#include <NvInfer.h>
// onnx解析器的头文件
#include <onnx-tensorrt/NvOnnxParser.h>
// 推理用的运行时头文件
#include <NvInferRuntime.h>
// cuda include
#include <cuda_runtime.h>
// system include
#include <stdio.h>
#include <math.h>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <vector>
#include <memory>
#include <functional>
#include <unistd.h>
#include <chrono>
#include <opencv2/opencv.hpp>
using namespace std;
// Wrap a CUDA runtime call so any failure is reported with its source location.
#define checkRuntime(op) __check_cuda_runtime((op), #op, __FILE__, __LINE__)

// Returns true on cudaSuccess; otherwise prints the failing expression, the
// file/line it came from, and the CUDA error name and description.
bool __check_cuda_runtime(cudaError_t code, const char* op, const char* file, int line){
    if(code == cudaSuccess)
        return true;
    const char* err_name = cudaGetErrorName(code);
    const char* err_message = cudaGetErrorString(code);
    printf("runtime error %s:%d %s failed. \n code = %s, message = %s\n", file, line, op, err_name, err_message);
    return false;
}
// Map a TensorRT logger severity to a short human-readable label.
inline const char* severity_string(nvinfer1::ILogger::Severity t){
    switch(t){
        case nvinfer1::ILogger::Severity::kINTERNAL_ERROR: return "internal_error";
        case nvinfer1::ILogger::Severity::kERROR:   return "error";
        case nvinfer1::ILogger::Severity::kWARNING: return "warning";
        case nvinfer1::ILogger::Severity::kINFO:    return "info";
        case nvinfer1::ILogger::Severity::kVERBOSE: return "verbose";
        default: return "unknown"; // BUG FIX: was the typo "unknow"
    }
}
// Logger handed to TensorRT. Messages more verbose than kINFO are dropped;
// the rest are colorized with ANSI escape codes:
//   "\033[33m...\033[0m" = yellow (warnings)
//   "\033[31m...\033[0m" = red (errors and internal errors)
//   "\033[0m" resets the terminal attributes.
class TRTLogger : public nvinfer1::ILogger{
public:
    virtual void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override{
        if(severity > Severity::kINFO)
            return; // ignore kVERBOSE
        if(severity == Severity::kWARNING){
            printf("\033[33m%s: %s\033[0m\n", severity_string(severity), msg);
        }
        else if(severity <= Severity::kERROR){
            printf("\033[31m%s: %s\033[0m\n", severity_string(severity), msg);
        }
        else{
            printf("%s: %s\n", severity_string(severity), msg);
        }
    }
} logger;
// Wrap a raw pointer returned by a TensorRT factory in a shared_ptr whose
// deleter calls the NVIDIA-provided destroy() instead of operator delete.
// This guarantees release on every early-return path, avoiding leaks.
template<typename _T>
shared_ptr<_T> make_nvshared(_T* ptr){
    auto destroy_deleter = [](_T* p){ p->destroy(); };
    return shared_ptr<_T>(ptr, destroy_deleter);
}
// Return true if `path` exists and is readable by the current process.
// NOTE(review): the _WIN32 branch calls ::PathFileExistsA, but this file does
// not include <Shlwapi.h> (and the target needs Shlwapi.lib) — confirm the
// Windows build actually compiles/links.
bool exists(const string& path){
#ifdef _WIN32
return ::PathFileExistsA(path.c_str());
#else
// R_OK checks readability, not just existence.
return access(path.c_str(), R_OK) == 0;
#endif
}
// 上一节的代码
// Build a TensorRT engine from "classifier.onnx" and serialize it to
// "engine.trtmodel". Returns true on success or if the engine file already
// exists; false on parse or build failure.
bool build_model(){
if(exists("engine.trtmodel")){
printf("Engine.trtmodel has exists.\n");
return true;
}
TRTLogger logger;
// Core build components: builder creates the others, config carries build
// options, network receives the graph definition.
auto builder = make_nvshared(nvinfer1::createInferBuilder(logger));
auto config = make_nvshared(builder->createBuilderConfig());
auto network = make_nvshared(builder->createNetworkV2(1));
// The onnx parser populates `network` with layers, much like calling
// addConvolution etc. by hand.
auto parser = make_nvshared(nvonnxparser::createParser(*network, logger));
if(!parser->parseFromFile("classifier.onnx", 1)){
printf("Failed to parse classifier.onnx\n");
// The shared_ptr wrappers above release their objects on this return.
return false;
}
int maxBatchSize = 10;
printf("Workspace Size = %.2f MB\n", (1 << 28) / 1024.0f / 1024.0f);
// 1 << 28 bytes = 256 MB of scratch workspace for tactic selection.
config->setMaxWorkspaceSize(1 << 28);
// A model with several inputs needs dimensions set per input in the profile.
auto profile = builder->createOptimizationProfile();
auto input_tensor = network->getInput(0);
auto input_dims = input_tensor->getDimensions();
// Configure the min/opt/max shape range: batch 1 for kMIN and kOPT,
// maxBatchSize for kMAX, so inference may use any batch in [1, 10].
input_dims.d[0] = 1;
profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims);
profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims);
input_dims.d[0] = maxBatchSize;
profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims);
config->addOptimizationProfile(profile);
auto engine = make_nvshared(builder->buildEngineWithConfig(*network, *config));
if(engine == nullptr){
printf("Build engine failed.\n");
return false;
}
// Serialize the engine and write it to disk in binary mode.
auto model_data = make_nvshared(engine->serialize());
FILE* f = fopen("engine.trtmodel", "wb");
fwrite(model_data->data(), 1, model_data->size(), f);
fclose(f);
// Objects are destroyed in reverse construction order as the shared_ptrs
// go out of scope.
printf("Done.\n");
return true;
}
///
// Read an entire binary file into memory. Returns an empty vector when the
// file cannot be opened or has no content.
vector<unsigned char> load_file(const string& file){
    ifstream in(file, ios::in | ios::binary);
    if (!in.is_open())
        return {};

    // seekg moves the read position; tellg reports it. Seeking to offset 0
    // from ios::end places us at the end, so tellg yields the file size.
    in.seekg(0, ios::end);
    size_t length = in.tellg();

    vector<unsigned char> data;
    if (length > 0){
        in.seekg(0, ios::beg);
        data.resize(length);
        in.read(reinterpret_cast<char*>(data.data()), length);
    }
    return data; // the ifstream destructor closes the file
}
// Read `file` line by line into a vector of label strings (one per line).
// On open failure, logs the file name and returns an empty vector.
vector<string> load_labels(const char* file){
    vector<string> lines;
    ifstream in(file, ios::in | ios::binary);
    if (!in.is_open()){
        // BUG FIX: the original passed a const char* to %d, which is
        // undefined behavior; %s prints the file name as intended.
        printf("open %s failed.\n", file);
        return lines;
    }
    string line;
    while(getline(in, line)){
        lines.push_back(line);
    }
    in.close();
    return lines;
}
// Deserialize "engine.trtmodel", preprocess dog.jpg into planar float NCHW on
// the host, copy it to the GPU, run one batch of inference, and print the
// top-1 ImageNet class. All host/device buffers are released before returning.
void inference(){
    TRTLogger logger;
    auto engine_data = load_file("engine.trtmodel");
    auto runtime = make_nvshared(nvinfer1::createInferRuntime(logger));
    auto engine = make_nvshared(runtime->deserializeCudaEngine(engine_data.data(), engine_data.size()));
    if(engine == nullptr){
        printf("Deserialize cuda engine failed.\n");
        // BUG FIX: the original also called runtime->destroy() here, but
        // `runtime` is owned by a shared_ptr whose deleter already calls
        // destroy(), so that was a double destroy.
        return;
    }

    cudaStream_t stream = nullptr;
    checkRuntime(cudaStreamCreate(&stream));
    auto execution_context = make_nvshared(engine->createExecutionContext());

    // Concrete input shape for this run: 1x3x224x224. The batch must fall
    // inside the [kMIN, kMAX] range compiled into the optimization profile.
    int input_batch = 1;
    int input_channel = 3;
    int input_height = 224;
    int input_width = 224;
    int input_numel = input_batch * input_channel * input_height * input_width;
    float* input_data_host = nullptr;
    float* input_data_device = nullptr;
    checkRuntime(cudaMallocHost(&input_data_host, input_numel * sizeof(float)));
    checkRuntime(cudaMalloc(&input_data_device, input_numel * sizeof(float)));

    // Preprocessing: mirrors the PyTorch pipeline (resize, /255, normalize,
    // BGR->RGB, HWC->CHW). mean/std are listed in BGR order to match how
    // pixels are read from the OpenCV image below.
    auto image = cv::imread("dog.jpg");
    float mean[] = {0.406, 0.456, 0.485};
    float std[] = {0.225, 0.224, 0.229};
    cv::resize(image, image, cv::Size(input_width, input_height));

    int image_area = image.cols * image.rows;          // pixels per channel plane
    unsigned char* pimage = image.data;                // interleaved BGRBGR... bytes
    float* phost_b = input_data_host + image_area * 0; // plane 0 destination
    float* phost_g = input_data_host + image_area * 1; // plane 1 destination
    float* phost_r = input_data_host + image_area * 2; // plane 2 destination
    for(int i = 0; i < image_area; ++i, pimage += 3){
        // R and B are swapped on purpose: pimage[0] (B) lands in plane 2 and
        // pimage[2] (R) in plane 0, producing R,G,B planar order — the RGB
        // CHW layout the exported model expects.
        *phost_r++ = (pimage[0] / 255.0f - mean[0]) / std[0];
        *phost_g++ = (pimage[1] / 255.0f - mean[1]) / std[1];
        *phost_b++ = (pimage[2] / 255.0f - mean[2]) / std[2];
    }

    checkRuntime(cudaMemcpyAsync(input_data_device, input_data_host, input_numel * sizeof(float), cudaMemcpyHostToDevice, stream));

    // Output buffer: 1000 ImageNet class probabilities.
    const int num_classes = 1000;
    float output_data_host[num_classes];
    float* output_data_device = nullptr;
    checkRuntime(cudaMalloc(&output_data_device, sizeof(output_data_host)));

    // The engine was built with a dynamic batch dimension, so the concrete
    // input shape must be pinned down before enqueueing.
    auto input_dims = execution_context->getBindingDimensions(0);
    input_dims.d[0] = input_batch;
    execution_context->setBindingDimensions(0, input_dims);

    // Enqueue inference, copy the result back, then wait for the stream so
    // output_data_host is valid before reading it.
    float* bindings[] = {input_data_device, output_data_device};
    bool success = execution_context->enqueueV2((void**)bindings, stream, nullptr);
    checkRuntime(cudaMemcpyAsync(output_data_host, output_data_device, sizeof(output_data_host), cudaMemcpyDeviceToHost, stream));
    checkRuntime(cudaStreamSynchronize(stream));

    float* prob = output_data_host;
    int predict_label = std::max_element(prob, prob + num_classes) - prob; // argmax index
    auto labels = load_labels("labels.imagenet.txt");
    auto predict_name = labels[predict_label];
    float confidence = prob[predict_label]; // probability of the predicted class
    printf("Predict: %s, confidence = %f, label = %d\n", predict_name.c_str(), confidence, predict_label);

    // Release resources; shared_ptr-wrapped TRT objects free themselves.
    checkRuntime(cudaStreamDestroy(stream));
    checkRuntime(cudaFreeHost(input_data_host));
    checkRuntime(cudaFree(input_data_device));
    checkRuntime(cudaFree(output_data_device));
}
// Entry point: build (or reuse) the serialized engine, then run inference.
int main(){
    if(!build_model())
        return -1;
    inference();
    return 0;
}
核心部分拆解:
编译
1.构建器builder,以及config,network
上来还是最基本的组件:builder(用于创建config,network,engine等其他对象的核心类),config(构建配置),network(网络定义)。
auto builder = make_nvshared(nvinfer1::createInferBuilder(logger));
auto config = make_nvshared(builder->createBuilderConfig());
auto network = make_nvshared(builder->createNetworkV2(1));
注意:
1. make_nvshared是为了方便添加的函数:在程序运行时,可能遇到一些意外而需要提前退出并return false,此时需要重复大量的内存释放销毁,这里通过智能指针自行销毁。
// 通过智能指针管理nv返回的指针参数
// 内存自动释放,避免泄漏
template<typename _T>
shared_ptr<_T> make_nvshared(_T* ptr){
return shared_ptr<_T>(ptr, [](_T* p){p->destroy();});
}
2. logger是TRTLogger类实例对象,而TRTLogger是nvinfer1::ILogger派生类,用于记录打印日志
class TRTLogger : public nvinfer1::ILogger{
public:
virtual void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override{
if(severity <= Severity::kINFO){
// 打印带颜色的字符,格式如下:
// printf("\033[47;33m打印的文本\033[0m");
// 其中 \033[ 是起始标记
// 47 是背景颜色
// ; 分隔符
// 33 文字颜色
// m 开始标记结束
// \033[0m 是终止标记
// 其中背景颜色或者文字颜色可不写
// 部分颜色代码 https://blog.csdn.net/ericbar/article/details/79652086
if(severity == Severity::kWARNING){
printf("\033[33m%s: %s\033[0m\n", severity_string(severity), msg);
}
else if(severity <= Severity::kERROR){
printf("\033[31m%s: %s\033[0m\n", severity_string(severity), msg);
}
else{
printf("%s: %s\n", severity_string(severity), msg);
}
}
}
} logger;
其中severity_string确定了不同的情况打印返回不同的信息,例如错误、警告等,整个log中针对不同信息用不同的颜色进行打印。
3. 创建network时官方推荐显式batch,即二进制参数设为1,并且必须使用createNetworkV2,对应了推理时的enqueueV2。
2. 创建parser对象解析onnx文件
// 通过onnxparser解析器解析的结果会填充到network中,类似addConv的方式添加进去
auto parser = make_nvshared(nvonnxparser::createParser(*network, logger));
if(!parser->parseFromFile("classifier.onnx", 1)){
printf("Failed to parse classifier.onnx\n");
return false;
}
注意点:
1. onnx-tensorrt中的NvOnnxParser声明和定义了解析器,解析器对象parser通过nvonnxparser::createParser创建,parser->parseFromFile读取并解析onnx文件并定义network相关网络层,返回布尔类型,true表示解析成功。
2. 错误时返回false,parser的销毁由make_nvshared完成。
3. 配置最大内存以及动态尺寸
int maxBatchSize = 10;
printf("Workspace Size = %.2f MB\n", (1 << 28) / 1024.0f / 1024.0f);
config->setMaxWorkspaceSize(1 << 28);
// 如果模型有多个输入,则必须多个profile
auto profile = builder->createOptimizationProfile();
auto input_tensor = network->getInput(0);
auto input_dims = input_tensor->getDimensions();
// 配置最小、最优、最大范围
input_dims.d[0] = 1;
profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims);
profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims);
input_dims.d[0] = maxBatchSize;
profile->setDimensions(input_tensor->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims);
config->addOptimizationProfile(profile);
注意点:
1. profile由builder->createOptimizationProfile()创建,这里用来设置动态batch,通过指定最小、最优和最大batchsize编译,从而在推理时能够指定该范围内的batchsize进行推理。
2. input_tensor->getDimensions获取输入的维度,数据类型为nvinfer1::Dims(NCHW), 通过profile->setDimensions(name, select, dims)设置动态尺寸,可以看到设置最小、最优时batch维度被赋值1,最大为10,因此推理时可以接受batchsize为1-10的张量,推理时要指定bs。
3. 最后通过config->addOptimizationProfile将profile的动态设置加入到builder配置中。
4. 此时已经可以用builder构建engine了
auto engine = make_nvshared(builder->buildEngineWithConfig(*network, *config));
if(engine == nullptr){
printf("Build engine failed.\n");
return false;
}
5. 模型转化的最后一步,序列化engine转为trt模型。
auto model_data = make_nvshared(engine->serialize());
FILE* f = fopen("engine.trtmodel", "wb");
fwrite(model_data->data(), 1, model_data->size(), f);
fclose(f);
二进制打开写入。
推理
1. 读取trt模型文件
vector<unsigned char> load_file(const string& file){
ifstream in(file, ios::in | ios::binary);
if (!in.is_open())
return {};
in.seekg(0, ios::end);
size_t length = in.tellg();
std::vector<uint8_t> data;
if (length > 0){
in.seekg(0, ios::beg);
data.resize(length);
in.read((char*)&data[0], length);
}
in.close();
return data;
}
auto engine_data = load_file("engine.trtmodel");
注意点:
seekg用来修改指针位置,tellg用来记录指针位置。in.seekg(0,ios::end)表示基地址为结束地址,偏移量为0。
2. 创建runtime接口实例
auto runtime = make_nvshared(nvinfer1::createInferRuntime(logger));
3. 对engine_data进行反序列化得到engine。
auto engine = make_nvshared(runtime->deserializeCudaEngine(engine_data.data(), engine_data.size()));
if(engine == nullptr){
printf("Deserialize cuda engine failed.\n");
runtime->destroy();
return;
}
4. 构建执行上下文,创建cuda流
//创建cuda流,确定batch推理是独立的
cudaStream_t stream = nullptr;
checkRuntime(cudaStreamCreate(&stream));
auto execution_context = make_nvshared(engine->createExecutionContext());
5. 内存分配
int input_batch = 1;
int input_channel = 3;
int input_height = 224;
int input_width = 224;
int input_numel = input_batch * input_channel * input_height * input_width;
float* input_data_host = nullptr;
float* input_data_device = nullptr;
checkRuntime(cudaMallocHost(&input_data_host, input_numel * sizeof(float)));
checkRuntime(cudaMalloc(&input_data_device, input_numel * sizeof(float)));
注意点:
推理时要确定动态推理的尺寸(batch),这里推理的形状为(1,3,224,224)。
6.接下来是读图并做预处理,是重点。
// image to float
auto image = cv::imread("dog.jpg");
float mean[] = {0.406, 0.456, 0.485};
float std[] = {0.225, 0.224, 0.229};
// 对应于pytorch的代码部分
cv::resize(image, image, cv::Size(input_width, input_height));
//BGR->RGB
//(pixel / 255.0 - mean) / std
//to tensor-> BGRBGRBGR->BBBGGGRRR
int image_area = image.cols * image.rows;//单个通道的面积
unsigned char* pimage = image.data;//指向image的首地址
float* phost_b = input_data_host + image_area * 0; //input_data_host是host为图像数据开辟的内存,像素在其中以BGRBGR存储,phost_b指向第一个像素B的首地址
float* phost_g = input_data_host + image_area * 1; //phost_g指向第一个像素G通道的首地址
float* phost_r = input_data_host + image_area * 2; //phost_r指向第一个像素R通道的首地址
for(int i = 0; i < image_area; ++i, pimage += 3){ //遍历像素,每个像素三个通道,且以BGR形式
// 注意这里的顺序rgb调换了
*phost_r++ = (pimage[0] / 255.0f - mean[0]) / std[0];
*phost_g++ = (pimage[1] / 255.0f - mean[1]) / std[1];
*phost_b++ = (pimage[2] / 255.0f - mean[2]) / std[2];
}
注意点:
resnet分类推理时的预处理对应resize和normalize操作。resize通过cv::resize即可完成。
openCV以BGR顺序读取jpg图像,需要转化为RGB顺序,同时存储时以BGRBGRBGR...形式存储,每三个通道对应了一个像素,而tensor需要以通道分离的形式即RRR...GGG...BBB...形式存储,遍历转换过程和内存结构如下:
7. 输入数据搬运到device上,同时在device上开辟输出的内存,imagenet为1000类。
checkRuntime(cudaMemcpyAsync(input_data_device, input_data_host, input_numel * sizeof(float), cudaMemcpyHostToDevice, stream));
const int num_classes = 1000;
float output_data_host[num_classes];
float* output_data_device = nullptr;
checkRuntime(cudaMalloc(&output_data_device, sizeof(output_data_host)));
8. 执行推理时需要明确编译为动态的尺寸
// 明确当前推理时,使用的数据输入大小
auto input_dims = execution_context->getBindingDimensions(0);
input_dims.d[0] = input_batch;
// 设置当前推理时,input大小
execution_context->setBindingDimensions(0, input_dims);
注意点:
通过context->getBindingDimensions获取数据输入大小,并明确其动态batch具体值,通过context->setBindingDimensions进行设置确定的input形状。
9. 执行推理,将结果返回host并等待流同步
float* bindings[] = {input_data_device, output_data_device};
bool success = execution_context->enqueueV2((void**)bindings, stream, nullptr);
checkRuntime(cudaMemcpyAsync(output_data_host, output_data_device, sizeof(output_data_host), cudaMemcpyDeviceToHost, stream));
checkRuntime(cudaStreamSynchronize(stream));
float* prob = output_data_host;
int predict_label = std::max_element(prob, prob + num_classes) - prob; // 确定预测类别的下标
auto labels = load_labels("labels.imagenet.txt");
auto predict_name = labels[predict_label];
float confidence = prob[predict_label]; // 获得预测值的置信度
printf("Predict: %s, confidence = %f, label = %d\n", predict_name.c_str(), confidence, predict_label);
10.最后销毁内存等
checkRuntime(cudaStreamDestroy(stream));
checkRuntime(cudaFreeHost(input_data_host));
checkRuntime(cudaFree(input_data_device));
checkRuntime(cudaFree(output_data_device));
注意点:
后分配先释放。