This is the sample program shipped with the TensorRT package that converts an ONNX model to a TensorRT engine and runs inference with it. It uses the older implicit-batch API and is not compatible with newer TensorRT releases; sketches of the roughly equivalent calls for newer versions are added after each function below. The source code is as follows:

#include <algorithm>
#include <assert.h>
#include <cmath>
#include <cuda_runtime_api.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <sys/stat.h>
#include <time.h>

#include "NvInfer.h"
#include "NvOnnxParser.h"
#include "common.h"
using namespace nvinfer1;

static const int INPUT_H = 28;
static const int INPUT_W = 28;
static const int OUTPUT_SIZE = 10;
static Logger gLogger;
static int gUseDLACore{-1};

// directories contains as many elements as there are initializers, each assigned the corresponding initial value
const std::vector<std::string> directories{"data/samples/mnist/", "data/mnist/"}; // directories holds std::string objects (the data search paths)
std::string locateFile(const std::string& input) // the parameter is a const reference to the input string (an alias, avoiding a copy)
{
return locateFile(input, directories);
}

// simple PGM (portable greyscale map) reader
void readPGMFile(const std::string& fileName, uint8_t buffer[INPUT_H * INPUT_W]) // buffer receives the INPUT_H x INPUT_W pixel data
{
readPGMFile(fileName, buffer, INPUT_H, INPUT_W);
}

void onnxToTRTModel(const std::string& modelFile, // name of the onnx model
unsigned int maxBatchSize, // batch size - NB must be at least as large as the batch we want to run with
IHostMemory*& trtModelStream) // output buffer for the TensorRT model
{
int verbosity = (int) nvinfer1::ILogger::Severity::kWARNING;

// create the builder
IBuilder* builder = createInferBuilder(gLogger); // create the builder (a pointer to an IBuilder object)
nvinfer1::INetworkDefinition* network = builder->createNetwork(); /* equivalent to (*builder).createNetwork(): the createNetwork()
method defined by IBuilder creates an INetworkDefinition object, and the pointer network points to it */

auto parser = nvonnxparser::createParser(*network, gLogger); // create the ONNX parser

//Optional - uncomment below lines to view network layer information
//config->setPrintLayerInfo(true);
//parser->reportParsingInfo();

if (!parser->parseFromFile(locateFile(modelFile, directories).c_str(), verbosity)) // parse the ONNX file and populate the network
{
    std::string msg("failed to parse onnx file");
    gLogger.log(nvinfer1::ILogger::Severity::kERROR, msg.c_str());
    exit(EXIT_FAILURE);
}

// Build the engine
builder->setMaxBatchSize(maxBatchSize);
builder->setMaxWorkspaceSize(1 << 20);

samplesCommon::enableDLA(builder, gUseDLACore);
// when the engine is built, TensorRT makes copies of the weights
ICudaEngine* engine = builder->buildCudaEngine(*network); // create the ICudaEngine object via IBuilder's buildCudaEngine() method
assert(engine);

// we can destroy the parser
parser->destroy();

// serialize the engine, then close everything down
trtModelStream = engine->serialize(); // serialize the engine into host memory so it can be saved or deserialized later
engine->destroy();
network->destroy();
builder->destroy();

}
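
Note: on newer TensorRT releases (7.x and 8.x) the implicit-batch calls used above (createNetwork, setMaxBatchSize and setMaxWorkspaceSize on the builder, buildCudaEngine) are deprecated or removed, which is the incompatibility mentioned at the top. The following is only a rough sketch of the equivalent explicit-batch build step under those assumptions, not code from the original sample:

// Sketch only: explicit-batch build path for TensorRT 7.x/8.x.
IBuilder* builder = createInferBuilder(gLogger);
const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
nvinfer1::INetworkDefinition* network = builder->createNetworkV2(explicitBatch);
auto parser = nvonnxparser::createParser(*network, gLogger);
if (!parser->parseFromFile(locateFile("mnist.onnx", directories).c_str(), static_cast<int>(ILogger::Severity::kWARNING)))
{
    gLogger.log(ILogger::Severity::kERROR, "failed to parse onnx file");
    exit(EXIT_FAILURE);
}

// Builder options moved from IBuilder to IBuilderConfig.
nvinfer1::IBuilderConfig* config = builder->createBuilderConfig();
config->setMaxWorkspaceSize(1 << 20);

// TensorRT 8 can build directly to serialized host memory, replacing buildCudaEngine() + serialize().
IHostMemory* trtModelStream = builder->buildSerializedNetwork(*network, *config);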

void doInference(IExecutionContext& context, float* input, float* output, int batchSize)
{
const ICudaEngine& engine = context.getEngine(); // get the engine the context was created from
// input and output buffer pointers that we pass to the engine - the engine requires exactly IEngine::getNbBindings(),
// of these, but in this case we know that there is exactly one input and one output.
assert(engine.getNbBindings() == 2);
void* buffers[2];

// In order to bind the buffers, we need to know the names of the input and output tensors.
// note that indices are guaranteed to be less than IEngine::getNbBindings()
int inputIndex, outputIndex;
for (int b = 0; b < engine.getNbBindings(); ++b)
{
    if (engine.bindingIsInput(b))
        inputIndex = b;
    else
        outputIndex = b;
}

// create GPU buffers and a stream
CHECK(cudaMalloc(&buffers[inputIndex], batchSize * INPUT_H * INPUT_W * sizeof(float)));
CHECK(cudaMalloc(&buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float)));

cudaStream_t stream;
CHECK(cudaStreamCreate(&stream));

// DMA the input to the GPU,  execute the batch asynchronously, and DMA it back:
CHECK(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));
context.enqueue(batchSize, buffers, stream, nullptr);
CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
cudaStreamSynchronize(stream);

// release the stream and the buffers
cudaStreamDestroy(stream);
CHECK(cudaFree(buffers[inputIndex]));
CHECK(cudaFree(buffers[outputIndex]));

}
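
Similarly, enqueue() with an explicit batch-size argument is deprecated on newer TensorRT versions. A minimal sketch of the inference step for an explicit-batch engine, assuming the tensor names "Input3" and "Plus214_Output_0" used by the sample mnist.onnx (an assumption, not stated in this post), might look like:

// Sketch only: explicit-batch inference with enqueueV2 (TensorRT 7.x/8.x).
void doInferenceV2(IExecutionContext& context, float* input, float* output)
{
    const ICudaEngine& engine = context.getEngine();
    void* buffers[2];

    // Look up the bindings by tensor name; the names below are assumptions, query getBindingName() if unsure.
    const int inputIndex = engine.getBindingIndex("Input3");
    const int outputIndex = engine.getBindingIndex("Plus214_Output_0");

    CHECK(cudaMalloc(&buffers[inputIndex], INPUT_H * INPUT_W * sizeof(float)));
    CHECK(cudaMalloc(&buffers[outputIndex], OUTPUT_SIZE * sizeof(float)));

    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));

    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));
    // enqueueV2 takes no batch-size argument; the batch dimension is part of the binding shape.
    context.enqueueV2(buffers, stream, nullptr);
    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
    cudaStreamSynchronize(stream);

    cudaStreamDestroy(stream);
    CHECK(cudaFree(buffers[inputIndex]));
    CHECK(cudaFree(buffers[outputIndex]));
}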

int main(int argc, char** argv)
{
gUseDLACore = samplesCommon::parseDLA(argc, argv);
// create a TensorRT model from the onnx model and serialize it to a stream
IHostMemory* trtModelStream{nullptr}; // output buffer for the serialized TensorRT model, empty for now
onnxToTRTModel("mnist.onnx", 1, trtModelStream); // convert the ONNX model into a serialized TensorRT model
assert(trtModelStream != nullptr); // assert checks that the condition holds; otherwise the program is terminated

// read a random digit file
srand(unsigned(time(nullptr)));
uint8_t fileData[INPUT_H * INPUT_W];
int num = rand() % 10;
readPGMFile(locateFile(std::to_string(num) + ".pgm", directories), fileData);
// IRuntime* runtime = createInferRuntime(gLogger); -- create a runtime object for deserialization (done further below)
/* ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream->data(), trtModelStream->size(), nullptr);
   deserialization turns the previously serialized model back into an ICudaEngine */
// print an ascii representation
std::cout << "\n\n\n---------------------------"
          << "\n\n\n"
          << std::endl;
for (int i = 0; i < INPUT_H * INPUT_W; i++)
    std::cout << (" .:-=+*#%@"[fileData[i] / 26]) << (((i + 1) % INPUT_W) ? "" : "\n");

float data[INPUT_H * INPUT_W];
for (int i = 0; i < INPUT_H * INPUT_W; i++)
    data[i] = 1.0 - float(fileData[i] / 255.0);

IRuntime* runtime = createInferRuntime(gLogger);
assert(runtime != nullptr);
if (gUseDLACore >= 0)
{
    runtime->setDLACore(gUseDLACore);
}

ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream->data(), trtModelStream->size(), nullptr);
assert(engine != nullptr);
trtModelStream->destroy();
IExecutionContext* context = engine->createExecutionContext(); // to run inference, an IExecutionContext object is needed
assert(context != nullptr);
// run inference 
float prob[OUTPUT_SIZE];
doInference(*context, data, prob, 1);

// destroy the engine
context->destroy();
engine->destroy();
runtime->destroy();

std::cout << "\n\n";
float val{0.0f};
int idx{0};

//Calculate Softmax
float sum{0.0f};
for (int i = 0; i < OUTPUT_SIZE; i++)
{
    prob[i] = exp(prob[i]);
    sum += prob[i];
}
for (int i = 0; i < OUTPUT_SIZE; i++)
{
    prob[i] /= sum;
    val = std::max(val, prob[i]);
    if (val == prob[i])
        idx = i;

    std::cout << " Prob " << i << "  " << std::fixed << std::setw(5) << std::setprecision(4) << prob[i] << " "
              << "Class " << i << ": " << std::string(int(std::floor(prob[i] * 10 + 0.5f)), '*') << std::endl;
}
std::cout << std::endl;

return (idx == num && val > 0.9f) ? EXIT_SUCCESS : EXIT_FAILURE;

}
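
Finally, on TensorRT 8.x the destroy() calls used in main() are deprecated in favor of plain delete, and deserializeCudaEngine() drops the third plugin-factory argument. A short sketch of the corresponding deserialization and cleanup, under those assumptions:

// Sketch only: TensorRT 8.x style deserialization and cleanup.
IRuntime* runtime = createInferRuntime(gLogger);
ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream->data(), trtModelStream->size());
IExecutionContext* context = engine->createExecutionContext();

// ... run inference as above ...

// destroy() is deprecated; TensorRT 8 objects are released with delete.
delete context;
delete engine;
delete runtime;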
