// TensorRT basic configuration (TensorRT 基本配置)

#include <cuda_runtime_api.h>

#include <fstream>
#include <iostream>
#include <vector>

#include "NvInfer.h"
#include "NvOnnxParser.h"

using namespace std;
using namespace nvinfer1;
using namespace nvonnxparser;

// Minimal ILogger implementation that routes TensorRT diagnostics to stdout.
// Info/verbose messages are suppressed — the original comment claimed this
// but the code printed every message unconditionally.
class Logger : public ILogger {
    void log(Severity severity, const char *msg) noexcept override {
        // Severity enumerators are ordered most-severe-first, so "<=" keeps
        // only kINTERNAL_ERROR, kERROR and kWARNING.
        if (severity <= Severity::kWARNING) {
            std::cout << "[TRT] " << msg << std::endl;
        }
    }
} logger;


int main() {
    // 构建器 创建网络与优化配置
    IBuilder *builder = createInferBuilder(logger);
    // 定义网络 定义输入\输出格式
    INetworkDefinition *network = builder->createNetworkV2(0);
    // 创建优化配置
    IBuilderConfig *config = builder->createBuilderConfig();
    // 申请存储限制
    config->setMemoryPoolLimit(MemoryPoolType::kWORKSPACE, 1U << 20);
    // 创建onnx解析器
    IParser *parser = createParser(*network, logger);
    // 读取模型文件
    char *modelFile = "xxx.onnx";
    parser->parseFromFile(modelFile,
                          static_cast<int32_t>(ILogger::Severity::kINFO));
    for (int32_t i = 0; i < parser->getNbErrors(); ++i) {
        std::cout << parser->getError(i)->desc() << std::endl;
    }
    // 设置网络输入、输出格式
    network->getInput(0)->setAllowedFormats(static_cast<TensorFormats>(
                                                    1 << static_cast<int32_t>(TensorFormat::kLINEAR)));
    network->getOutput(0)->setAllowedFormats(
            1U << static_cast<int32_t>(TensorFormat::kLINEAR));
    config->setFlag(BuilderFlag::kFP16);
    network->getInput(0)->setType(DataType::kHALF);
    network->getOutput(0)->setType(DataType::kHALF);
    config->setFlag(BuilderFlag::kGPU_FALLBACK);

    cudaStream_t profileStream;
    cudaStreamCreateWithFlags(&profileStream, cudaStreamNonBlocking);
    config->setProfileStream(profileStream);
     config->setMemoryPoolLimit(MemoryPoolType::kTACTIC_SHARED_MEMORY, 48 <<
     10);
     //序列化配置 TODO 可以保存到本地
    IHostMemory *serializedModel =
            builder->buildSerializedNetwork(*network, *config);
    if (!serializedModel) {
        return false;
    }
    // 运行时
    IRuntime *runtime = createInferRuntime(logger);
    // 推理引擎
    ICudaEngine *engine = runtime->deserializeCudaEngine(serializedModel->data(),
                                                         serializedModel->size());
    // 创建上下文
    IExecutionContext *context = engine->createExecutionContext();

    // 模型详情
    cout << "模型输入个数 " << network->getNbInputs() << endl;
    cout << "模型 第一个输入的dimension个数" << network->getInput(0)->getDimensions().nbDims << endl;
    cout << "模型输出个数 " << network->getNbOutputs() << endl;
    cout << "模型 第一个输出的dimension个数" << network->getOutput(0)->getDimensions().nbDims << endl;

    // 指定输入输出缓冲区
    void *inPtr = nullptr;
    void *outPtr = nullptr;
    for (int32_t i = 0, e = engine->getNbIOTensors(); i < e; i++)
    {
        auto const name = engine->getIOTensorName(i);
        if (engine->getTensorIOMode(name) == TensorIOMode::kINPUT)
        {
            context->setTensorAddress(name, inPtr);
        }
        else
        {
            context->setTensorAddress(name, outPtr);
        }
    }
    // 计算GPU输入存储大小
    const Dims &inDims = network->getInput(0)->getDimensions();
    int dataSize=sizeof(int8_t);
    int64_t inDimsSize= inDims.d[0] * dataSize;
    for (int i = 1; i < inDims.nbDims; ++i) {
        inDimsSize*=inDims.d[i];
    }
    // GPU 输入数据
    cudaMalloc((void**)&inPtr, inDimsSize);
    // 输入数据从 host to GPU
    cudaMemcpy(inPtr, nullptr, inDimsSize, cudaMemcpyHostToDevice);
    // 计算GPU输出存储大小
    const Dims &outDims = network->getOutput(0)->getDimensions();
    int64_t outDimsSize= outDims.d[0] * dataSize;
    for (int i = 1; i < outDims.nbDims; ++i) {
        outDimsSize*=outDims.d[i];
    }
    // GPU 输出数据
    cudaMalloc((void**)&outPtr, outDimsSize);

    // 执行
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    context->enqueueV3(stream);
    // 等待处理完成
    cudaStreamSynchronize(stream);
    // 输出数据
    cudaMemcpy(nullptr, outPtr ,outDimsSize, cudaMemcpyDeviceToHost);

    return 0;
}
/* NOTE(review): the lines below are web-page artifacts (comment box and
   paywall UI text) from the page this code was copied from. They are not part
   of the program; they are fenced inside a comment so the file compiles.

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值
*/