ResNet Inference in C++ with TensorRT and ncnn

ResNet inference with TensorRT in C++:
  • Export the ResNet model to ONNX format
  • Convert the ONNX model into a TensorRT engine with the trtexec tool (see the command below)
  • Compile and run the program
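For the conversion step, the ONNX model (exported beforehand, e.g. with torch.onnx.export) can be serialized into an engine with trtexec. The file names here are placeholders chosen to match the code below:

trtexec --onnx=resnet.onnx --saveEngine=resnet.engine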
#include <iostream>
#include <vector>
#include <NvInfer.h>
#include <NvInferRuntimeCommon.h>
#include <opencv2/opencv.hpp>
#include <fstream>
#include <string>
#include <chrono>
#include <limits>
#include <cuda_runtime_api.h>
#include "logging.h" // defines the Logger class (a minimal sketch is given after this program)
using namespace std;
using namespace nvinfer1;
#define CHECK(status)                                          \
    do                                                         \
    {                                                          \
        auto ret = (status);                                   \
        if (ret != 0)                                          \
        {                                                      \
            std::cerr << "Cuda failure: " << ret << std::endl; \
            abort();                                           \
        }                                                      \
    } while (0)
float *preProcess(const char *imgPath, int INPUT_W, int INPUT_H);
vector<unsigned char> load_file(const string &file);

int main()
{
    // 1. Create the logger and load the serialized engine
    Logger logger;
    // Read the serialized engine file into memory
    auto engine_data = load_file("./resnet.engine");
    const char *imgPath = "./input.jpg";
    const int batchSize = 1;
    const int inputChannel = 3;
    const int inputHeight = 224;
    const int inputWidth = 224;
    const int outputSize = 1000;
    // 2. Deserialize the engine and create an execution context
    IRuntime *runtime = createInferRuntime(logger);
    ICudaEngine *engine = runtime->deserializeCudaEngine(engine_data.data(), engine_data.size());
    IExecutionContext *context = engine->createExecutionContext();
    // 3. Allocate input and output device memory
    // Look up the binding indices of this engine's input and output tensors
    void *buffers[2];
    const char *INPUT_BLOB_NAME = "input";
    const char *OUTPUT_BLOB_NAME = "output";
    const int inputIndex = engine->getBindingIndex(INPUT_BLOB_NAME);
    const int outputIndex = engine->getBindingIndex(OUTPUT_BLOB_NAME);
    CHECK(cudaMalloc(&buffers[inputIndex], batchSize * inputChannel * inputHeight * inputWidth * sizeof(float)));
    CHECK(cudaMalloc(&buffers[outputIndex], batchSize * outputSize * sizeof(float)));
    vector<float> outputData(batchSize * outputSize);
    // 4. Prepare the input data
    // Preprocess: resize, normalize, and repack into CHW planar layout
    float *inputData = preProcess(imgPath, inputWidth, inputHeight);
    // Create a CUDA stream to manage concurrent copies and kernel execution
    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));
    CHECK(cudaMemcpyAsync(buffers[inputIndex], inputData, batchSize * inputChannel * inputHeight * inputWidth * sizeof(float), cudaMemcpyHostToDevice, stream));
    // 5. Run inference (1000 iterations for timing)
    auto start = chrono::system_clock::now(); // start time
    for (int i = 0; i < 1000; i++)
    {
        context->enqueue(batchSize, buffers, stream, nullptr);
    }
    CHECK(cudaStreamSynchronize(stream)); // enqueue is asynchronous: wait for the GPU before stopping the clock
    auto end = chrono::system_clock::now(); // end time
    std::cout << chrono::duration_cast<chrono::milliseconds>(end - start).count() << "ms" << std::endl;
    // 6. Copy the output from GPU to host memory
    CHECK(cudaMemcpyAsync(outputData.data(), buffers[outputIndex], batchSize * outputSize * sizeof(float), cudaMemcpyDeviceToHost, stream));
    CHECK(cudaStreamSynchronize(stream)); // make sure the copy has finished before reading outputData
    // 7. Post-process the output: report the top-5 classes by raw score
    // outputSize is the number of classes; outputData stores one score per class, in order.
    std::cout << "Top 5 Predictions:" << std::endl;
    for (int i = 0; i < 5; i++)
    {
        float maxVal = -std::numeric_limits<float>::max();
        int maxIndex = -1;
        for (int j = 0; j < outputSize; j++)
        {
            if (outputData[j] > maxVal)
            {
                maxVal = outputData[j];
                maxIndex = j;
            }
        }
        std::cout << "Class " << maxIndex << ": " << maxVal << std::endl;
        outputData[maxIndex] = -std::numeric_limits<float>::max(); // knock this class out so it is not reported twice
    }
    // 8. Free device memory
    cudaFree(buffers[inputIndex]);
    cudaFree(buffers[outputIndex]);
    // 9. Destroy TensorRT objects and release remaining resources
    cudaStreamDestroy(stream);
    context->destroy();
    engine->destroy();
    runtime->destroy();
    delete[] inputData; // was allocated with new[]
    inputData = nullptr;
    return 0;
}

// Read a whole binary file (here: the serialized engine) into memory
vector<unsigned char> load_file(const string &file)
{
    ifstream in(file, ios::in | ios::binary);
    if (!in.is_open())
        return {};
    in.seekg(0, ios::end);
    size_t length = in.tellg();
    std::vector<uint8_t> data;
    if (length > 0)
    {
        in.seekg(0, ios::beg);
        data.resize(length);
        in.read((char *)&data[0], length);
    }
    in.close();
    return data;
}

// Preprocessing: resize, scale to [0, 1], normalize with ImageNet mean/std, and repack HWC BGR into CHW planar layout
float *preProcess(const char *imgPath, int INPUT_W, int INPUT_H)
{
    cv::Mat img = cv::imread(imgPath);
    float *data = new float[3 * INPUT_H * INPUT_W];
    vector<float> mean_value{0.406, 0.456, 0.485}; // ImageNet mean, BGR channel order
    vector<float> std_value{0.225, 0.224, 0.229};  // ImageNet std, BGR channel order
    cv::Mat src_img;
    cv::resize(img, src_img, cv::Size(INPUT_W, INPUT_H));
    int count = 0;
    for (int i = 0; i < INPUT_H; i++)
    {
        uchar *uc_pixel = src_img.data + i * src_img.step;
        for (int j = 0; j < INPUT_W; j++)
        { // planar BGR: write each channel into its own CHW plane
            data[count] = (uc_pixel[0] / 255. - mean_value[0]) / std_value[0];
            data[count + src_img.rows * src_img.cols] = (uc_pixel[1] / 255. - mean_value[1]) / std_value[1];
            data[count + 2 * src_img.rows * src_img.cols] = (uc_pixel[2] / 255. - mean_value[2]) / std_value[2];
            uc_pixel += 3;
            count++;
        }
    }
    return data;
}
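The included logging.h is not shown in the original post; it is usually taken from the TensorRT samples. A minimal sketch of a compatible Logger, assuming nothing beyond the ILogger interface:

// logging.h -- minimal sketch of the Logger used above
#pragma once
#include <NvInfer.h>
#include <iostream>

class Logger : public nvinfer1::ILogger
{
public:
    void log(Severity severity, const char *msg) noexcept override
    {
        // Print only warnings and errors; suppress info-level messages
        if (severity <= Severity::kWARNING)
            std::cerr << msg << std::endl;
    }
};

For the compile-and-run step, a typical build line might look like the following (include and library paths depend on the local TensorRT, CUDA and OpenCV installations):

g++ main.cpp -o resnet_trt -lnvinfer -lcudart `pkg-config --cflags --libs opencv4`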

ncnn inference in C++:
  • Convert the model with the tools shipped with ncnn (see the command below)
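Assuming the model was first exported to ONNX, the classic converter shipped in ncnn's tools directory is onnx2ncnn (newer ncnn releases also provide pnnx); the file names are placeholders matching the code below:

onnx2ncnn resnet.onnx resnet.param resnet.bin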
#include <algorithm>
#include <iostream>
#include <fstream>
#include <vector>
#include <opencv2/opencv.hpp>
#include "net.h"
#include <numeric>
#include <chrono>
using namespace std;
int main(int argc, char **argv)
{
  const int inputHeight = 224;
  const int inputWidth = 224;
  // Load the ResNet-34 model from file
  ncnn::Net net;
  net.opt.use_vulkan_compute = true; // use Vulkan GPU acceleration
  net.load_param("resnet.param");
  net.load_model("resnet.bin");

  // Load an input image
  cv::Mat img = cv::imread("input.jpg");
  cv::resize(img, img, cv::Size(inputWidth, inputHeight)); // resize to the network input size
  // Convert the input image to the input tensor of the network
  ncnn::Mat in = ncnn::Mat::from_pixels(img.data, ncnn::Mat::PIXEL_BGR, img.cols, img.rows);
  const float mean_vals[3] = {127.5f, 127.5f, 127.5f};
  const float norm_vals[3] = {1.0f / 127.5f, 1.0f / 127.5f, 1.0f / 127.5f};
  in.substract_mean_normalize(mean_vals, norm_vals); // normalize: subtract the mean, then multiply by the reciprocal scale
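  // Note: substract_mean_normalize computes (pixel - mean) * norm per channel.
  // The 127.5 values above map pixels to roughly [-1, 1]; to reproduce the
  // ImageNet normalization used in the TensorRT example (values assumed here,
  // BGR channel order), one would instead use:
  //   const float mean_vals[3] = {0.406f * 255.f, 0.456f * 255.f, 0.485f * 255.f};
  //   const float norm_vals[3] = {1 / (0.225f * 255.f), 1 / (0.224f * 255.f), 1 / (0.229f * 255.f)};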
  // Run the forward inference (1000 iterations for timing)
  ncnn::Mat out;
  auto start = chrono::system_clock::now(); // start time
  for (int i = 0; i < 1000; i++)
  {
    ncnn::Extractor ex = net.create_extractor();
    ex.input("input", in);

    ex.extract("output", out);
  }

  auto end = chrono::system_clock::now(); // end time
  std::cout << chrono::duration_cast<chrono::milliseconds>(end - start).count() << "ms" << std::endl;

  // Print the top-5 predictions
  std::vector<float> scores(out.w);
  for (int i = 0; i < out.w; i++)
  {
    scores[i] = out[i];
  }
  std::vector<int> indices(out.w);
  std::iota(indices.begin(), indices.end(), 0);
  std::sort(indices.begin(), indices.end(), [&scores](int lhs, int rhs)
            { return scores[lhs] > scores[rhs]; });

  std::cout << "Top-5 predictions:" << std::endl;
  for (int i = 0; i < 5; i++)
  {
    std::cout << "Class "
              << ": " << indices[i] << std::endl;
  }
  net.clear();
  return 0;
}
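Both programs print raw scores. If probabilities are wanted for the top-5 report, a softmax can be applied to the output vector first; a minimal standalone sketch (a helper added here for illustration, not part of the original post):

#include <algorithm>
#include <cmath>
#include <vector>

// Numerically stable softmax: subtract the max logit before exponentiating.
std::vector<float> softmax(const std::vector<float> &logits)
{
    std::vector<float> probs(logits.size());
    const float maxVal = *std::max_element(logits.begin(), logits.end());
    float sum = 0.f;
    for (size_t i = 0; i < logits.size(); ++i)
    {
        probs[i] = std::exp(logits[i] - maxVal);
        sum += probs[i];
    }
    for (float &p : probs)
        p /= sum;
    return probs;
}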

