Pointnet++模型部署到C++中,使用onnxruntime进行推理

本文介绍了如何为Pointnet++模型创建ONNX版本,涉及环境设置(包括CUDA、cuDNN)、VS2022属性配置和C++推理代码示例。
摘要由CSDN通过智能技术生成

环境配置

  1. 下载onnxruntime-gpu版本,我选择的为1.11版本,下载链接https://onnxruntime.ai/getting-started
  2. 查看PC端的cuda是否配置,需要与onnxruntime对应的cuda和cuDNN版本,因为我的onnxruntime为1.11版本,因此我选择的cuda是11.4,cuDNN版本是8.2.2,cuDNN下载链接–https://developer.nvidia.com/rdp/cudnn-archive,cuda下载链接–https://developer.nvidia.com/cuda-toolkit-archive
  3. 下载完cuda和cuDNN就需要安装到PC的系统环境变量中,cuda下载完就一路默认next就可以,然后将以下几个加入系统环境变量
变量名             值
CUDA_BIN_PATH   %CUDA_PATH%\bin
CUDA_LIB_PATH   %CUDA_PATH%\lib\x64
CUDA_PATH   C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.4
CUDA_PATH_V11_4   C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.4
CUDA_SDK_BIN_PATH  %CUDA_SDK_PATH%\bin\win64
CUDA_SDK_LIB_PATH   %CUDA_SDK_PATH%\common\lib\x64
CUDA_SDK_PATH   C:\ProgramData\NVIDIA Corporation\CUDA Samples\v11.4
Path          %CUDA_BIN_PATH%
Path    %CUDA_LIB_PATH%
Path   %CUDA_SDK_BIN_PATH%
Path   %CUDA_SDK_LIB_PATH%

cuDNN下载解压之后,将include、lib和bin里面的文件复制到C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.4下对应的同名文件夹中,一一对应文件名放进去

检测你的cuda和cuDNN是否配置成功,可以看这个链接去测试一下–https://blog.csdn.net/jhsignal/article/details/111398427

Pointnet++转onnx

我的是做实例分割的内容,具体代码如下:

import torch
import pointnet_sem_seg

# Export settings: must match the values used at training time and in the
# C++ inference code (point_num = points per block, class_num = number of
# semantic classes).
point_num = 8192
class_num = 3

model = pointnet_sem_seg.get_model(class_num)
model = model.cuda()  # comment out this line for the CPU-only build
model.eval()
checkpoint = torch.load('log/sem_seg/2024-02-04_15-12/checkpoints/best_model.pth')
model.load_state_dict(checkpoint['model_state_dict'])

# Dummy input with the exported shape (batch=1, channels=9, points).
# The 9 channels are x, y, z, r, g, b and the normalized coordinates.
x = torch.rand(1, 9, point_num)
x = x.cuda()  # comment out this line for the CPU-only build

export_onnx_file = "data/sem_seg.onnx"
# Export under no_grad and fold constants so the graph is inference-only;
# explicit tensor names make the model self-describing on the C++ side.
with torch.no_grad():
    torch.onnx.export(model,
                      x,
                      export_onnx_file,
                      opset_version=11,
                      do_constant_folding=True,
                      input_names=['points'],
                      output_names=['sem_logits'])

VS2022属性配置–确定好是release版本还是Debug版本

  1. 配置属性中,C++语言标准选择17版本
  2. C/C++附加包含目录–D:\source\onnxruntime-win-x64-gpu-1.11.0\include
  3. C/C++ → SDL检查 → 设置为"否(/sdl-)"
  4. 链接器附加库目录–D:\source\onnxruntime-win-x64-gpu-1.11.0\lib
  5. 链接器输入附加依赖项–onnxruntime.lib;onnxruntime_providers_shared.lib;
    onnxruntime_providers_cuda.lib

C++推理代码

代码来源–https://blog.csdn.net/taifyang/article/details/131878409?spm=1001.2014.3001.5502


#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <ctime>
#include <random>
#include <onnxruntime_cxx_api.h>

// Number of points fed to the network per block (must match the ONNX export).
constexpr int point_num = 8192;
// Number of semantic classes the model predicts.
constexpr int class_num = 3;

//点的结构体,坐标、颜色以及归一化的坐标
struct point
{
	float m_x, m_y, m_z, m_r, m_g, m_b, m_normal_x, m_normal_y, m_normal_z;
	point() :
		m_x(0), m_y(0), m_z(0), m_r(0), m_g(0), m_b(0), m_normal_x(0), m_normal_y(0), m_normal_z(0) {}
	point(float x, float y, float z, float r, float g, float b) :
		m_x(x), m_y(y), m_z(z), m_r(r), m_g(g), m_b(b), m_normal_x(0), m_normal_y(0), m_normal_z(0) {}
	point(float x, float y, float z, float r, float g, float b, float normal_x, float normal_y, float normal_z) :
		m_x(x), m_y(y), m_z(z), m_r(r), m_g(g), m_b(b), m_normal_x(normal_x), m_normal_y(normal_y), m_normal_z(normal_z) {}
};


int main()
{
	// Entry point: reads a point cloud from "2.txt" (one "x y z r g b"
	// record per line), slices it into overlapping 1x1 blocks in the x/y
	// plane, runs PointNet++ semantic segmentation on each block through
	// ONNX Runtime, accumulates per-point class votes across blocks, and
	// writes the winning label per point to "pred.txt".
	float x, y, z, r, g, b, l;	// NOTE(review): 'l' is declared but never used
	std::vector<point> pts;
	std::vector<float> points_x, points_y, points_z;
	int points_num = 0;

	// Read the point cloud from file.
	// NOTE(review): there is no check that the file opened; an absent
	// "2.txt" leaves pts empty and the min_element/max_element calls below
	// dereference end() -- undefined behavior. Confirm the input always exists.
	std::ifstream infile("2.txt");
	while (infile >> x >> y >> z >> r >> g >> b)
	{
		point pt(x, y, z, r, g, b);
		pts.push_back(pt);
		points_x.push_back(x);
		points_y.push_back(y);
		points_z.push_back(z);
		points_num++;
	}
	
	// Axis-aligned bounding box of the whole cloud.
	float x_min = *std::min_element(points_x.begin(), points_x.end());
	float y_min = *std::min_element(points_y.begin(), points_y.end());
	float z_min = *std::min_element(points_z.begin(), points_z.end());
	float x_max = *std::max_element(points_x.begin(), points_x.end());
	float y_max = *std::max_element(points_y.begin(), points_y.end());
	float z_max = *std::max_element(points_z.begin(), points_z.end());

	// Sliding-window parameters: 1.0 x 1.0 blocks advanced by 0.5 along x
	// and y, i.e. 50% overlap between neighboring blocks.
	float stride = 0.5;
	float block_size = 1.0;
	srand((int)time(0));	// seed rand() used for point resampling below

	// Number of window positions along x and y.
	int grid_x = ceil((x_max - x_min - block_size) / stride) + 1;
	int grid_y = ceil((y_max - y_min - block_size) / stride) + 1;

	// Walk every block; data_room collects the per-block feature points,
	// index_room the corresponding original point indices.
	std::vector<point> data_room;
	std::vector<int> index_room;
	for (size_t index_y = 0; index_y < grid_y; index_y++)
	{
		for (size_t index_x = 0; index_x < grid_x; index_x++)
		{
			// Current block extent, clamped to the cloud bounds; the start is
			// pulled back so every block keeps the full block_size width.
			float s_x = x_min + index_x * stride;
			float e_x = std::min(s_x + block_size, x_max);
			s_x = e_x - block_size;
			float s_y = y_min + index_y * stride;
			float e_y = std::min(s_y + block_size, y_max);
			s_y = e_y - block_size;

			// Indices of the points falling inside this block (x/y only).
			std::vector<int> point_idxs;
			for (size_t i = 0; i < points_num; i++)
			{
				if (points_x[i] >= s_x && points_x[i] <= e_x && points_y[i] >= s_y && points_y[i] <= e_y)
					point_idxs.push_back(i);
			}
			if (point_idxs.size() == 0)
				continue;

			// Pad point_idxs up to a whole multiple of point_num so the block
			// splits into complete batches. 'replace' selects sampling with
			// replacement when more than half of the points would have to be
			// duplicated.
			int num_batch = ceil(point_idxs.size() * 1.0 / point_num);
			int point_size = num_batch * point_num;
			bool replace = (point_size - point_idxs.size() <= point_idxs.size() ? false : true);

			// Draw the extra indices, with or without replacement.
			std::vector<int> point_idxs_repeat;
			if (replace)
			{
				for (size_t i = 0; i < point_size - point_idxs.size(); i++)
				{
					int id = rand() % point_idxs.size();
					point_idxs_repeat.push_back(point_idxs[id]);
				}
			}
			else
			{
				// NOTE(review): 'flags' is indexed by 'id' (a position within
				// point_idxs) but is sized to pts.size(); it tracks positions,
				// not original point ids -- verify this matches the intended
				// sample-without-replacement semantics of the Python original.
				std::vector<bool> flags(pts.size(), false);
				for (size_t i = 0; i < point_size - point_idxs.size(); i++)
				{
					int id = rand() % point_idxs.size();
					while (true)
					{
						if (flags[id] == false)
						{
							flags[id] = true;
							break;
						}
						id = rand() % point_idxs.size();
					}
					point_idxs_repeat.push_back(point_idxs[id]);
				}
			}
			point_idxs.insert(point_idxs.end(), point_idxs_repeat.begin(), point_idxs_repeat.end());

			// Shuffle the padded index list so duplicates are spread across batches.
			std::random_device rd;
			std::mt19937 g(rd());	// Mersenne Twister random-number engine
			std::shuffle(point_idxs.begin(), point_idxs.end(), g);	// random reordering

			// Gather the selected points into this block's working copy.
			std::vector<point> data_batch;
			for (size_t i = 0; i < point_idxs.size(); i++)
			{
				data_batch.push_back(pts[point_idxs[i]]);
			}

			// Build the 9-channel features: m_normal_* hold the coordinates
			// divided by the cloud maxima, x/y are re-centered on the block
			// center, and colors are scaled from [0,255] to [0,1].
			for (size_t i = 0; i < point_size; i++)
			{
				data_batch[i].m_normal_x = data_batch[i].m_x / x_max;
				data_batch[i].m_normal_y = data_batch[i].m_y / y_max;
				data_batch[i].m_normal_z = data_batch[i].m_z / z_max;
				data_batch[i].m_x -= (s_x + block_size / 2.0);
				data_batch[i].m_y -= (s_y + block_size / 2.0);
				data_batch[i].m_r /= 255.0;
				data_batch[i].m_g /= 255.0;
				data_batch[i].m_b /= 255.0;
				data_room.push_back(data_batch[i]);
				index_room.push_back(point_idxs[i]);
			}
		}
	}

	// Reshape the flat per-block data into m batches of n = point_num points.
	int n = point_num, m = index_room.size() / n;
	std::vector<std::vector<point>> data_rooms(m, std::vector<point>(n, point()));
	std::vector<std::vector<int>> index_rooms(m, std::vector<int>(n, 0));
	for (size_t i = 0; i < m; i++)
	{
		for (size_t j = 0; j < n; j++)
		{
			data_rooms[i][j] = data_room[i * n + j];
			index_rooms[i][j] = index_room[i * n + j];
		}
	}

	// vote_label_pool[i][c] counts how many times original point i was
	// predicted as class c across all (overlapping) blocks.
	std::vector<std::vector<int>> vote_label_pool(points_num, std::vector<int>(class_num, 0));
	int num_blocks = data_rooms.size();

	// ONNX Runtime environment and session configuration.
	Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "sem_seg");
	Ort::SessionOptions session_options;
	session_options.SetIntraOpNumThreads(1);
	session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);

	// CUDA execution provider options.
	OrtCUDAProviderOptions cuda_option;
	cuda_option.device_id = 1;	// NOTE(review): runs on GPU 1, not GPU 0 -- confirm the intended device index
	cuda_option.arena_extend_strategy = 0;
	cuda_option.cudnn_conv_algo_search = OrtCudnnConvAlgoSearchExhaustive;
	cuda_option.gpu_mem_limit = SIZE_MAX;
	cuda_option.do_copy_in_default_stream = 1;
	session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);	// overrides the EXTENDED level set above

	session_options.AppendExecutionProvider_CUDA(cuda_option);

	// Load the exported model (wide-char path is required on Windows).
	const wchar_t* model_path = L"D:\\source\\repo\\point_pytorch_onnx\\sem_seg.onnx";
	std::wcout << L"Model Path: " << model_path << std::endl;
	Ort::Session session(env, model_path, session_options);
	Ort::AllocatorWithDefaultOptions allocator;

	// Query input/output tensor names.
	// NOTE(review): GetInputName/GetOutputName return allocator-owned C strings
	// that are never freed here (and are deprecated in newer ORT releases in
	// favor of GetInputNameAllocated) -- acceptable for a one-shot tool.
	std::vector<const char*>  input_node_names;
	for (size_t i = 0; i < session.GetInputCount(); i++)
	{
		input_node_names.push_back(session.GetInputName(i, allocator));
	}

	std::vector<const char*> output_node_names;
	for (size_t i = 0; i < session.GetOutputCount(); i++)
	{
		output_node_names.push_back(session.GetOutputName(i, allocator));
	}

	// Flat buffer for one (1, 9, point_num) input tensor, reused per block.
	const size_t input_tensor_size = 1 * 9 * point_num;
	std::vector<float> input_tensor_values(input_tensor_size);

	// Run inference block by block (effective batch size is always 1).
	for (int sbatch = 0; sbatch < num_blocks; sbatch++)
	{
		int start_idx = sbatch;
		int end_idx = std::min(sbatch + 1, num_blocks);
		int real_batch_size = end_idx - start_idx;	// NOTE(review): always 1 here; value is unused
		std::vector<point> batch_data = data_rooms[start_idx];
		std::vector<int> point_idx = index_rooms[start_idx];
		// Point-major staging buffer: 9 consecutive features per point.
		std::vector<float> batch(point_num * 9);
		for (size_t i = 0; i < point_num; i++)
		{
			batch[9 * i + 0] = batch_data[i].m_x;
			batch[9 * i + 1] = batch_data[i].m_y;
			batch[9 * i + 2] = batch_data[i].m_z;
			batch[9 * i + 3] = batch_data[i].m_r;
			batch[9 * i + 4] = batch_data[i].m_g;
			batch[9 * i + 5] = batch_data[i].m_b;
			batch[9 * i + 6] = batch_data[i].m_normal_x;
			batch[9 * i + 7] = batch_data[i].m_normal_y;
			batch[9 * i + 8] = batch_data[i].m_normal_z;
		}

		// Transpose point-major (N, 9) into the channel-major (9, N) layout
		// the exported model expects.
		for (size_t i = 0; i < 9; i++)
		{
			for (size_t j = 0; j < point_num; j++)
			{
				input_tensor_values[i * point_num + j] = batch[9 * j + i];
			}
		}

		// Wrap the CPU buffer as an ORT tensor (no copy).
		std::vector<int64_t> input_node_dims = { 1, 9, point_num };
		auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
		Ort::Value input_tensor = Ort::Value::CreateTensor<float>(memory_info, input_tensor_values.data(), input_tensor_size, input_node_dims.data(), input_node_dims.size());

		std::vector<Ort::Value> inputs;
		inputs.push_back(std::move(input_tensor));

		std::vector<Ort::Value> outputs = session.Run(Ort::RunOptions{ nullptr }, input_node_names.data(), inputs.data(), input_node_names.size(), output_node_names.data(), output_node_names.size());

		// Copy the raw scores out of the first output tensor.
		const float* rawOutput = outputs[0].GetTensorData<float>();
		std::vector<int64_t> outputShape = outputs[0].GetTensorTypeAndShapeInfo().GetShape();
		size_t count = outputs[0].GetTensorTypeAndShapeInfo().GetElementCount();
		std::vector<float> pred(rawOutput, rawOutput + count);

		// Reshape scores to [point][class].
		// NOTE(review): this assumes the output layout is (1, point_num,
		// class_num); confirm against the exported model, as a transposed
		// layout would silently scramble the labels.
		std::vector<std::vector<float>> preds(point_num, std::vector<float>(class_num, 0));

		for (size_t i = 0; i < point_num; i++)
		{
			for (size_t j = 0; j < class_num; j++)
			{
				preds[i][j] = pred[i * class_num + j];
			}
		}

		// Per-point argmax over class scores, then vote for the original
		// point index (blocks overlap, so points collect multiple votes).
		std::vector<int> pred_label(point_num, 0);
		for (size_t i = 0; i < point_num; i++)
		{
			pred_label[i] = std::max_element(preds[i].begin(), preds[i].end()) - preds[i].begin();
			vote_label_pool[point_idx[i]][pred_label[i]] += 1;
		}
	}
   
	// Winning class per point -> "x y z label" lines in pred.txt.
	std::ofstream outfile("pred.txt");
	for (size_t i = 0; i < points_num; i++)
	{
		int max_index = std::max_element(vote_label_pool[i].begin(), vote_label_pool[i].end()) - vote_label_pool[i].begin();
		outfile << pts[i].m_x << " " << pts[i].m_y << " " << pts[i].m_z << " " << max_index << std::endl;
	}
	outfile.close();

	return 0;
}

评论 8
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值