环境配置:pytorch1.8.0+openvino2022.1.0
OpenVINO的api使用比libtorch略复杂,但是使用起来大同小异;感觉OpenVINO的接口更偏C风格,libtorch接口更偏C++风格。
2023/04/15更新:新增semantic segmentation部分,统一代码风格等。
2023/05/02更新:新增python onnx推理脚本。
2023/05/15更新:修正part segmentation C++推理代码错误,更新onnx文件下载链接。
2023/07/23更新:增加python openvino推理,删除python onnxruntime推理(后续移入onnxruntime部署文章)。
classification
以分10类,cpu版本为例。
先将pytorch训练出的pth权重文件转为onnx文件:
import torch
import pointnet_cls

point_num = 1024
class_num = 10
normal_channel = False

# Build the classification network and load the trained weights.
model = pointnet_cls.get_model(class_num, normal_channel)
# model = model.cuda()  # uncomment this line for the GPU build
model.eval()
checkpoint = torch.load('./cls.pth')
model.load_state_dict(checkpoint['model_state_dict'])

# Dummy input with shape (batch, channels, points): 6 channels when the
# model also consumes normals, otherwise plain xyz.
dummy_input = torch.rand(1, 6, point_num) if normal_channel else torch.rand(1, 3, point_num)
# dummy_input = dummy_input.cuda()  # uncomment this line for the GPU build

export_onnx_file = "./cls.onnx"
torch.onnx.export(model,
                  dummy_input,
                  export_onnx_file,
                  opset_version=11)
将onnx转成openvino IR格式:
import os

from openvino.tools import mo
from openvino.runtime import serialize

if __name__ == "__main__":
    onnx_path = "./cls.onnx"

    # serialize() does not create missing directories; without this the
    # script fails when ./cls/ does not exist yet.
    os.makedirs("./cls", exist_ok=True)

    # fp32 IR model
    fp32_path = "./cls/cls_fp32.xml"
    print(f"Export ONNX to OpenVINO FP32 IR to: {fp32_path}")
    model = mo.convert_model(onnx_path)
    serialize(model, fp32_path)

    # fp16 IR model (half the file size; weights compressed to float16)
    fp16_path = "./cls/cls_fp16.xml"
    print(f"Export ONNX to OpenVINO FP16 IR to: {fp16_path}")
    model = mo.convert_model(onnx_path, compress_to_fp16=True)
    serialize(model, fp16_path)
python推理:(onnx/IR格式)
import numpy as np
from openvino.inference_engine import IECore
point_num = 1024
def pc_normalize(pc):
    """Center the cloud on its centroid and scale it into the unit sphere."""
    pc = pc - np.mean(pc, axis=0)
    furthest = np.max(np.linalg.norm(pc, axis=1))
    return pc / furthest
if __name__ == '__main__':
    # One ModelNet sample per line: "x,y,z,nx,ny,nz"; only xyz is used.
    data = np.loadtxt('./bed_0610.txt', delimiter=',')
    point_set = data[:, 0:3]
    point_set = point_set[0:point_num, :]
    point_set[:, 0:3] = pc_normalize(point_set[:, 0:3])

    # (1, N, 3) -> (1, 3, N): the network expects channel-major input.
    points = np.reshape(point_set, (1, point_num, 3)).swapaxes(2, 1)

    ie = IECore()
    # The ONNX file and the IR pair load the same way:
    # net = ie.read_network(model="cls.onnx")
    net = ie.read_network(model="cls/cls_fp16.xml", weights="cls/cls_fp16.bin")
    exec_net = ie.load_network(network=net, device_name="CPU")
    input_name = next(iter(net.input_info))

    infer_request_handle = exec_net.start_async(request_id=0, inputs={input_name: points})
    if infer_request_handle.wait(-1) == 0:
        # NOTE(review): _outputs_list is a private attribute of the request;
        # index 1 is presumably the class logits (index 0 the auxiliary
        # trans_feat output of PointNet) — verify against the exported model.
        output_layer = infer_request_handle._outputs_list[1]
        outputs = infer_request_handle.output_blobs[output_layer]
        print(np.argmax(outputs.buffer))
C++推理:(onnx/IR格式)
#include <iostream>
#include <vector>
#include <fstream>
#include <algorithm> // std::max_element (was relied on via a transitive include)
#include <cmath>     // sqrt, pow (was relied on via a transitive include)
#include <openvino/openvino.hpp>
const int point_num = 1024; // points fed to the network per cloud
const int class_num = 10;   // ModelNet categories

// Normalize the cloud in place: translate so the centroid sits at the
// origin, then scale so the farthest point lies on the unit sphere.
// Layout: points = {x0,y0,z0, x1,y1,z1, ...}, exactly point_num points.
void pc_normalize(std::vector<float>& points)
{
	float mean_x = 0, mean_y = 0, mean_z = 0;
	for (int i = 0; i < point_num; ++i)
	{
		mean_x += points[3 * i];
		mean_y += points[3 * i + 1];
		mean_z += points[3 * i + 2];
	}
	mean_x /= point_num;
	mean_y /= point_num;
	mean_z /= point_num;
	for (int i = 0; i < point_num; ++i)
	{
		points[3 * i] -= mean_x;
		points[3 * i + 1] -= mean_y;
		points[3 * i + 2] -= mean_z;
	}
	// Track the squared radius and take a single sqrt at the end instead of
	// calling sqrt/pow up to six times per point as the original did.
	float max_sq = 0;
	for (int i = 0; i < point_num; ++i)
	{
		const float dx = points[3 * i];
		const float dy = points[3 * i + 1];
		const float dz = points[3 * i + 2];
		const float sq = dx * dx + dy * dy + dz * dz;
		if (sq > max_sq)
			max_sq = sq;
	}
	const float m = sqrt(max_sq);
	if (m == 0)
		return; // degenerate cloud (all points identical): nothing to scale
	for (int i = 0; i < point_num; ++i)
	{
		points[3 * i] /= m;
		points[3 * i + 1] /= m;
		points[3 * i + 2] /= m;
	}
}
// Classify one normalized cloud with the exported PointNet model and print
// the winning class index (argmax over the class_num logits) to stdout.
void classfier(std::vector<float> & points)
{
	ov::Core core;
	// The ONNX file and the IR pair can be loaded interchangeably:
	//auto model = core.compile_model("cls.onnx","CPU");
	auto compiled = core.compile_model("./cls/cls_fp16.xml","CPU");
	auto request = compiled.create_infer_request();

	auto input_tensor = request.get_input_tensor(0);
	input_tensor.set_shape({ 1, 3, point_num });

	// Transpose the interleaved x,y,z triplets into channel-major order
	// (all x, then all y, then all z) as the (1,3,N) input expects.
	float* dst = input_tensor.data<float>();
	for (size_t channel = 0; channel < 3; ++channel)
	{
		for (size_t pt = 0; pt < point_num; ++pt)
		{
			dst[channel * point_num + pt] = points[3 * pt + channel];
		}
	}

	request.infer();

	float* logits = request.get_output_tensor(0).data<float>();
	const int predict_label = static_cast<int>(std::max_element(logits, logits + class_num) - logits);
	std::cout << predict_label << std::endl;
}
// Entry point: load one point cloud ("x,y,z,nx,ny,nz" per line), normalize
// it and print the predicted class. Normals are read but discarded.
int main()
{
	std::ifstream infile("bed_0610.txt");
	if (!infile)
	{
		// The original silently classified an empty/garbage buffer when
		// the file was missing.
		std::cerr << "failed to open bed_0610.txt" << std::endl;
		return -1;
	}
	std::vector<float> points;
	points.reserve(3 * point_num);
	float x, y, z, nx, ny, nz;
	char ch; // swallows the ',' separators
	for (int i = 0; i < point_num; i++)
	{
		if (!(infile >> x >> ch >> y >> ch >> z >> ch >> nx >> ch >> ny >> ch >> nz))
		{
			std::cerr << "file contains fewer than " << point_num << " points" << std::endl;
			return -1;
		}
		points.push_back(x);
		points.push_back(y);
		points.push_back(z);
	}
	infile.close();
	pc_normalize(points);
	classfier(points);
	return 0;
}
part segmentation
以分16类50部分,cpu版本为例。
先将pytorch训练出的pth权重文件转为onnx文件:
# NOTE: the original had `import torch` duplicated; the duplicate is removed.
import torch
import pointnet_part_seg

point_num = 2048
class_num = 16
part_num = 50
normal_channel = False


def to_categorical(y, class_num):
    """1-hot encodes a tensor"""
    new_y = torch.eye(class_num)[y.cpu().data.numpy(),]
    if (y.is_cuda):
        return new_y.cuda()
    return new_y


model = pointnet_part_seg.get_model(part_num, normal_channel)
# model = model.cuda()  # uncomment this line for the GPU build
model.eval()
checkpoint = torch.load('./part_seg.pth')
model.load_state_dict(checkpoint['model_state_dict'])

x = torch.rand(1, 6, point_num) if normal_channel else torch.rand(1, 3, point_num)
# x = x.cuda()  # uncomment this line for the GPU build (fixes the garbled "#=x" line)
label = torch.randint(0, 1, (1, 1))
# label = label.cuda()  # uncomment this line for the GPU build

export_onnx_file = "./part_seg.onnx"
# The part-segmentation model takes two inputs: the point cloud and the
# one-hot object-category label.
torch.onnx.export(model,
                  (x, to_categorical(label, class_num)),
                  export_onnx_file,
                  opset_version=11)
将onnx转成openvino IR格式:
import os

from openvino.tools import mo
from openvino.runtime import serialize

if __name__ == "__main__":
    onnx_path = "./part_seg.onnx"

    # serialize() does not create missing directories; without this the
    # script fails when ./part_seg/ does not exist yet.
    os.makedirs("./part_seg", exist_ok=True)

    # fp32 IR model
    fp32_path = "./part_seg/part_seg_fp32.xml"
    print(f"Export ONNX to OpenVINO FP32 IR to: {fp32_path}")
    model = mo.convert_model(onnx_path)
    serialize(model, fp32_path)

    # fp16 IR model (half the file size; weights compressed to float16)
    fp16_path = "./part_seg/part_seg_fp16.xml"
    print(f"Export ONNX to OpenVINO FP16 IR to: {fp16_path}")
    model = mo.convert_model(onnx_path, compress_to_fp16=True)
    serialize(model, fp16_path)
python推理:(onnx/IR格式)
import numpy as np
from openvino.inference_engine import IECore
point_num = 2048
class_num = 16
def to_categorical(y, class_num):
    """One-hot encode integer labels; appends a trailing class axis, float32."""
    one_hot = np.eye(class_num)[y,]
    return one_hot.astype(np.float32)
def pc_normalize(pc):
    """Translate the cloud to its centroid, then scale by the max radius."""
    centered = pc - pc.mean(axis=0)
    radius = np.sqrt((centered ** 2).sum(axis=1)).max()
    return centered / radius
if __name__ == '__main__':
    # One ShapeNet sample per line: "x y z nx ny nz part-label"; xyz only.
    data = np.loadtxt('85a15c26a6e9921ae008cc4902bfe3cd.txt')
    point_set = data[:, 0:3]
    point_set[:, 0:3] = pc_normalize(point_set[:, 0:3])

    # Resample to exactly point_num points (with replacement).
    choice = np.random.choice(point_set.shape[0], point_num, replace=True)
    point_set = point_set[choice, :][:, 0:3]
    pts = point_set

    # (1, N, 3) -> (1, 3, N): the network expects channel-major input.
    points = np.reshape(point_set, (1, point_num, 3)).swapaxes(2, 1)
    label = np.array([[0]], dtype=np.int32)  # object category 0

    ie = IECore()
    # The ONNX file and the IR pair load the same way:
    # net = ie.read_network(model="part_seg.onnx")
    net = ie.read_network(model="part_seg/part_seg_fp16.xml", weights="part_seg/part_seg_fp16.bin")
    exec_net = ie.load_network(network=net, device_name="CPU")
    input_names = list(net.input_info)

    # NOTE(review): input 0 receives the one-hot label and input 1 the cloud,
    # the opposite of the C++ demo's positional order — verify against the
    # exported model's input names.
    infer_request_handle = exec_net.start_async(
        request_id=0,
        inputs={input_names[0]: to_categorical(label, class_num), input_names[1]: points})
    if infer_request_handle.wait(-1) == 0:
        # NOTE(review): _outputs_list is private; index 1 is presumably the
        # per-point part logits — verify.
        output_layer = infer_request_handle._outputs_list[1]
        outputs = infer_request_handle.output_blobs[output_layer]
        cur_pred_val_logits = outputs.buffer
        cur_pred_val = np.zeros((1, point_num)).astype(np.int32)
        logits = cur_pred_val_logits[0, :, :]
        cur_pred_val[0, :] = np.argmax(logits, 1)
        # Save "x y z predicted-part-id" per point.
        pts = np.append(pts.reshape(point_num, 3), cur_pred_val[0, :].reshape(point_num, 1), 1)
        np.savetxt('pred.txt', pts, fmt='%.06f')
C++推理:(onnx/IR格式)
#include <iostream>
#include <vector>
#include <fstream>
#include <ctime>
#include <algorithm> // std::max_element (was relied on via a transitive include)
#include <cmath>     // sqrt, pow (was relied on via a transitive include)
#include <openvino/openvino.hpp>
const int point_num = 2048; // points fed to the network per cloud
const int class_num = 16;   // ShapeNet object categories
const int parts_num = 50;   // part labels over all categories

// Normalize the cloud in place: translate so the centroid sits at the
// origin, then scale so the farthest point lies on the unit sphere.
// Layout: points = {x0,y0,z0, x1,y1,z1, ...}, exactly point_num points.
void pc_normalize(std::vector<float>& points)
{
	float mean_x = 0, mean_y = 0, mean_z = 0;
	for (int i = 0; i < point_num; ++i)
	{
		mean_x += points[3 * i];
		mean_y += points[3 * i + 1];
		mean_z += points[3 * i + 2];
	}
	mean_x /= point_num;
	mean_y /= point_num;
	mean_z /= point_num;
	for (int i = 0; i < point_num; ++i)
	{
		points[3 * i] -= mean_x;
		points[3 * i + 1] -= mean_y;
		points[3 * i + 2] -= mean_z;
	}
	// Track the squared radius and take a single sqrt at the end instead of
	// calling sqrt/pow up to six times per point as the original did.
	float max_sq = 0;
	for (int i = 0; i < point_num; ++i)
	{
		const float dx = points[3 * i];
		const float dy = points[3 * i + 1];
		const float dz = points[3 * i + 2];
		const float sq = dx * dx + dy * dy + dz * dz;
		if (sq > max_sq)
			max_sq = sq;
	}
	const float m = sqrt(max_sq);
	if (m == 0)
		return; // degenerate cloud (all points identical): nothing to scale
	for (int i = 0; i < point_num; ++i)
	{
		points[3 * i] /= m;
		points[3 * i + 1] /= m;
		points[3 * i + 2] /= m;
	}
}
// Randomly resample the cloud in place to exactly point_num points,
// drawing uniformly WITH replacement from the input points.
void resample(std::vector<float> & points)
{
// NOTE(review): re-seeding on every call means two calls within the same
// second reproduce the same index sequence; seeding once at program start
// would be safer.
srand((int)time(0));
std::vector<int> choice(point_num);
for (size_t i = 0; i < point_num; i++)
{
// points.size() / 3 == number of xyz triplets in the input
choice[i] = rand() % (points.size() / 3);
}
// Gather the chosen triplets into a fresh buffer, then swap it in.
std::vector<float> temp_points(3 * point_num);
for (size_t i = 0; i < point_num; i++)
{
temp_points[3 * i] = points[3 * choice[i]];
temp_points[3 * i + 1] = points[3 * choice[i] + 1];
temp_points[3 * i + 2] = points[3 * choice[i] + 2];
}
points = temp_points;
}
// Run the part-segmentation model on one cloud.
// `points`: normalized, resampled cloud (x,y,z triplets, point_num points).
// `labels`: one-hot object-category vector of length class_num.
// Returns, for each point, the index of the highest-scoring part.
std::vector<int> classfier(std::vector<float> & points, std::vector<float> & labels)
{
std::vector<int> max_index(point_num, 0);
ov::Core core;
// The ONNX file and the IR pair load the same way:
//auto model = core.compile_model("part_seg.onnx", "CPU");
auto model = core.compile_model("./part_seg/part_seg_fp16.xml", "CPU");
auto iq = model.create_infer_request();
// Input 0: the point cloud, transposed to channel-major (all x, all y, all z).
auto input0 = iq.get_input_tensor(0);
input0.set_shape({ 1, 3, point_num });
float* input_data_host0 = input0.data<float>();
for (size_t i = 0; i < 3; i++)
{
for (size_t j = 0; j < point_num; j++)
{
input_data_host0[i * point_num + j] = points[3 * j + i];
}
}
// Input 1: the one-hot category label.
// NOTE(review): positional order (0 = cloud, 1 = label) must match the
// exported model; the python demo maps the label to input 0 — verify.
auto input1 = iq.get_input_tensor(1);
input1.set_shape({ 1, 1, class_num });
float* input_data_host1 = input1.data<float>();
for (size_t i = 0; i < class_num; i++)
{
input_data_host1[i] = labels[i];
}
iq.infer();
// NOTE(review): assumes output 0 is the per-point logits laid out as
// point_num x parts_num — confirm (the python demo reads output index 1).
auto output = iq.get_output_tensor(0);
float* pred = output.data<float>();
std::vector<std::vector<float>> preds(point_num, std::vector<float>(parts_num, 0));
for (size_t i = 0; i < point_num; i++)
{
for (size_t j = 0; j < parts_num; j++)
{
preds[i][j] = pred[i * parts_num + j];
}
}
// Argmax over the parts_num scores of each point.
for (size_t i = 0; i < point_num; i++)
{
max_index[i] = std::max_element(preds[i].begin(), preds[i].end()) - preds[i].begin();
}
return max_index;
}
// Entry point: load a ShapeNet sample ("x y z nx ny nz label" per line),
// normalize and resample it, run part segmentation, and write
// "x y z predicted-part-id" per point to pred.txt.
int main()
{
	std::ifstream infile("85a15c26a6e9921ae008cc4902bfe3cd.txt");
	if (!infile)
	{
		std::cerr << "failed to open input point cloud" << std::endl;
		return -1;
	}
	std::vector<float> points;
	float x, y, z, nx, ny, nz, label;
	while (infile >> x >> y >> z >> nx >> ny >> nz >> label)
	{
		points.push_back(x);
		points.push_back(y);
		points.push_back(z);
	}
	infile.close();

	// One-hot object-category vector: category 0.
	std::vector<float> labels(class_num, 0.0f);
	labels[0] = 1.0f;

	pc_normalize(points);
	resample(points);
	std::vector<int> result = classfier(points, labels);

	// BUG FIX: the original wrote std::fstream outfile("pred.txt", 'w') —
	// the char 'w' is not a valid std::ios_base::openmode, so the stream was
	// opened with bogus flags. A plain std::ofstream opens for writing.
	std::ofstream outfile("pred.txt");
	for (size_t i = 0; i < point_num; i++)
	{
		outfile << points[3 * i] << " " << points[3 * i + 1] << " " << points[3 * i + 2] << " " << result[i] << std::endl;
	}
	outfile.close();
	return 0;
}
semantic segmentation
以分13类,cpu版本为例。
先将pytorch训练出的pth权重文件转为onnx文件:
import torch
import pointnet_sem_seg

point_num = 4096
class_num = 13

# Build the 13-class semantic-segmentation network and load trained weights.
model = pointnet_sem_seg.get_model(class_num)
# model = model.cuda()  # uncomment this line for the GPU build
model.eval()
checkpoint = torch.load('sem_seg.pth')
model.load_state_dict(checkpoint['model_state_dict'])

# Dummy input with shape (batch, 9 channels, points).
dummy_input = torch.rand(1, 9, point_num)
# dummy_input = dummy_input.cuda()  # uncomment this line for the GPU build

export_onnx_file = "./sem_seg.onnx"
torch.onnx.export(model,
                  dummy_input,
                  export_onnx_file,
                  opset_version=11)
将onnx转成openvino IR格式:
import os

from openvino.tools import mo
from openvino.runtime import serialize

if __name__ == "__main__":
    onnx_path = "./sem_seg.onnx"

    # serialize() does not create missing directories; without this the
    # script fails when ./sem_seg/ does not exist yet.
    os.makedirs("./sem_seg", exist_ok=True)

    # fp32 IR model
    fp32_path = "./sem_seg/sem_seg_fp32.xml"
    print(f"Export ONNX to OpenVINO FP32 IR to: {fp32_path}")
    model = mo.convert_model(onnx_path)
    serialize(model, fp32_path)

    # fp16 IR model (half the file size; weights compressed to float16)
    fp16_path = "./sem_seg/sem_seg_fp16.xml"
    print(f"Export ONNX to OpenVINO FP16 IR to: {fp16_path}")
    model = mo.convert_model(onnx_path, compress_to_fp16=True)
    serialize(model, fp16_path)
python推理:(onnx/IR格式)
import numpy as np
from openvino.inference_engine import IECore
point_num = 4096
class_num = 13
stride = 0.5
block_size = 1.0
if __name__ == '__main__':
    data = np.load('Area_1_conferenceRoom_1.npy')
    points = data[:, :6]

    # Slide a block_size x block_size window over the room (step = stride)
    # and collect, per window, a multiple of point_num resampled points.
    coord_min, coord_max = np.amin(points, axis=0)[:3], np.amax(points, axis=0)[:3]
    grid_x = int(np.ceil(float(coord_max[0] - coord_min[0] - block_size) / stride) + 1)
    grid_y = int(np.ceil(float(coord_max[1] - coord_min[1] - block_size) / stride) + 1)
    data_room, index_room = np.array([]), np.array([])
    for index_y in range(grid_y):
        for index_x in range(grid_x):
            # Window bounds, clamped to the room extent.
            s_x = coord_min[0] + index_x * stride
            e_x = min(s_x + block_size, coord_max[0])
            s_x = e_x - block_size
            s_y = coord_min[1] + index_y * stride
            e_y = min(s_y + block_size, coord_max[1])
            s_y = e_y - block_size
            point_idxs = np.where((points[:, 0] >= s_x) & (points[:, 0] <= e_x)
                                  & (points[:, 1] >= s_y) & (points[:, 1] <= e_y))[0]
            if point_idxs.size == 0:
                continue
            # Pad the window's point set up to a multiple of point_num.
            num_batch = int(np.ceil(point_idxs.size / point_num))
            point_size = int(num_batch * point_num)
            replace = False if (point_size - point_idxs.size <= point_idxs.size) else True
            point_idxs_repeat = np.random.choice(point_idxs, point_size - point_idxs.size, replace=replace)
            point_idxs = np.concatenate((point_idxs, point_idxs_repeat))
            np.random.shuffle(point_idxs)
            data_batch = points[point_idxs, :]
            # Channels 6-8: xyz scaled by the room maxima.
            normlized_xyz = np.zeros((point_size, 3))
            normlized_xyz[:, 0] = data_batch[:, 0] / coord_max[0]
            normlized_xyz[:, 1] = data_batch[:, 1] / coord_max[1]
            normlized_xyz[:, 2] = data_batch[:, 2] / coord_max[2]
            # Re-center xy on the window; scale colours into [0, 1].
            data_batch[:, 0] = data_batch[:, 0] - (s_x + block_size / 2.0)
            data_batch[:, 1] = data_batch[:, 1] - (s_y + block_size / 2.0)
            data_batch[:, 3:6] /= 255.0
            data_batch = np.concatenate((data_batch, normlized_xyz), axis=1)
            data_room = np.vstack([data_room, data_batch]) if data_room.size else data_batch
            index_room = np.hstack([index_room, point_idxs]) if index_room.size else point_idxs
    data_room = data_room.reshape((-1, point_num, data_room.shape[1]))
    index_room = index_room.reshape((-1, point_num))

    ie = IECore()
    # The ONNX file and the IR pair load the same way:
    # net = ie.read_network(model="sem_seg.onnx")
    net = ie.read_network(model="sem_seg/sem_seg_fp16.xml", weights="sem_seg/sem_seg_fp16.bin")
    exec_net = ie.load_network(network=net, device_name="CPU")
    input_name = next(iter(net.input_info))

    # Overlapping windows predict the same point several times; accumulate
    # one vote per prediction and take the majority at the end.
    vote_label_pool = np.zeros((points.shape[0], class_num))
    num_blocks = data_room.shape[0]
    batch_data = np.zeros((1, point_num, 9))
    batch_point_index = np.zeros((1, point_num))
    for sbatch in range(num_blocks):
        start_idx = sbatch
        end_idx = min(sbatch + 1, num_blocks)
        real_batch_size = end_idx - start_idx
        batch_data[0:real_batch_size, ...] = data_room[start_idx:end_idx, ...]
        batch_point_index[0:real_batch_size, ...] = index_room[start_idx:end_idx, ...]
        net_input = batch_data.swapaxes(2, 1).astype(np.float32)  # renamed: no builtin shadowing
        infer_request_handle = exec_net.start_async(request_id=0, inputs={input_name: net_input})
        if infer_request_handle.wait(-1) == 0:
            # NOTE(review): _outputs_list is private; index 1 is presumably
            # the per-point class logits — verify against the exported model.
            output_layer = infer_request_handle._outputs_list[1]
            outputs = infer_request_handle.output_blobs[output_layer]
            batch_pred_label = np.argmax(outputs.buffer, 2)
            point_idx = batch_point_index[0:real_batch_size, ...]
            pred_label = batch_pred_label[0:real_batch_size, ...]
            for b in range(pred_label.shape[0]):
                for n in range(pred_label.shape[1]):
                    vote_label_pool[int(point_idx[b, n]), int(pred_label[b, n])] += 1
    pred = np.argmax(vote_label_pool, 1)
    with open('pred.txt', 'w') as fout:
        for i in range(points.shape[0]):
            fout.write('%f %f %f %d\n' % (points[i, 0], points[i, 1], points[i, 2], pred[i]))
C++推理:(onnx/IR格式)
#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <cmath>  // ceil (was relied on via a transitive include)
#include <ctime>
#include <random>
#include <openvino/openvino.hpp>

const int point_num = 4096; // points fed to the network per block
const int class_num = 13;   // S3DIS semantic classes
// One point of the room cloud: position, RGB colour, and the normalized
// coordinates that fill channels 6-8 of the 9-channel network input.
struct point
{
	float m_x = 0, m_y = 0, m_z = 0;
	float m_r = 0, m_g = 0, m_b = 0;
	float m_normal_x = 0, m_normal_y = 0, m_normal_z = 0;

	point() = default;
	point(float x, float y, float z, float r, float g, float b)
		: m_x(x), m_y(y), m_z(z), m_r(r), m_g(g), m_b(b) {}
	point(float x, float y, float z, float r, float g, float b,
	      float normal_x, float normal_y, float normal_z)
		: m_x(x), m_y(y), m_z(z), m_r(r), m_g(g), m_b(b),
		  m_normal_x(normal_x), m_normal_y(normal_y), m_normal_z(normal_z) {}
};
// Entry point: load an S3DIS room ("x y z r g b label" per line, label
// ignored), cut it into overlapping 1m x 1m blocks, run semantic
// segmentation per block, majority-vote per point, and write
// "x y z predicted-class" per point to pred.txt.
int main()
{
float x, y, z, r, g, b, l;
std::vector<point> pts;
std::vector<float> points_x, points_y, points_z;
int points_num = 0;
std::ifstream infile("Area_1_conferenceRoom_1.txt");
while (infile >> x >> y >> z >> r >> g >> b >> l)
{
point pt(x, y, z, r, g, b);
pts.push_back(pt);
points_x.push_back(x);
points_y.push_back(y);
points_z.push_back(z);
points_num++;
}
// Axis-aligned bounding box of the room.
float x_min = *std::min_element(points_x.begin(), points_x.end());
float y_min = *std::min_element(points_y.begin(), points_y.end());
float z_min = *std::min_element(points_z.begin(), points_z.end());
float x_max = *std::max_element(points_x.begin(), points_x.end());
float y_max = *std::max_element(points_y.begin(), points_y.end());
float z_max = *std::max_element(points_z.begin(), points_z.end());
// Sliding-window parameters: 1m blocks moved in 0.5m steps (overlap).
float stride = 0.5;
float block_size = 1.0;
srand((int)time(0));
int grid_x = ceil((x_max - x_min - block_size) / stride) + 1;
int grid_y = ceil((y_max - y_min - block_size) / stride) + 1;
std::vector<point> data_room;
std::vector<int> index_room;
for (size_t index_y = 0; index_y < grid_y; index_y++)
{
for (size_t index_x = 0; index_x < grid_x; index_x++)
{
// Window bounds in xy, clamped to the room extent.
float s_x = x_min + index_x * stride;
float e_x = std::min(s_x + block_size, x_max);
s_x = e_x - block_size;
float s_y = y_min + index_y * stride;
float e_y = std::min(s_y + block_size, y_max);
s_y = e_y - block_size;
// Collect the indices of all points falling inside this window.
std::vector<int> point_idxs;
for (size_t i = 0; i < points_num; i++)
{
if (points_x[i] >= s_x && points_x[i] <= e_x && points_y[i] >= s_y && points_y[i] <= e_y)
point_idxs.push_back(i);
}
if (point_idxs.size() == 0)
continue;
// Pad the window's point set up to a multiple of point_num by
// re-drawing indices: with replacement when more than half must be
// repeated, otherwise (approximately) without replacement.
int num_batch = ceil(point_idxs.size() * 1.0 / point_num);
int point_size = num_batch * point_num;
bool replace = (point_size - point_idxs.size() <= point_idxs.size() ? false : true);
std::vector<int> point_idxs_repeat;
if (replace)
{
for (size_t i = 0; i < point_size - point_idxs.size(); i++)
{
int id = rand() % point_idxs.size();
point_idxs_repeat.push_back(point_idxs[id]);
}
}
else
{
// NOTE(review): flags is sized pts.size() but indexed by positions
// into point_idxs — it marks draws as used so each position is
// picked at most once; verify this matches the intended
// "without replacement" sampling of the python script.
std::vector<bool> flags(pts.size(), false);
for (size_t i = 0; i < point_size - point_idxs.size(); i++)
{
int id = rand() % point_idxs.size();
while (true)
{
if (flags[id] == false)
{
flags[id] = true;
break;
}
id = rand() % point_idxs.size();
}
point_idxs_repeat.push_back(point_idxs[id]);
}
}
point_idxs.insert(point_idxs.end(), point_idxs_repeat.begin(), point_idxs_repeat.end());
std::random_device rd;
std::mt19937 g(rd()); // random engine: Mersenne Twister generator
std::shuffle(point_idxs.begin(), point_idxs.end(), g); // shuffle the padded index list into a random order
std::vector<point> data_batch;
for (size_t i = 0; i < point_idxs.size(); i++)
{
data_batch.push_back(pts[point_idxs[i]]);
}
// Build the 9 input channels: window-centered xy, z, colours in
// [0,1], and xyz scaled by the room maxima.
for (size_t i = 0; i < point_size; i++)
{
data_batch[i].m_normal_x = data_batch[i].m_x / x_max;
data_batch[i].m_normal_y = data_batch[i].m_y / y_max;
data_batch[i].m_normal_z = data_batch[i].m_z / z_max;
data_batch[i].m_x -= (s_x + block_size / 2.0);
data_batch[i].m_y -= (s_y + block_size / 2.0);
data_batch[i].m_r /= 255.0;
data_batch[i].m_g /= 255.0;
data_batch[i].m_b /= 255.0;
data_room.push_back(data_batch[i]);
index_room.push_back(point_idxs[i]);
}
}
}
// Reshape the flat buffers into blocks of exactly point_num points each.
int n = point_num, m = index_room.size() / n;
std::vector<std::vector<point>> data_rooms(m, std::vector<point>(n, point()));
std::vector<std::vector<int>> index_rooms(m, std::vector<int>(n, 0));
for (size_t i = 0; i < m; i++)
{
for (size_t j = 0; j < n; j++)
{
data_rooms[i][j] = data_room[i * n + j];
index_rooms[i][j] = index_room[i * n + j];
}
}
// Overlapping blocks predict the same point several times: one vote per
// prediction, majority wins at the end.
std::vector<std::vector<int>> vote_label_pool(points_num, std::vector<int>(class_num, 0));
int num_blocks = data_rooms.size();
clock_t start = clock();
ov::Core core;
// The ONNX file and the IR pair load the same way:
//auto model = core.compile_model("./sem_seg.onnx", "CPU");
auto model = core.compile_model("./sem_seg/sem_seg_fp16.xml", "CPU");
auto iq = model.create_infer_request();
auto input = iq.get_input_tensor(0);
auto output = iq.get_output_tensor(0);
input.set_shape({ 1, 9, point_num });
float* input_data_host = input.data<float>();
for (int sbatch = 0; sbatch < num_blocks; sbatch++)
{
int start_idx = sbatch;
int end_idx = std::min(sbatch + 1, num_blocks);
int real_batch_size = end_idx - start_idx;
std::vector<point> batch_data = data_rooms[start_idx];
std::vector<int> point_idx = index_rooms[start_idx];
// Flatten the block point-major first ...
std::vector<float> batch(point_num * 9);
for (size_t i = 0; i < point_num; i++)
{
batch[9 * i + 0] = batch_data[i].m_x;
batch[9 * i + 1] = batch_data[i].m_y;
batch[9 * i + 2] = batch_data[i].m_z;
batch[9 * i + 3] = batch_data[i].m_r;
batch[9 * i + 4] = batch_data[i].m_g;
batch[9 * i + 5] = batch_data[i].m_b;
batch[9 * i + 6] = batch_data[i].m_normal_x;
batch[9 * i + 7] = batch_data[i].m_normal_y;
batch[9 * i + 8] = batch_data[i].m_normal_z;
}
// ... then transpose into the channel-major (1,9,N) input layout.
for (size_t i = 0; i < 9; i++)
{
for (size_t j = 0; j < point_num; j++)
{
input_data_host[i * point_num + j] = batch[9 * j + i];
}
}
iq.infer();
// NOTE(review): assumes output 0 holds point_num x class_num per-point
// scores — confirm against the exported model (the python demo reads
// output index 1).
float* pred = output.data<float>();
std::vector<std::vector<float>> preds(point_num, std::vector<float>(class_num, 0));
for (size_t i = 0; i < point_num; i++)
{
for (size_t j = 0; j < class_num; j++)
{
preds[i][j] = pred[i * class_num + j];
}
}
// Per-point argmax, accumulated into the vote pool.
std::vector<int> pred_label(point_num, 0);
for (size_t i = 0; i < point_num; i++)
{
pred_label[i] = std::max_element(preds[i].begin(), preds[i].end()) - preds[i].begin();
vote_label_pool[point_idx[i]][pred_label[i]] += 1;
}
}
// Write "x y z majority-class" using the ORIGINAL (unshifted) coordinates.
std::ofstream outfile("pred.txt");
for (size_t i = 0; i < points_num; i++)
{
int max_index = std::max_element(vote_label_pool[i].begin(), vote_label_pool[i].end()) - vote_label_pool[i].begin();
outfile << pts[i].m_x << " " << pts[i].m_y << " " << pts[i].m_z << " " << max_index << std::endl;
}
outfile.close();
return 0;
}
注意,由于C++无法直接读取npy格式文件(可以依赖一些库),这里先使用python脚本将npy文件转换成txt文件。
import numpy as np

# Dump the room to a whitespace-separated text file so the C++ demo,
# which cannot read .npy directly, can load it.
room = np.load("Area_1_conferenceRoom_1.npy")
np.savetxt('Area_1_conferenceRoom_1.txt', room, fmt='%0.06f')
参考:openvino的入门–c++部署的图像分类为例
模型下载地址:pointnet模型权重