1. Introduction
I had been meaning to try inference with the C version of the RKNN API, so I picked a simple model to start with: UNet. I originally planned to work from the Rockchip demo, but it targets MobileNet and is missing the functions I needed, which blocked me for quite a while. Then I came across Tengine's related post-processing code, adapted it, and finally got everything debugged and running. (You will need to set up the environment yourself.)
2. C Code Changes
2.1 Preprocessing
const char* img_path = "/home/ubuntu/npu_test/unet/img/01_test.tif";
const char* roi_mask_path = "/home/ubuntu/npu_test/unet/img/01_test_mask.png";
const char *model_path = "/home/ubuntu/npu_test/unet/model/eyes_unet-sim-3588.rknn";
// Load ROI mask
Mat roi_img = imread(roi_mask_path, IMREAD_GRAYSCALE);
if (roi_img.empty()) {
cout << "Image not found: " << roi_mask_path << endl;
return -1;
}
// Load image
Mat original_img = imread(img_path);
if (original_img.empty()) {
cout << "Image not found: " << img_path << endl;
return -1;
}
// Convert image to RGB
cvtColor(original_img, original_img, COLOR_BGR2RGB);
// No explicit batch-dimension handling is needed here; the RGB image is used directly
// Mat img = original_img.reshape(1, 1);
Mat img = original_img;
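The test image used here already matches the model's input resolution (565 x 584, the MODEL_IN_WIDTH/MODEL_IN_HEIGHT constants declared in the next section), so no resize is done. If a different image is fed in, a guard along these lines (a minimal sketch, not in the original code) can be added before the input is set:

// Optional guard: resize only if the source image does not already match
// the model input (width 565 x height 584 for this UNet).
const int model_in_width = 565;
const int model_in_height = 584;
if (img.cols != model_in_width || img.rows != model_in_height) {
    cv::resize(img, img, cv::Size(model_in_width, model_in_height));
}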
2.2 RKNN Model Loading
const int MODEL_IN_WIDTH = 565;
const int MODEL_IN_HEIGHT = 584;
const int MODEL_IN_CHANNELS = 3;
rknn_context ctx = 0;
int ret;
int model_len = 0;
unsigned char *model;
// ======================= Initialize the RKNN model ===================
model = load_model(model_path, &model_len);
if (!model)
{
printf("load_model fail!\n");
return -1;
}
ret = rknn_init(&ctx, model, model_len, 0, NULL);
if (ret < 0)
{
printf("rknn_init fail! ret=%d\n", ret);
return -1;
}
// ======================= Query the model's input/output count ===================
rknn_input_output_num io_num;
ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
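At this point it can also help to dump each tensor's shape and layout, which makes input/output mismatches obvious early. A debug sketch (not in the original code) using rknn_query with RKNN_QUERY_INPUT_ATTR / RKNN_QUERY_OUTPUT_ATTR:

// Optional debug output: print the dimensions of every input and output tensor.
for (uint32_t i = 0; i < io_num.n_input; i++) {
    rknn_tensor_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.index = i;
    if (rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &attr, sizeof(attr)) != RKNN_SUCC) continue;
    printf("input %u: n_dims=%u dims=[%u %u %u %u]\n",
           i, attr.n_dims, attr.dims[0], attr.dims[1], attr.dims[2], attr.dims[3]);
}
for (uint32_t i = 0; i < io_num.n_output; i++) {
    rknn_tensor_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.index = i;
    if (rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &attr, sizeof(attr)) != RKNN_SUCC) continue;
    printf("output %u: n_dims=%u dims=[%u %u %u %u]\n",
           i, attr.n_dims, attr.dims[0], attr.dims[1], attr.dims[2], attr.dims[3]);
}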
// ======================= Set the model input ===================
// rknn_input describes one input tensor of the model and is passed to rknn_inputs_set
rknn_input inputs[1];
memset(inputs, 0, sizeof(inputs));
inputs[0].index = 0; // index of the input tensor
inputs[0].type = RKNN_TENSOR_UINT8; // data type of the buffer we provide
inputs[0].size = img.cols * img.rows * img.channels() * sizeof(uint8_t); // buffer size in bytes
inputs[0].fmt = RKNN_TENSOR_NHWC; // data layout of the buffer: NHWC
inputs[0].buf = img.data; // pointer to the input data
// Hand the input over to the runtime with rknn_inputs_set
ret = rknn_inputs_set(ctx, io_num.n_input, inputs);
if (ret < 0)
{
printf("rknn_input_set fail! ret=%d\n", ret);
return -1;
}
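One rknn_input field worth calling out (it stays 0 here thanks to the memset) is pass_through: with pass_through = 0 the runtime converts the uint8 NHWC buffer into whatever layout and quantization the model actually expects, while 1 would feed the buffer through untouched. Written out explicitly it would look like this:

inputs[0].pass_through = 0; // let the runtime handle layout/quantization conversion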
// ======================= Inference ===================
printf("rknn_run\n");
ret = rknn_run(ctx, nullptr);
if (ret < 0)
{
printf("rknn_run fail! ret=%d\n", ret);
return -1;
}
// ======================= Get the model output ===================
// rknn_output receives one output tensor of the model
rknn_output outputs[1];
memset(outputs, 0, sizeof(outputs));
outputs[0].want_float = 1; // ask the runtime to return the output as float32
// Fetch the output with rknn_outputs_get
ret = rknn_outputs_get(ctx, 1, outputs, NULL);
if (ret < 0)
{
printf("rknn_outputs_get fail! ret=%d\n", ret);
return -1;
}
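Before touching the buffer, it is worth confirming the output really has the 2 x H x W float layout that the post-processing below assumes. A small sanity-check sketch (not in the original code):

// Sanity check: this UNet is expected to emit 2 float planes of H x W each.
size_t expected_size = (size_t)2 * roi_img.rows * roi_img.cols * sizeof(float);
if (outputs[0].size != expected_size) {
    printf("unexpected output size: %u (expected %zu)\n", outputs[0].size, expected_size);
    rknn_outputs_release(ctx, 1, outputs);
    return -1;
}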
2.3 Post-processing
float *output_data = (float *)outputs[0].buf;
// The output buffer is laid out as 2 x H x W float planes. A pixel is marked as
// foreground (255) when the second plane's score beats the first and the ROI mask is non-zero.
Mat changge_result_img = Mat::zeros(roi_img.rows, roi_img.cols, CV_8UC1);
int index = 0;
for (int row = 0; row < roi_img.rows; row++) {
for (int col = 0; col < roi_img.cols; col++) {
float channel_1 = *(output_data + index); // score from the first plane
float channel_2 = *(output_data + index + roi_img.rows * roi_img.cols); // score from the second plane
uchar roi_pixel = roi_img.at<uchar>(row, col);
if (channel_1 < channel_2 && roi_pixel != 0) {
changge_result_img.at<uchar>(row, col) = 255;
} else {
changge_result_img.at<uchar>(row, col) = 0;
}
index++;
}
}
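The same argmax-plus-mask step can also be written without the nested loops by wrapping the two output planes as cv::Mat headers. A sketch (assuming the 2 x H x W float layout described above; the imwrite path is only an example):

// Wrap the two float planes without copying.
Mat ch1(roi_img.rows, roi_img.cols, CV_32FC1, output_data);
Mat ch2(roi_img.rows, roi_img.cols, CV_32FC1, output_data + roi_img.rows * roi_img.cols);
// Foreground where the second plane wins the argmax and the ROI mask is non-zero.
Mat result_img = (ch2 > ch1) & (roi_img != 0); // CV_8UC1, values 0 or 255
// imwrite("/home/ubuntu/npu_test/unet/img/result.png", result_img); // example output path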
2.4 Releasing RKNN Resources
// Release resources
ret = rknn_outputs_release(ctx, 1, outputs);
if (ret < 0)
{
printf("rknn_outputs_release fail! ret=%d\n", ret);
return -1;
}
else if (ctx > 0)
{
// ======================= Destroy the RKNN context ===================
rknn_destroy(ctx);
}
// ======================= Free the model data ===================
if (model)
{
free(model);
}
2.5 Complete Code
#include <iostream>
#include <opencv2/core/hal/interface.h>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc.hpp>
#include "rknn_api.h"
#include <chrono>
using namespace std;
using namespace cv;
static unsigned char *load_model(const char *filename, int *model_size)
{
FILE *fp = fopen(filename, "rb");
if (fp == nullptr)
{
printf("fopen %s fail!\n", filename);
return NULL;
}
fseek(fp, 0, SEEK_END);
int model_len = ftell(fp);
unsigned char *model = (unsigned char *)malloc(model_len); // allocate a buffer the size of the model file and return the pointer
fseek(fp, 0, SEEK_SET);
if (model_len != fread(model, 1, model_len, fp))
{
printf("fread %s fail!\n", filename);
free(model);
return NULL;
}
*model_size = model_len;
if (fp)
{
fclose(fp);
}
return model;
}
int main() {
auto start = std::chrono::high_resolution_clock::now();
const char* img_path = "/home/ubuntu/npu_test/unet/img/01_test.tif";
const char* roi_mask_path = "/home/ubuntu/npu_test/unet/img/01_test_mask.png";
const char *model_path = "/home/ubuntu/npu_test/unet/model/eyes_unet-sim-3588.rknn";
// Load ROI mask
Mat roi_img = imread(roi_mask_path, IMREAD_GRAYSCALE);
if (roi_img.empty()) {
cout << "Image not found: " << roi_mask_path << endl;
return -1;
}
// Load image
Mat original_img = imread(img_path);
if (original_img.empty()) {
cout << "Image not found: " << img_path << endl;
return -1;
}
// Convert image to RGB
cvtColor(original_img, original_img, COLOR_BGR2RGB);
// No explicit batch-dimension handling is needed here; the RGB image is used directly
Mat img = original_img;
rknn_context ctx = 0;
int ret;
int model_len = 0;
unsigned char *model;
// ======================= Initialize the RKNN model ===================
model = load_model(model_path, &model_len);
if (!model)
{
printf("load_model fail!\n");
return -1;
}
ret = rknn_init(&ctx, model, model_len, 0, NULL);
if (ret < 0)
{
printf("rknn_init fail! ret=%d\n", ret);
return -1;
}
// ======================= Query the model's input/output count ===================
rknn_input_output_num io_num;
ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
// ======================= Set the model input ===================
// rknn_input describes one input tensor of the model and is passed to rknn_inputs_set
rknn_input inputs[1];
memset(inputs, 0, sizeof(inputs));
inputs[0].index = 0; // index of the input tensor
inputs[0].type = RKNN_TENSOR_UINT8; // data type of the buffer we provide
inputs[0].size = img.cols * img.rows * img.channels() * sizeof(uint8_t); // buffer size in bytes
inputs[0].fmt = RKNN_TENSOR_NHWC; // data layout of the buffer: NHWC
inputs[0].buf = img.data; // pointer to the input data
// Hand the input over to the runtime with rknn_inputs_set
ret = rknn_inputs_set(ctx, io_num.n_input, inputs);
if (ret < 0)
{
printf("rknn_input_set fail! ret=%d\n", ret);
return -1;
}
// ======================= Inference ===================
printf("rknn_run\n");
ret = rknn_run(ctx, nullptr);
if (ret < 0)
{
printf("rknn_run fail! ret=%d\n", ret);
return -1;
}
// ======================= Get the model output ===================
// rknn_output receives one output tensor of the model
rknn_output outputs[1];
memset(outputs, 0, sizeof(outputs));
outputs[0].want_float = 1; // ask the runtime to return the output as float32
// Fetch the output with rknn_outputs_get
ret = rknn_outputs_get(ctx, 1, outputs, NULL);
if (ret < 0)
{
printf("rknn_outputs_get fail! ret=%d\n", ret);
return -1;
}
float *output_data = (float *)outputs[0].buf;
// The output buffer is laid out as 2 x H x W float planes. A pixel is marked as
// foreground (255) when the second plane's score beats the first and the ROI mask is non-zero.
Mat changge_result_img = Mat::zeros(roi_img.rows, roi_img.cols, CV_8UC1);
int index = 0;
for (int row = 0; row < roi_img.rows; row++) {
for (int col = 0; col < roi_img.cols; col++) {
float channel_1 = *(output_data + index); // score from the first plane
float channel_2 = *(output_data + index + roi_img.rows * roi_img.cols); // score from the second plane
uchar roi_pixel = roi_img.at<uchar>(row, col);
if (channel_1 < channel_2 && roi_pixel != 0) {
changge_result_img.at<uchar>(row, col) = 255;
} else {
changge_result_img.at<uchar>(row, col) = 0;
}
index++;
}
}
// Release resources
ret = rknn_outputs_release(ctx, 1, outputs);
if (ret < 0)
{
printf("rknn_outputs_release fail! ret=%d\n", ret);
return -1;
}
else if (ctx > 0)
{
// ======================= Destroy the RKNN context ===================
rknn_destroy(ctx);
}
// ======================= Free the model data ===================
if (model)
{
free(model);
}
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
std::cout << "Execution time: " << duration.count() << " milliseconds" << std::endl;
return 0;
}
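Note that the chrono timer above wraps the whole of main, so the time reported below includes image I/O and post-processing, not just the NPU call. To measure the inference alone, the measurement can be narrowed like this (a sketch):

// Time just the NPU inference call instead of the whole program.
auto t0 = std::chrono::high_resolution_clock::now();
ret = rknn_run(ctx, nullptr);
auto t1 = std::chrono::high_resolution_clock::now();
std::cout << "rknn_run: "
          << std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count()
          << " ms" << std::endl;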
The code still doesn't feel very elegant, and all those for loops are painful to look at, but it works; I'll clean it up later. (The post-processing has since been revised.)
3. Results
| C++ test result | Python test result |
| --- | --- |
| time: 497 ms | time: 660 ms |


922

被折叠的 条评论
为什么被折叠?



