最近研究瑞芯微的rv1106的yolov5检测算法,但是官方给的例子真是让人欲哭无泪,首先读取图像用的是stb的图像库,前处理加上resize需要250ms,后处理只能输出nc1hwc2,里面的矩阵转换需要250ms,后续自己做了很多工作,发这个只是为了记录代码。
// Copyright (c) 2021 by Rockchip Electronics Co., Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*-------------------------------------------
Includes
-------------------------------------------*/
#include "rknn_api.h"
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <vector>
#include "fstream"
#include "iostream"
#define STB_IMAGE_IMPLEMENTATION
#include "stb/stb_image.h"
#define STB_IMAGE_RESIZE_IMPLEMENTATION
#include <stb/stb_image_resize.h>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "postprocess.h"
#define PERF_WITH_POST 1
/*-------------------------------------------
Functions
-------------------------------------------*/
static inline int64_t getCurrentTimeUs()
{
    // Wall-clock timestamp in microseconds, for coarse profiling.
    // Cast BEFORE multiplying: on 32-bit targets (e.g. the RV1106) time_t is
    // 32 bits, so tv_sec * 1000000 would overflow signed arithmetic (UB).
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (int64_t)tv.tv_sec * 1000000 + (int64_t)tv.tv_usec;
}
// Pretty-print one RKNN tensor attribute (index, name, shape, layout,
// element type, quantization parameters) on a single stdout line.
static void dump_tensor_attr(rknn_tensor_attr *attr)
{
    char dims[128] = {0};
    // n_dims is unsigned in the RKNN API; use an unsigned index to avoid a
    // signed/unsigned comparison. snprintf bounds the write so a tensor with
    // many/long dims cannot overflow the stack buffer (sprintf could).
    for (uint32_t i = 0; i < attr->n_dims; ++i)
    {
        size_t idx = strlen(dims);
        snprintf(&dims[idx], sizeof(dims) - idx, "%d%s", attr->dims[i], (i == attr->n_dims - 1) ? "" : ", ");
    }
    printf("  index=%d, name=%s, n_dims=%d, dims=[%s], n_elems=%d, size=%d, fmt=%s, type=%s, qnt_type=%s, "
           "zp=%d, scale=%f\n",
           attr->index, attr->name, attr->n_dims, dims, attr->n_elems, attr->size, get_format_string(attr->fmt),
           get_type_string(attr->type), get_qnt_type_string(attr->qnt_type), attr->zp, attr->scale);
}
// Read an entire file into a freshly malloc'd buffer.
// On success returns the buffer (caller frees) and stores its size in
// *file_size; on any failure returns NULL and leaves *file_size untouched.
static void *load_file(const char *file_path, size_t *file_size)
{
    // "rb": the .rknn model is binary; "r" would corrupt it on platforms
    // that translate line endings in text mode.
    FILE *fp = fopen(file_path, "rb");
    if (fp == NULL)
    {
        printf("failed to open file: %s\n", file_path);
        return NULL;
    }
    fseek(fp, 0, SEEK_END);
    long pos = ftell(fp);
    if (pos < 0)
    {
        // ftell can fail (e.g. non-seekable stream); the original cast -1
        // straight to size_t and tried to allocate SIZE_MAX bytes.
        printf("failed to get file size: %s\n", file_path);
        fclose(fp);
        return NULL;
    }
    size_t size = (size_t)pos;
    fseek(fp, 0, SEEK_SET);
    void *file_data = malloc(size);
    if (file_data == NULL)
    {
        fclose(fp);
        printf("failed allocate file size: %zu\n", size);
        return NULL;
    }
    if (fread(file_data, 1, size, fp) != size)
    {
        fclose(fp);
        free(file_data);
        printf("failed to read file data!\n");
        return NULL;
    }
    fclose(fp);
    *file_size = size;
    return file_data;
}
// Load an image from disk and, if needed, resize it to the model's input
// resolution. The returned buffer always has `req_channel` channels (stbi_load
// converts on load). Caller frees the result with STBI_FREE / stbi_image_free.
// On success: returns pixel data, stores the ORIGINAL image size in
// *img_width / *img_height before any resize. On failure: returns NULL.
static unsigned char *load_image(const char *image_path, rknn_tensor_attr *input_attr, int *img_height, int *img_width)
{
    int req_height = 0;
    int req_width = 0;
    int req_channel = 0;
    // NOTE(review): assumes a 4-D input tensor (dims[0..3]) — true for this
    // yolov5 model; verify if reused with other models.
    switch (input_attr->fmt)
    {
    case RKNN_TENSOR_NHWC:
        req_height = input_attr->dims[1];
        req_width = input_attr->dims[2];
        req_channel = input_attr->dims[3];
        break;
    case RKNN_TENSOR_NCHW:
        req_height = input_attr->dims[2];
        req_width = input_attr->dims[3];
        req_channel = input_attr->dims[1];
        break;
    default:
        printf("meet unsupported layout\n");
        return NULL;
    }
    int channel = 0; // channel count of the file on disk (informational only)
    unsigned char *image_data = stbi_load(image_path, img_width, img_height, &channel, req_channel);
    if (image_data == NULL)
    {
        printf("load image failed!\n");
        return NULL;
    }
    if (*img_width != req_width || *img_height != req_height)
    {
        unsigned char *image_resized = (unsigned char *)STBI_MALLOC((size_t)req_width * req_height * req_channel);
        if (!image_resized)
        {
            printf("malloc image failed!\n");
            STBI_FREE(image_data);
            return NULL;
        }
        int64_t start_us = getCurrentTimeUs();
        // stbi_load already converted the buffer to req_channel channels, so
        // the resize must use req_channel too. The original passed `channel`
        // (the file's native count), which mis-strides the buffer whenever
        // the file is not already req_channel (e.g. grayscale or RGBA input).
        if (stbir_resize_uint8(image_data, *img_width, *img_height, 0, image_resized, req_width, req_height, 0, req_channel) != 1)
        {
            printf("resize image failed!\n");
            STBI_FREE(image_resized); // original leaked this buffer on failure
            STBI_FREE(image_data);
            return NULL;
        }
        int64_t elapse_us = getCurrentTimeUs() - start_us;
        printf("  resize Time = %.2fms,\n", elapse_us / 1000.f);
        STBI_FREE(image_data);
        image_data = image_resized;
    }
    return image_data;
}
// The quantized model's NPU output is int8, so post-processing must consume
// int8 data. Converts an int8 NC1HWC2-layout buffer to int8 NCHW layout.
//
// src:          NPU output laid out as [batch, C1, H, W, C2] (sizes in dims)
// dst:          destination, [batch, channel, h, w]; caller allocates
// dims:         the 5 NC1HWC2 dimensions reported by the runtime
// channel/h/w:  logical NCHW shape (channel <= C1 * C2)
// Returns 0 on success.
int NC1HWC2_int8_to_NCHW_int8(const int8_t *src, int8_t *dst, int *dims, int channel, int h, int w)
{
    int batch = dims[0];
    int C1 = dims[1];
    int C2 = dims[4];
    int hw_src = dims[2] * dims[3];
    int hw_dst = h * w;
    for (int i = 0; i < batch; i++)
    {
        // Index from fixed base pointers each iteration. The original advanced
        // `src`/`dst` cumulatively (src = src + i * stride inside the loop),
        // which reads/writes past the buffers for batch >= 3. Also dropped the
        // stray std::cout debug print from this hot path.
        const int8_t *src_b = src + (size_t)i * C1 * hw_src * C2;
        int8_t *dst_b = dst + (size_t)i * channel * hw_dst;
        for (int c = 0; c < channel; ++c)
        {
            const int8_t *src_c = src_b + (c / C2) * hw_src * C2; // C1 plane holding channel c
            int offset = c % C2;                                  // position inside the C2 group
            for (int cur_h = 0; cur_h < h; ++cur_h)
            {
                for (int cur_w = 0; cur_w < w; ++cur_w)
                {
                    int cur_hw = cur_h * w + cur_w;
                    dst_b[c * hw_dst + cur_hw] = src_c[C2 * cur_hw + offset];
                }
            }
        }
    }
    return 0;
}
// The quantized model's NPU output is int8, so post-processing must consume
// int8 data. Converts an int8 NC1HWC2-layout buffer to a dequantized float
// NCHW buffer: out = (in - zp) * scale.
//
// src:          NPU output laid out as [batch, C1, H, W, C2] (sizes in dims)
// dst:          destination, [batch, channel, h, w]; caller allocates
// dims:         the 5 NC1HWC2 dimensions reported by the runtime
// channel/h/w:  logical NCHW shape (channel <= C1 * C2)
// zp, scale:    affine quantization parameters of the output tensor
// Returns 0 on success.
int NC1HWC2_int8_to_NCHW_float(const int8_t *src, float *dst, int *dims, int channel, int h, int w, int zp, float scale)
{
    int batch = dims[0];
    int C1 = dims[1];
    int C2 = dims[4];
    int hw_src = dims[2] * dims[3];
    int hw_dst = h * w;
    for (int i = 0; i < batch; i++)
    {
        // Index from fixed base pointers each iteration. The original advanced
        // `src`/`dst` cumulatively (src = src + i * stride inside the loop),
        // which reads/writes past the buffers for batch >= 3.
        const int8_t *src_b = src + (size_t)i * C1 * hw_src * C2;
        float *dst_b = dst + (size_t)i * channel * hw_dst;
        for (int c = 0; c < channel; ++c)
        {
            const int8_t *src_c = src_b + (c / C2) * hw_src * C2; // C1 plane holding channel c
            int offset = c % C2;                                  // position inside the C2 group
            for (int cur_h = 0; cur_h < h; ++cur_h)
            {
                for (int cur_w = 0; cur_w < w; ++cur_w)
                {
                    int cur_hw = cur_h * w + cur_w;
                    // int8 -> float dequantization
                    dst_b[c * hw_dst + cur_hw] = (src_c[C2 * cur_hw + offset] - zp) * scale;
                }
            }
        }
    }
    return 0;
}
/*-------------------------------------------
Main Functions
-------------------------------------------*/
int main(int argc, char *argv[])
{
if (argc < 3)
{
printf("Usage:%s model_path input_path [loop_count]\n", argv[0]);
return -1;
}
char *model_path = argv[1];
char *input_path = argv[2];
int loop_count = 10;
if (argc > 3)
{
loop_count = atoi(argv[3]);
}
const float nms_threshold = NMS_THRESH;
const float box_conf_threshold = BOX_THRESH;
int img_width = 0;
int img_height = 0;
rknn_context ctx = 0;
// Load RKNN Model
#if 1
// Init rknn from model path
int ret = rknn_init(&ctx, model_path, 0, 0, NULL);
#else
// Init rknn from model data