MNN in Practice [C++ Version]

1. YOLOv5-Lite

1.1 Install dependencies

  • opencv
  • protobuf
  • cmake

1.2 Build the libMNN.so shared library

$ git clone https://github.com/alibaba/MNN.git
$ cd MNN
$ mkdir build && cd build
$ sudo cmake ..
$ sudo make

1.3 Build this project

$ cd mnn_demo
$ mkdir build && cd build
$ sudo cmake ..
$ sudo make

1.4 Download the MNN model files

$ mkdir model_zoo && cd model_zoo
# download v5lite-s.mnn or v5lite-s-int4.mnn into model_zoo (links below)

v5lite-s.mnn: https://drive.google.com/file/d/10dBsY0T19Kyz2sZ4ebfpsb6dnG58pmYq/view?usp=sharing
v5lite-s-int4.mnn: https://drive.google.com/file/d/1v90z5sWx6rTnrF9jejugZup2YuIuXObR/view?usp=sharing
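
To sanity-check a downloaded model before building the full demo, a minimal MNN C++ program like the following can load the file and print its input shape. This is a sketch; the model path is whichever file you placed in model_zoo:

#include <iostream>
#include <memory>

#include <MNN/Interpreter.hpp>
#include <MNN/Tensor.hpp>

int main() {
    // createFromFile returns nullptr if the file is missing or not a valid .mnn model.
    std::shared_ptr<MNN::Interpreter> net(
        MNN::Interpreter::createFromFile("model_zoo/v5lite-s.mnn"));
    if (net == nullptr) {
        std::cerr << "failed to load model" << std::endl;
        return -1;
    }
    MNN::ScheduleConfig config;  // defaults to the CPU backend
    MNN::Session* session = net->createSession(config);
    MNN::Tensor* input = net->getSessionInput(session, nullptr);
    for (int d : input->shape()) std::cout << d << " ";  // NCHW input shape
    std::cout << std::endl;
    return 0;
}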

1.5 Run the test

$ ./yolov5
# The default input image is 1860.jpg and the result is saved as output.jpg.
# The default number of detection classes is 80, with the following class names:
# "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
# "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
# "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
# "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
# "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
# "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
# "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
# "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
# "hair drier", "toothbrush"

(output.jpg: detection result)

2. A YOLOv5-Based Print-through (透字) Detection Model in Practice

2.1 Install dependencies

  • opencv
  • protobuf
  • cmake

2.2 Build the libMNN.so shared library

$ git clone https://github.com/alibaba/MNN.git
$ cd MNN
$ mkdir build && cd build
$ sudo cmake ..
$ sudo make
$ cp libMNN.so path/to/your/project/
$ cd ../include
$ cp -R MNN path/to/your/project/include/

For example, my project lives at /home/tianzx/ai_model/test_ai_interface/mnn/demo/yolov5_cpp:

$ tree . --filelimit=10 --dirsfirst
.
├── include
│   ├── MNN [13 entries exceeds filelimit, not opening dir]
│   ├── util.h
│   └── Yolo.h
├── model_zoo [11 entries exceeds filelimit, not opening dir]
├── src
│   ├── main.cpp
│   ├── util.cpp
│   └── Yolo.cpp
├── CMakeLists.txt
├── libMNN.so
├── test_detect.jpg
├── test.jpg
├── README.md
└── yolov5

4 directories, 11 files

2.3 Convert the model file

Convert the yolov5l weights file perspec_yolov5l_v0.0.1.pt to perspec_yolov5l_v0.0.1.mnn.

2.3.1 pt to onnx
  • perspec_yolov5l_v0.0.1.pt was trained with https://github.com/ultralytics/yolov5.
  • pip3 install onnx-simplifier
  • python export.py --weights runs/train/perspec_bestweights/perspec_yolov5l_v0.0.1.pt --include onnx --simplify --train
  • The --train flag exports the model in training mode, i.e. without the Detect layer's postprocessing, which is why the converted model exposes three raw feature-map outputs (see section 2.4).
2.3.2 onnx to mnn
$ cd MNN/build
$ ./MNNConvert -f ONNX --modelFile path/to/perspec_yolov5l_v0.0.1.onnx --MNNModel path/to/perspec_yolov5l_v0.0.1.mnn --bizCode biz
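
Note: the MNNConvert binary only exists under MNN/build when the converter was enabled at configure time; per the MNN build documentation this is cmake .. -DMNN_BUILD_CONVERTER=true. If the tool is missing, re-run the build in step 2.2 with that option.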
2.3.3 Move the mnn file

$ mv path/to/perspec_yolov5l_v0.0.1.mnn /home/tianzx/ai_model/test_ai_interface/mnn/demo/yolov5_cpp/model_zoo/

2.4 Modify main.cpp

  • draw_box function
    • Change the class-count definition to const int CNUM = 1; and the class-name table to static const char* class_names[] = { "perspective" };
  • main function
    • Add command-line parameters: the model path model_name, the input image image_name, and the output image path save_image_name
    • int num_classes = 1; int net_size = 640; int INPUT_SIZE = 640;
    • float threshold = 0.25; float nms_threshold = 0.45;
    • std::vector<YoloLayerData> yolov5s_layers{

{"619",    32, {{116, 90}, {156, 198}, {373, 326}}},
{"599",    16, {{30,  61}, {62,  45},  {59,  119}}},
{"output", 8,  {{10,  13}, {16,  30},  {33,  23}}},
};

Key point: the definition of yolov5s_layers is tied to the yolov5l model structure in yolov5l.yaml: https://github.com/ultralytics/yolov5/blob/master/models/yolov5l.yaml


Open perspec_yolov5l_v0.0.1.mnn in https://netron.app/ and press Ctrl+F to open the FIND sidebar, then drag the scrollbar all the way to the bottom. Click each of the entries marked with a right arrow (599, 619, output) and compare them against the head and anchors sections of yolov5l.yaml; this yields the following table:

| Output node | Conv weight shape | head in yolov5l.yaml | anchors in yolov5l.yaml |
|---|---|---|---|
| 619 | 18x1024x1x1 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) | [116,90, 156,198, 373,326] # P5/32 |
| 599 | 18x512x1x1 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) | [30,61, 62,45, 59,119] # P4/16 |
| output | 18x256x1x1 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) | [10,13, 16,30, 33,23] # P3/8 |

Each of these output convolutions has 18 = 3 anchors × (num_classes + 5) = 3 × (1 + 5) output channels, confirming num_classes = 1.

With that mapping, main.cpp becomes:
#include <iostream>
#include <string>
#include <ctime>

#include <MNN/MNNDefine.h>
#include <MNN/MNNForwardType.h>
#include <MNN/Interpreter.hpp>
#include <opencv2/opencv.hpp>

#include "Yolo.h"

// Debug helper: print the first five dims of a tensor shape.
void show_shape(std::vector<int> shape)
{
    std::cout << shape[0] << " " << shape[1] << " " << shape[2] << " " << shape[3] << " " << shape[4] << std::endl;
}

// Map boxes from the network input size back to the original image size.
// A plain per-axis ratio works here because the image was resized without
// letterboxing (the aspect ratio was not preserved).
void scale_coords(std::vector<BoxInfo> &boxes, int w_from, int h_from, int w_to, int h_to)
{
    float w_ratio = float(w_to) / float(w_from);
    float h_ratio = float(h_to) / float(h_from);

    for (auto &box : boxes)
    {
        box.x1 *= w_ratio;
        box.x2 *= w_ratio;
        box.y1 *= h_ratio;
        box.y2 *= h_ratio;
    }
}

cv::Mat draw_box(cv::Mat & cv_mat, std::vector<BoxInfo> &boxes)
{
    const int CNUM = 1; // number of detection classes (const so the array below is legal C++)
    static const char* class_names[] = {
        "perspective"
    };
    // Per-class random colors (currently unused: boxes below are drawn in fixed red).
    cv::RNG rng(0xFFFFFFFF);
    cv::Scalar_<int> randColor[CNUM];
    for (int i = 0; i < CNUM; i++)
        rng.fill(randColor[i], cv::RNG::UNIFORM, 0, 256);

    for (auto box : boxes)
    {
        int width  = box.x2 - box.x1;
        int height = box.y2 - box.y1;
        char text[256];
        cv::Point p = cv::Point(box.x1, box.y1 - 5);
        cv::Rect rect = cv::Rect(box.x1, box.y1, width, height);
        cv::rectangle(cv_mat, rect, cv::Scalar(0, 0, 255));
        sprintf(text, "%s %.1f%%", class_names[box.label], box.score * 100);
        cv::putText(cv_mat, text, p, cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 255));
    }
    return cv_mat;
}


void help() {
    std::cout << "Usage:\n";
    std::cout << "./yolov5 <path of mnn file> <path of input image> <path of output image>\n";
    std::cout << "./yolov5 model_zoo/perspec_yolov5l_v0.0.1.mnn M9255760001311204045_-2_crop.jpg M9255760001311204045_-2_crop_detect.jpg\n";
}

// https://github.com/ppogg/YOLOv5-Lite/tree/master/cpp_demo/mnn
int main(int argc, char* argv[])
{
if (argc != 4)
{
    help();
    return -1;
}

// Function type
std::string model_name = argv[1];
std::string image_name = argv[2];
std::string save_image_name = argv[3];

// [IMPORTANT] model-specific settings: one class, 640x640 network input
int num_classes = 1;
int net_size    = 640;
std::vector<YoloLayerData> yolov5s_layers{
    {"619",    32, {{116, 90}, {156, 198}, {373, 326}}},
    {"599",    16, {{30,  61}, {62,  45},  {59,  119}}},
    {"output", 8,  {{10,  13}, {16,  30},  {33,  23}}},
    };

std::vector<YoloLayerData> & layers = yolov5s_layers;

std::shared_ptr<MNN::Interpreter> net = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(model_name.c_str()));
if (nullptr == net) {
    std::cout << "failed to load model: " << model_name << std::endl;
    return -1;
}

MNN::ScheduleConfig config;
config.numThread = 1;
config.type      = static_cast<MNNForwardType>(MNN_FORWARD_CPU);
MNN::BackendConfig backendConfig;
backendConfig.precision = MNN::BackendConfig::Precision_Low; // same as (PrecisionMode)2
config.backendConfig = &backendConfig;
MNN::Session *session = net->createSession(config);


// [IMPORTANT] load the image and preprocess: resize to 640x640, scale pixels to [0, 1].
// Note: there is no BGR->RGB conversion, so the network is fed OpenCV's BGR order.
int INPUT_SIZE = 640;
cv::Mat raw_image      = cv::imread(image_name.c_str());
cv::Mat image;
cv::resize(raw_image, image, cv::Size(INPUT_SIZE, INPUT_SIZE));
image.convertTo(image, CV_32FC3);
image = image /255.0f;

// wrap the NHWC float image in an MNN tensor; copyFromHostTensor below converts it into the session's NCHW input layout
std::vector<int> dims{1, INPUT_SIZE, INPUT_SIZE, 3};
auto nhwc_Tensor = MNN::Tensor::create<float>(dims, NULL, MNN::Tensor::TENSORFLOW);
auto nhwc_data   = nhwc_Tensor->host<float>();
auto nhwc_size   = nhwc_Tensor->size();
std::memcpy(nhwc_data, image.data, nhwc_size);

auto inputTensor = net->getSessionInput(session, nullptr);
inputTensor->copyFromHostTensor(nhwc_Tensor);

// run the network and time the forward pass
clock_t startTime, endTime;
startTime = clock();
net->runSession(session);
endTime = clock();
std::cout << "The forward time is: " << 1000.0 * (endTime - startTime) / CLOCKS_PER_SEC << "ms" << std::endl;

// get output data: layers[2] = "output" (stride 8), layers[1] = "599" (stride 16), layers[0] = "619" (stride 32)
std::string output_tensor_name0 = layers[2].name;
std::string output_tensor_name1 = layers[1].name;
std::string output_tensor_name2 = layers[0].name;

MNN::Tensor *tensor_scores  = net->getSessionOutput(session, output_tensor_name0.c_str());
MNN::Tensor *tensor_boxes   = net->getSessionOutput(session, output_tensor_name1.c_str());
MNN::Tensor *tensor_anchors = net->getSessionOutput(session, output_tensor_name2.c_str());

MNN::Tensor tensor_scores_host(tensor_scores, tensor_scores->getDimensionType());
MNN::Tensor tensor_boxes_host(tensor_boxes, tensor_boxes->getDimensionType());
MNN::Tensor tensor_anchors_host(tensor_anchors, tensor_anchors->getDimensionType());

tensor_scores->copyToHostTensor(&tensor_scores_host);
tensor_boxes->copyToHostTensor(&tensor_boxes_host);
tensor_anchors->copyToHostTensor(&tensor_anchors_host);

std::vector<BoxInfo> result;
std::vector<BoxInfo> boxes;

yolocv::YoloSize yolosize = yolocv::YoloSize{INPUT_SIZE,INPUT_SIZE};

// [IMPORTANT] confidence threshold and NMS IoU threshold
float threshold = 0.25;
float nms_threshold = 0.45;

boxes = decode_infer(tensor_scores_host, layers[2].stride,  yolosize, net_size, num_classes, layers[2].anchors, threshold);
result.insert(result.begin(), boxes.begin(), boxes.end());

boxes = decode_infer(tensor_boxes_host, layers[1].stride,  yolosize, net_size, num_classes, layers[1].anchors, threshold);
result.insert(result.begin(), boxes.begin(), boxes.end());

boxes = decode_infer(tensor_anchors_host, layers[0].stride,  yolosize, net_size, num_classes, layers[0].anchors, threshold);
result.insert(result.begin(), boxes.begin(), boxes.end());

nms(result, nms_threshold);

// std::cout<<result.size()<<std::endl;
scale_coords(result, INPUT_SIZE, INPUT_SIZE, raw_image.cols, raw_image.rows);
cv::Mat frame_show = draw_box(raw_image, result);
cv::imwrite(save_image_name, frame_show);

return 0;
}
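
The decode_infer and nms helpers used above are implemented in src/Yolo.cpp, which comes from the YOLOv5-Lite demo linked in the comment above main. For reference, here is a minimal sketch of what the NMS step does; the BoxInfoSketch fields are assumed to mirror the demo's BoxInfo in include/Yolo.h:

#include <algorithm>
#include <vector>

// Assumed to mirror the demo's BoxInfo (include/Yolo.h).
struct BoxInfoSketch {
    float x1, y1, x2, y2, score;
    int label;
};

// Intersection-over-union of two axis-aligned boxes.
static float iou(const BoxInfoSketch &a, const BoxInfoSketch &b) {
    float ix1 = std::max(a.x1, b.x1), iy1 = std::max(a.y1, b.y1);
    float ix2 = std::min(a.x2, b.x2), iy2 = std::min(a.y2, b.y2);
    float inter = std::max(0.0f, ix2 - ix1) * std::max(0.0f, iy2 - iy1);
    float area_a = (a.x2 - a.x1) * (a.y2 - a.y1);
    float area_b = (b.x2 - b.x1) * (b.y2 - b.y1);
    return inter / (area_a + area_b - inter);
}

// Greedy NMS: keep the highest-scoring box, drop any later box whose
// overlap with an already-kept box exceeds nms_threshold.
void nms_sketch(std::vector<BoxInfoSketch> &boxes, float nms_threshold) {
    std::sort(boxes.begin(), boxes.end(),
              [](const BoxInfoSketch &a, const BoxInfoSketch &b) { return a.score > b.score; });
    std::vector<BoxInfoSketch> kept;
    for (const auto &cand : boxes) {
        bool suppressed = false;
        for (const auto &k : kept) {
            if (iou(cand, k) > nms_threshold) { suppressed = true; break; }
        }
        if (!suppressed) kept.push_back(cand);
    }
    boxes.swap(kept);
}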

2.5 Build this project

  • Modify the CMakeLists.txt file:
cmake_minimum_required(VERSION 3.5.1)
project(yolov5)


SET(CMAKE_BUILD_TYPE "Debug")
SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb")
# SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall")
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/)

# Head files
include_directories(
/usr/local/opencv4/include/opencv4/opencv
/usr/local/opencv4/include/opencv4
${PROJECT_SOURCE_DIR}/include/
)

file(GLOB OpenCV4LIBS /usr/local/opencv4/lib/*.so)
file(GLOB MNNLIBS ${PROJECT_SOURCE_DIR}/libMNN.so)
file(GLOB SOURCE_FILES src/*.cpp)
# link_directories(${PROJECT_SOURCE_DIR}/ncnn-20210322-ubuntu-1804-shared/lib/)
add_executable(${CMAKE_PROJECT_NAME} ${SOURCE_FILES})

target_link_libraries (
${CMAKE_PROJECT_NAME}
${OpenCV4LIBS}
${MNNLIBS}
pthread
)
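
Note: globbing /usr/local/opencv4/lib/*.so works for this fixed install layout, but the more portable CMake idiom is find_package(OpenCV REQUIRED) together with linking ${OpenCV_LIBS}; the glob is kept here to stay close to the original setup.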
$ cd /home/tianzx/ai_model/test_ai_interface/mnn/demo/yolov5_cpp
$ mkdir build && cd build
$ sudo cmake ..
$ sudo make
$ cp ./yolov5 ../

2.6 Run

$ ./yolov5 model_zoo/perspec_yolov5l_v0.0.1.mnn test.jpg test_detect.jpg

(test.jpg: input image)

(test_detect.jpg: detection result)
