mnn推理示例

本文档详细介绍了如何在Android环境下配置MNN库,解决依赖问题,并提供了在Visual Studio 2017上的配置步骤,包括头文件和库的放置,以及测试代码的实现。通过这些步骤,读者可以成功集成和运行MNN模型进行对象检测。
摘要由CSDN通过智能技术生成

有 libMNN.so

https://github.com/cmdbug/MNN_Demo

原版报错:

library "libopencv_java4.so" not found

应该把 so 都放在 libs 下面,

jniLibs.srcDirs = [ 'libs','src/main/cpp/opencv']

代码:NanoDet 640*480 Android荣耀9 nova7 60ms左右。

ndk编译报错,我把下面注释了,编译成功了。

#if (DEFINED ANDROID_NDK_MAJOR AND ${ANDROID_NDK_MAJOR} GREATER 20)
#    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -static-openmp")
#endif ()

MNN项目组已经提供了编译好的库:

1.下载编译好的MNN库

https://github.com/iwatake2222/InferenceHelper

2021.08.10 起,MNN 官方已经不再提供编译好的库了。

官网地址为:https://github.com/alibaba/MNN/releases

2.在visual studio上进行配置

注意visual studio版本为2017,我习惯把这些库都跟opencv放一起,具体来讲就是:

首先,把从Source code中解压的include文件如图1所示,复制出来;

然后,在自己的opencv的include文件夹下面新建一个MNN子文件夹,将上面的头文件复制过去:

其次,将解压好的MNN-WindowsX64-0.2.1.7.zip文件中MNN.dll和MNN.lib放到opencv对应位置:

最后,跟配置opencv一样,新建一个项目,配置项目:

3.测试代码

总共三个文件:

第一个文件:mobilenetssd.h

#ifndef MOBILENET_SSD_H_  // was _MOBILENET_SSD_H_: leading-underscore+capital names are reserved
#define MOBILENET_SSD_H_

#include <memory>
#include <string>
#include <vector>

#include "MNN/Interpreter.hpp"
#include "MNN/MNNDefine.h"
#include "MNN/Tensor.hpp"
#include "MNN/ImageProcess.hpp"

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"

namespace mirror {

// One detected object: class label, bounding box in source-image pixel
// coordinates, and confidence score.
struct ObjectInfo {
    std::string name_;
    cv::Rect location_;
    float score_;
};

// MobileNet-SSD object detector backed by an MNN interpreter.
// Usage: call Init() once with the directory that contains
// "mobilenetssd.mnn", then call Detect() per image.
// NOTE(review): nothing here is synchronized — assume not thread-safe.
class MobilenetSSD {
public:
    MobilenetSSD();
    ~MobilenetSSD();

    // Loads <root_path>/mobilenetssd.mnn and builds the inference session.
    // Returns 0 on success, a non-zero error code on failure.
    int Init(const char* root_path);

    // Runs detection on img_src (BGR) and appends results to *objects.
    // Returns 0 on success, a non-zero error code on failure.
    int Detect(const cv::Mat& img_src, std::vector<ObjectInfo>* objects);

private:
    // Converts a BGR image into a freshly allocated RGBA pixel buffer
    // (4 bytes per pixel, row-major).
    // OWNERSHIP: the returned buffer is allocated with new[]; the caller
    // must release it with delete[] once it is no longer needed.
    uint8_t* GetImage(const cv::Mat& img_src) {
        uchar* data_ptr = new uchar[img_src.total() * 4];
        // Wrap the raw buffer so cvtColor writes directly into it.
        cv::Mat img_tmp(img_src.size(), CV_8UC4, data_ptr);
        // cv::COLOR_BGR2RGBA replaces the legacy CV_BGR2RGBA macro,
        // which is unavailable in OpenCV 4 without the C compat headers.
        cv::cvtColor(img_src, img_tmp, cv::COLOR_BGR2RGBA, 4);
        return (uint8_t*)img_tmp.data;  // same pointer as data_ptr
    }

private:
    bool initialized_;

    // Network input resolution (width x height) and the matching
    // NCHW input-tensor dims.
    const cv::Size inputSize_ = { 300, 300 };
    std::vector<int> dims_ = { 1, 3, 300, 300 };

    // Per-channel mean subtraction and scaling applied by MNN's
    // ImageProcess (0.007843 ~= 1/127.5).
    const float meanVals_[3] = { 0.5f, 0.5f, 0.5f };
    const float normVals_[3] = { 0.007843f, 0.007843f, 0.007843f };

    // VOC labels (20 classes + background), indexed by the class id the
    // network emits in its "detection_out" tensor.
    std::vector<std::string> class_names = {
        "background", "aeroplane", "bicycle", "bird", "boat",
        "bottle", "bus", "car", "cat", "chair",
        "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant",
        "sheep", "sofa", "train", "tvmonitor"
    };

    std::shared_ptr<MNN::Interpreter> mobilenetssd_interpreter_;
    MNN::Session* mobilenetssd_sess_ = nullptr;
    MNN::Tensor* input_tensor_ = nullptr;
    std::shared_ptr<MNN::CV::ImageProcess> pretreat_data_ = nullptr;
};

}  // namespace mirror

#endif  // MOBILENET_SSD_H_
 ​​​​​​

第二个文件:mobilenetssd.cpp

#include "mobilenetssd.h"

#include <iostream>

#include <string>


#include "opencv2/imgproc.hpp"


namespace mirror {


// Constructs an uninitialized detector; Init() must succeed before
// Detect() may be called.
MobilenetSSD::MobilenetSSD() : initialized_(false) {}


// Releases the MNN session and model resources.
// Guarded: the original unconditionally dereferenced the interpreter,
// which crashes when Init() was never called (or failed to load the
// model) and the interpreter is still null.
MobilenetSSD::~MobilenetSSD() {
    if (mobilenetssd_interpreter_) {
        if (mobilenetssd_sess_ != nullptr) {
            mobilenetssd_interpreter_->releaseSession(mobilenetssd_sess_);
            mobilenetssd_sess_ = nullptr;
        }
        mobilenetssd_interpreter_->releaseModel();
    }
}


// Loads <root_path>/mobilenetssd.mnn, creates a 4-thread CPU session,
// configures the RGBA->RGB normalizing pre-processor, and resizes the
// "data" input tensor to 1x3x300x300.
// Returns 0 on success; 10000 if the model cannot be loaded; 10002 if
// the session cannot be created.
int MobilenetSSD::Init(const char* root_path) {
    std::cout << "start Init." << std::endl;

    // 1. Load the model file.
    std::string model_file = std::string(root_path) + "/mobilenetssd.mnn";
    // reset() directly — the original's detour through a temporary
    // std::unique_ptr added nothing.
    mobilenetssd_interpreter_.reset(MNN::Interpreter::createFromFile(model_file.c_str()));
    if (nullptr == mobilenetssd_interpreter_) {
        std::cout << "load model failed." << std::endl;
        return 10000;
    }

    // 2. Create a CPU session. backend_config only needs to outlive
    // createSession(), so a local is fine here.
    MNN::ScheduleConfig schedule_config;
    schedule_config.type = MNN_FORWARD_CPU;
    schedule_config.numThread = 4;

    MNN::BackendConfig backend_config;
    backend_config.precision = MNN::BackendConfig::Precision_High;
    backend_config.power = MNN::BackendConfig::Power_High;
    schedule_config.backendConfig = &backend_config;

    mobilenetssd_sess_ = mobilenetssd_interpreter_->createSession(schedule_config);
    if (nullptr == mobilenetssd_sess_) {
        // New check: the original left a null session to crash later in
        // getSessionInput()/runSession().
        std::cout << "create session failed." << std::endl;
        return 10002;
    }

    // 3. Image pre-processor: consumes the RGBA buffer produced by
    // GetImage(), converts to RGB, subtracts meanVals_ and scales by
    // normVals_ while filling the input tensor.
    MNN::CV::Matrix trans;
    trans.setScale(1.0f, 1.0f);
    MNN::CV::ImageProcess::Config img_config;
    img_config.filterType = MNN::CV::BICUBIC;
    ::memcpy(img_config.mean, meanVals_, sizeof(meanVals_));
    ::memcpy(img_config.normal, normVals_, sizeof(normVals_));
    img_config.sourceFormat = MNN::CV::RGBA;
    img_config.destFormat = MNN::CV::RGB;
    pretreat_data_.reset(MNN::CV::ImageProcess::create(img_config));
    pretreat_data_->setMatrix(trans);

    // 4. Bind the input tensor and fix its shape to dims_ (1x3x300x300).
    std::string input_name = "data";
    input_tensor_ = mobilenetssd_interpreter_->getSessionInput(mobilenetssd_sess_, input_name.c_str());
    mobilenetssd_interpreter_->resizeTensor(input_tensor_, dims_);
    mobilenetssd_interpreter_->resizeSession(mobilenetssd_sess_);

    initialized_ = true;

    std::cout << "end Init." << std::endl;
    return 0;
}


// Runs the network on img_src and appends one ObjectInfo per detection
// row to *objects, with boxes scaled back to img_src pixel coordinates.
// Returns 0 on success; 10000 if Init() has not succeeded; 10001 on an
// empty image or null output vector.
int MobilenetSSD::Detect(const cv::Mat& img_src, std::vector<ObjectInfo>* objects) {
    std::cout << "start detect." << std::endl;
    if (!initialized_) {
        std::cout << "model uninitialized." << std::endl;
        return 10000;
    }
    if (img_src.empty() || objects == nullptr) {
        // objects == nullptr check is new: the original would crash on
        // push_back through a null pointer.
        std::cout << "input empty." << std::endl;
        return 10001;
    }

    int width = img_src.cols;
    int height = img_src.rows;

    // Preprocess: resize to the network input size, convert to RGBA,
    // then let MNN's ImageProcess normalize into the input tensor.
    cv::Mat img_resized;
    cv::resize(img_src, img_resized, inputSize_);
    uint8_t* data_ptr = GetImage(img_resized);
    pretreat_data_->convert(data_ptr, inputSize_.width, inputSize_.height, 0, input_tensor_);
    // GetImage() allocates this buffer with new[]; once convert() has
    // copied the pixels into the tensor it must be freed (the original
    // leaked it on every call).
    delete[] data_ptr;

    mobilenetssd_interpreter_->runSession(mobilenetssd_sess_);
    std::string output_name = "detection_out";
    MNN::Tensor* output_tensor = mobilenetssd_interpreter_->getSessionOutput(mobilenetssd_sess_, output_name.c_str());

    // Copy the (possibly device-side) output tensor to host memory
    // before reading it.
    MNN::Tensor output_host(output_tensor, output_tensor->getDimensionType());
    output_tensor->copyToHostTensor(&output_host);

    // Each output row is [class_id, score, x1, y1, x2, y2] with
    // coordinates normalized to [0, 1] — presumably Caffe-SSD layout;
    // scale back to source-image pixels.
    auto output_ptr = output_host.host<float>();
    for (int i = 0; i < output_host.height(); ++i) {
        int index = i * output_host.width();
        int class_id = static_cast<int>(output_ptr[index + 0]);
        // New bounds check: a garbage class id must not index past
        // class_names (the original indexed unchecked).
        if (class_id < 0 || class_id >= static_cast<int>(class_names.size())) {
            continue;
        }
        ObjectInfo object;
        object.name_ = class_names[class_id];
        object.score_ = output_ptr[index + 1];
        object.location_.x = output_ptr[index + 2] * width;
        object.location_.y = output_ptr[index + 3] * height;
        object.location_.width = output_ptr[index + 4] * width - object.location_.x;
        object.location_.height = output_ptr[index + 5] * height - object.location_.y;

        objects->push_back(object);
    }

    std::cout << "end detect." << std::endl;
    return 0;
}


}
 ​​​​​

第三个文件:main.cpp

#include "mobilenetssd.h"

#include <cstdio>
#include <iostream>
#include <vector>

#include "opencv2/opencv.hpp"


int main(int argc, char* argv[]){

const char* img_path = "./data/images/test.jpg";

cv::Mat img_src = cv::imread(img_path);

mirror::MobilenetSSD* mobilenetssd = new mirror::MobilenetSSD();


const char* root_path = "./data/models";

mobilenetssd->Init(root_path);

std::vector<mirror::ObjectInfo> objects;

mobilenetssd->Detect(img_src, &objects);


int num_objects = static_cast<int>(objects.size());

for (int i = 0; i < num_objects; ++i) {

std::cout << "location: " << objects[i].location_ << std::endl;

cv::rectangle(img_src, objects[i].location_, cv::Scalar(255, 0, 255), 2);

char text[256];

sprintf_s(text, "%s %.1f%%", objects[i].name_.c_str(), objects[i].score_ * 100);

int baseLine = 0;

cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);

cv::putText(img_src, text, cv::Point(objects[i].location_.x,

objects[i].location_.y + label_size.height),

cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));

}

cv::imwrite("./data/images/cat.jpg", img_src);

cv::imshow("result", img_src);

cv::waitKey(0);


delete mobilenetssd;


system("pause");

return 0;

}
 

最后结果:

需要用到的模型和测试图片下载地址:https://download.csdn.net/download/sinat_31425585/12137855

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值