MNN Study Notes (6): Configuring a Visual Studio Project

This is actually quite simple, because the MNN team already provides prebuilt libraries:

1. Download the prebuilt MNN library

Download from: https://github.com/alibaba/MNN/releases

Download two files: the Source code archive and MNN-WindowsX64-0.2.1.7.zip.

2. Configure the project in Visual Studio

Note that the Visual Studio version used here is 2017. I like to keep these libraries in the same place as OpenCV; concretely:

First, copy out the include files extracted from the Source code archive (as shown in Figure 1);

Then, create a new MNN subfolder under your OpenCV include folder and copy those headers into it;

Next, place MNN.dll and MNN.lib from the extracted MNN-WindowsX64-0.2.1.7.zip into the corresponding OpenCV library directories;

Finally, create a new project and configure it just as you would for OpenCV; a sketch of the typical settings follows below.
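
For reference, here is a minimal sketch of the project properties I set. The paths are only examples of my own layout (where OpenCV is installed and where the MNN files were copied); substitute your own directories and OpenCV version, and make sure the platform is x64, since the prebuilt MNN binaries are 64-bit:

C/C++ -> General -> Additional Include Directories
    D:\opencv\build\include            (this folder now also contains the MNN subfolder with the headers)
Linker -> General -> Additional Library Directories
    D:\opencv\build\x64\vc15\lib       (MNN.lib was copied here next to the OpenCV .lib files)
Linker -> Input -> Additional Dependencies
    MNN.lib
    opencv_world341.lib                (the exact name depends on your OpenCV version)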

3. Test code

There are three files in total:

File 1: mobilenetssd.h

#ifndef _MOBILENET_SSD_H_
#define _MOBILENET_SSD_H_

#include <memory>
#include <string>
#include <vector>

#include "MNN/Interpreter.hpp"
#include "MNN/MNNDefine.h"
#include "MNN/Tensor.hpp"
#include "MNN/ImageProcess.hpp"

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"

namespace mirror {
struct ObjectInfo {
	std::string name_;
	cv::Rect location_;
	float score_;
};

class MobilenetSSD {
public:
	MobilenetSSD();
	~MobilenetSSD();
	int Init(const char* root_path);
	int Detect(const cv::Mat& img_src, std::vector<ObjectInfo>* objects);
private:
	// Convert the BGR input to an RGBA buffer for MNN's ImageProcess.
	// Note: the caller owns the returned buffer and must delete[] it.
	uint8_t* GetImage(const cv::Mat& img_src) {
		uchar* data_ptr = new uchar[img_src.total() * 4];
		cv::Mat img_tmp(img_src.size(), CV_8UC4, data_ptr);
		cv::cvtColor(img_src, img_tmp, cv::COLOR_BGR2RGBA, 4);
		return (uint8_t*)img_tmp.data;
	}

private:
	bool initialized_;
	const cv::Size inputSize_ = { 300, 300 };
	std::vector<int> dims_ = { 1, 3, 300, 300 };
	const float meanVals_[3] = { 0.5f, 0.5f, 0.5f };
	const float normVals_[3] = { 0.007843f, 0.007843f, 0.007843f };
	std::vector<std::string> class_names = {
		"background", "aeroplane", "bicycle", "bird", "boat",
		"bottle", "bus", "car", "cat", "chair",
		"cow", "diningtable", "dog", "horse",
		"motorbike", "person", "pottedplant",
		"sheep", "sofa", "train", "tvmonitor"
	};

	std::shared_ptr<MNN::Interpreter> mobilenetssd_interpreter_;
	MNN::Session* mobilenetssd_sess_ = nullptr;
	MNN::Tensor* input_tensor_ = nullptr;
	std::shared_ptr<MNN::CV::ImageProcess> pretreat_data_ = nullptr;

};

}


#endif // !_MOBILENET_SSD_H_

File 2: mobilenetssd.cpp

#include "mobilenetssd.h"
#include <iostream>
#include <string>

#include "opencv2/imgproc.hpp"

namespace mirror {

MobilenetSSD::MobilenetSSD() {
	initialized_ = false;
}

MobilenetSSD::~MobilenetSSD() {
	if (mobilenetssd_interpreter_) {
		mobilenetssd_interpreter_->releaseModel();
		mobilenetssd_interpreter_->releaseSession(mobilenetssd_sess_);
	}
}

int MobilenetSSD::Init(const char * root_path) {
	std::cout << "start Init." << std::endl;
	std::string model_file = std::string(root_path) + "/mobilenetssd.mnn";
	mobilenetssd_interpreter_ = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(model_file.c_str()));
	if (nullptr == mobilenetssd_interpreter_) {
		std::cout << "load model failed." << std::endl;
		return 10000;
	}

	MNN::ScheduleConfig schedule_config;
	schedule_config.type = MNN_FORWARD_CPU;
	schedule_config.numThread = 4;

	MNN::BackendConfig backend_config;
	backend_config.precision = MNN::BackendConfig::Precision_High;
	backend_config.power = MNN::BackendConfig::Power_High;
	schedule_config.backendConfig = &backend_config;

	mobilenetssd_sess_ = mobilenetssd_interpreter_->createSession(schedule_config);

	// image processor
	MNN::CV::Matrix trans;
	trans.setScale(1.0f, 1.0f);
	MNN::CV::ImageProcess::Config img_config;
	img_config.filterType = MNN::CV::BICUBIC;
	::memcpy(img_config.mean, meanVals_, sizeof(meanVals_));
	::memcpy(img_config.normal, normVals_, sizeof(normVals_));
	img_config.sourceFormat = MNN::CV::RGBA;
	img_config.destFormat = MNN::CV::RGB;
	pretreat_data_ = std::shared_ptr<MNN::CV::ImageProcess>(MNN::CV::ImageProcess::create(img_config));
	pretreat_data_->setMatrix(trans);

	std::string input_name = "data";
	input_tensor_ = mobilenetssd_interpreter_->getSessionInput(mobilenetssd_sess_, input_name.c_str());
	mobilenetssd_interpreter_->resizeTensor(input_tensor_, dims_);
	mobilenetssd_interpreter_->resizeSession(mobilenetssd_sess_);

	initialized_ = true;

	std::cout << "end Init." << std::endl;
	return 0;
}

int MobilenetSSD::Detect(const cv::Mat & img_src, std::vector<ObjectInfo>* objects) {
	std::cout << "start detect." << std::endl;
	if (!initialized_) {
		std::cout << "model uninitialized." << std::endl;
		return 10000;
	}
	if (img_src.empty()) {
		std::cout << "input empty." << std::endl;
		return 10001;
	}

	int width = img_src.cols;
	int height = img_src.rows;

	// preprocess: resize to the network input size and convert BGR -> RGBA
	cv::Mat img_resized;
	cv::resize(img_src, img_resized, inputSize_);
	uint8_t* data_ptr = GetImage(img_resized);
	pretreat_data_->convert(data_ptr, inputSize_.width, inputSize_.height, 0, input_tensor_);
	delete[] data_ptr;  // release the temporary RGBA buffer allocated in GetImage

	mobilenetssd_interpreter_->runSession(mobilenetssd_sess_);
	std::string output_name = "detection_out";
	MNN::Tensor* output_tensor = mobilenetssd_interpreter_->getSessionOutput(mobilenetssd_sess_, output_name.c_str());

	// copy to host
	MNN::Tensor output_host(output_tensor, output_tensor->getDimensionType());
	output_tensor->copyToHostTensor(&output_host);

	auto output_ptr = output_host.host<float>();
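	// Each row of detection_out holds [class_id, score, xmin, ymin, xmax, ymax],
	// with the box coordinates normalized to [0, 1].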
	for (int i = 0; i < output_host.height(); ++i) {
		int index = i * output_host.width();
		ObjectInfo object;
		object.name_ = class_names[int(output_ptr[index + 0])];
		object.score_ = output_ptr[index + 1];
		object.location_.x = output_ptr[index + 2] * width;
		object.location_.y = output_ptr[index + 3] * height;
		object.location_.width = output_ptr[index + 4] * width - object.location_.x;
		object.location_.height = output_ptr[index + 5] * height - object.location_.y;

		objects->push_back(object);
	}


	std::cout << "end detect." << std::endl;

	return 0;
}

}

File 3: main.cpp

#include "mobilenetssd.h"
#include "opencv2/opencv.hpp"

int main(int argc, char* argv[]){
	const char* img_path = "./data/images/test.jpg";
	cv::Mat img_src = cv::imread(img_path);
	mirror::MobilenetSSD* mobilenetssd = new mirror::MobilenetSSD();

	const char* root_path = "./data/models";
	mobilenetssd->Init(root_path);
	std::vector<mirror::ObjectInfo> objects;
	mobilenetssd->Detect(img_src, &objects);

	int num_objects = static_cast<int>(objects.size());
	for (int i = 0; i < num_objects; ++i) {
		std::cout << "location: " << objects[i].location_ << std::endl;
		cv::rectangle(img_src, objects[i].location_, cv::Scalar(255, 0, 255), 2);
		char text[256];
		sprintf_s(text, "%s %.1f%%", objects[i].name_.c_str(), objects[i].score_ * 100);
		int baseLine = 0;
		cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
		cv::putText(img_src, text, cv::Point(objects[i].location_.x,
			objects[i].location_.y + label_size.height),
			cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
	}
	cv::imwrite("./data/images/cat.jpg", img_src);
	cv::imshow("result", img_src);
	cv::waitKey(0);

	delete mobilenetssd;

	system("pause");
	return 0;
}
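
Before running, make sure MNN.dll can be found by the executable (the simplest option is to copy it next to the .exe) and that your data files match the paths hard-coded in main.cpp. The layout below is what the code above assumes; the file names are just examples, use whatever model and image you downloaded:

project.exe
MNN.dll
data/
    models/
        mobilenetssd.mnn
    images/
        test.jpg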

Final result:

The model and test image used here can be downloaded from: https://download.csdn.net/download/sinat_31425585/12137855

That's a wrap!
