openvino最简单的部署

1.前言

  • openvino可以看成一个驱动,你只要找到合适的头文件和库,就可以使用相应的设备,如CPU、GPU、计算棒等等。
  • 当然,你必须先有一个安装了openvino的设备,参考这里
  • 直通车,下载越早,花费越少

2.头文件和库

  • 头文件
    • /opt/intel/openvino/deployment_tools/inference_engine/include
    • /opt/intel/openvino/deployment_tools/ngraph/include
    • /opt/intel/openvino/deployment_tools/inference_engine/samples/cpp/common/samples
  • 库
    • /opt/intel/openvino/deployment_tools/inference_engine/lib/intel64
    • /opt/intel/openvino/deployment_tools/inference_engine/external/tbb/lib
    • /opt/intel/openvino/deployment_tools/ngraph/lib
    • 后面两个库目录我只复制了libtbb.so.2和libngraph.so

3.C++代码

#include <sys/time.h>
#include <time.h>

#include <algorithm>
#include <iomanip>
#include <iostream>
#include <string>

#include <inference_engine.hpp>
#include <ngraph/ngraph.hpp>
#include <samples/ocv_common.hpp>  // matU8ToBlob

using namespace InferenceEngine;

// Copy the pixels of an OpenCV Mat into the named input Blob of an
// inference request. Layout/precision conversion is delegated to the
// sample helper matU8ToBlob.
void frameToBlob(const cv::Mat& frame, InferRequest::Ptr& inferRequest, const std::string& inputName) {
	// Fetch the request's pre-allocated input blob, then fill it from the frame.
	Blob::Ptr inputBlob = inferRequest->GetBlob(inputName);
	matU8ToBlob<uint8_t>(frame, inputBlob);
}
// Inference configuration: target device, IR file path (without extension),
// test image path, and the minimum confidence for a detection to be kept.
// Made const: nothing in this program mutates them, and the float literal
// gets an explicit 'f' suffix to avoid a double->float narrowing.
const std::string DEVICE = "MYRIAD";
const std::string IR_FileNameNoExt = "/home/lwd/openvino/models/32/mobilenet-ssd";
const std::string imageFile = "/home/lwd/openvino/mmgg.jpg";
const float confidence_threshold = 0.07f;

int main(void) {
	// --------------------------- 1. 载入硬件插件(Plugin) --------------------------------------
	Core ie;
	std::cout << "1.Load Plugin..." << std::endl;     
	std::cout << ie.GetVersions(DEVICE) << std::endl; //输出插件版本信息

	// ------------------- 2. 读取IR文件 (.xml and .bin files) --------------------------
	std::cout << "2.Read IR File..." << std::endl;
	CNNNetwork network = ie.ReadNetwork(IR_FileNameNoExt+".xml");

	// -------------------- 3. 配置网络输入输出 -----------------------------------------
	std::cout << "3.Prepare Input and Output..." << std::endl;
	/** 获得神经网络的输入信息 **/
	InputsDataMap inputsInfo(network.getInputsInfo());
	std::string imageInputName, imInfoInputName;
	InputInfo::Ptr inputInfo = nullptr;
	SizeVector inputImageDims;
	/** 遍历模型所有的输入blobs **/
	for (auto & item : inputsInfo) {
		/** 处理保存图像数据的第一个张量 **/
		if (item.second->getInputData()->getTensorDesc().getDims().size() == 4) {
			imageInputName = item.first;
			inputInfo = item.second;
			/** 创建第一个输入blob **/
			Precision inputPrecision = Precision::U8;
			item.second->setPrecision(inputPrecision);
		}
		else if (item.second->getInputData()->getTensorDesc().getDims().size() == 2) {
			imInfoInputName = item.first;
			Precision inputPrecision = Precision::FP32;
			item.second->setPrecision(inputPrecision);
			if ((item.second->getTensorDesc().getDims()[1] != 3 && item.second->getTensorDesc().getDims()[1] != 6)) {
				throw std::logic_error("Invalid input info. Should be 3 or 6 values length");
			}
		}
	}
	/** 获得神经网络的输出信息 **/
	OutputsDataMap outputsInfo(network.getOutputsInfo());
	std::string outputName;
	DataPtr outputInfo;
	if (auto ngraphFunction = network.getFunction())
    {
        for (const auto &out : outputsInfo)
        {
            for (const auto &op : ngraphFunction->get_ops())
            {
                if (op->get_type_info() == ngraph::op::DetectionOutput::type_info &&
                    op->get_friendly_name() == out.second->getName())
                {
                    outputName = out.first;
                    outputInfo = out.second;
                    break;
                }
            }
        }
    }
    else
    {
        outputInfo = outputsInfo.begin()->second;
        outputName = outputInfo->getName();
    }
	const SizeVector outputDims = outputInfo->getTensorDesc().getDims();
	const int maxProposalCount = outputDims[2];
	const int objectSize = outputDims[3];
	outputInfo->setPrecision(Precision::FP32);
	// --------------------------- 4. 载入模型到AI推理计算设备---------------------------------------
	std::cout << "4.Load model into device..." << std::endl;
	ExecutableNetwork executable_network = ie.LoadNetwork(network, DEVICE);
	// --------------------------- 5. 创建Infer Request--------------------------------------------
	std::cout << "5.Create Infer Request..." << std::endl;
	InferRequest::Ptr infer_request = executable_network.CreateInferRequestPtr();
	// --------------------------- 6. 准备输入数据 ------------------------------------------------
	std::cout << "6.Prepare Input..." << std::endl;
	cv::Mat img = cv::imread(imageFile);
	frameToBlob(img, infer_request, imageInputName);
	const size_t width = (size_t)img.cols;
	const size_t height = (size_t)img.rows;
	// --------------------------- 7. 执行推理计算 ------------------------------------------------
	std::cout << "7.Start inference..." << std::endl;
	struct timeval start, end;
    gettimeofday(&start, NULL);
	infer_request->Infer();
	gettimeofday(&end, NULL);
    std::cout << (end.tv_usec-start.tv_usec)/1000000.0 + end.tv_sec-start.tv_sec << std::endl;
	// --------------------------- 8. 处理输出 ----------------------------------------------------
	std::cout << "8.Process output blobs..." << std::endl;
	const float *detections = infer_request->GetBlob(outputName)->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();
	for (int i = 0; i < maxProposalCount; i++) {
		float image_id = detections[i * objectSize + 0];
		float confidence = detections[i * objectSize + 2];
		auto label = static_cast<int>(detections[i * objectSize + 1]);
		float xmin = detections[i * objectSize + 3] * width;
		float ymin = detections[i * objectSize + 4] * height;
		float xmax = detections[i * objectSize + 5] * width;
		float ymax = detections[i * objectSize + 6] * height;
		
		if (confidence > confidence_threshold) {
			// 仅当> confidence_threshold值时,显示推理计算结果
			std::ostringstream conf;
			conf << ":" << std::fixed << std::setprecision(3) << confidence;
			if (xmin < 0) {
				xmin = -xmin;
			}
			if (ymin < 0) {
				ymin = -ymin;
			}
			if (xmax > width) {
				xmax = width;
			}
			if (ymax > height) {
				ymax = height;
			}
			cv::Point origin;
			origin.x = xmin;
			origin.y = ymin + 20;
			std::cout << label << std::endl;
			std::string text = "UNKNOWN";
			if (1 <= label && label <= 2) {
				text = labels[label];
			}
			cv::putText(img, std::to_string(label),
				origin, cv::FONT_HERSHEY_COMPLEX_SMALL, 1,
				cv::Scalar(0, 0, 255));
			cv::rectangle(img, cv::Point2f(xmin, ymin), cv::Point2f(xmax, ymax), cv::Scalar(0, 0, 255));
		}
		
	}
	
	std::cout << "Infer done!" << std::endl;
	cv::imshow("Detection results", img);
	cv::waitKey(0);
	cv::destroyAllWindows();

	return 0;
}

4. CMakeLists.txt

# CMAKE_CXX_STANDARD is only honoured from CMake 3.1 onwards; with the old
# 2.8.12 minimum it was silently ignored, so require at least 3.1.
cmake_minimum_required (VERSION 3.1)

project(iee)
set(CMAKE_BUILD_TYPE "Release")
set (CMAKE_CXX_STANDARD 11)
# Fail instead of silently falling back to an older standard.
set (CMAKE_CXX_STANDARD_REQUIRED ON)

find_package(OpenCV REQUIRED)

# Local copies of the OpenVINO / ngraph headers are expected under include/.
include_directories(
  ${OpenCV_INCLUDE_DIRS}
  include
  include/inference_engine
)

add_executable(${PROJECT_NAME}
   ie.cc
)

# Link against the locally copied OpenVINO runtime libraries under lib/.
target_link_libraries(${PROJECT_NAME}
    ${OpenCV_LIBRARIES}
    ${PROJECT_SOURCE_DIR}/lib/libngraph.so
    ${PROJECT_SOURCE_DIR}/lib/libtbb.so.2
    ${PROJECT_SOURCE_DIR}/lib/libinference_engine_legacy.so
    ${PROJECT_SOURCE_DIR}/lib/libinference_engine.so
    ${PROJECT_SOURCE_DIR}/lib/libinference_engine_transformations.so
)

5.计算棒

  • 如果想使用计算棒,需要/opt/intel/openvino/inference_engine/external/97-myriad-usbboot.rules,复制到/etc/udev/rules.d,全部命令如下:
  • sudo usermod -a -G users "$(whoami)",如遇到问题,看这里
  • 登出用户并重新登录
  • sudo cp 97-myriad-usbboot.rules /etc/udev/rules.d
  • sudo udevadm control --reload-rules
  • sudo udevadm trigger
  • sudo ldconfig
  • 重启
  • 将代码中DEVICE的值改为MYRIAD

6.七七八八

  • 可以将需要复制的东西拷贝到一个文件夹里,部署的时候整体拷过去
  • 我在笔记本上使用CPU检测时间是0.01~0.013秒,使用计算棒耗时0.03秒。嗯~一定是我的打开方式不对!!!
  • 计算棒拔掉一会,再插上,就会报错,必须重启电脑

E: [ncAPI] [ 148432] [iee] ncDeviceOpen:1008 Failed to find booted device after boot

  • 2
    点赞
  • 13
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 5
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 5
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

刀么克瑟拉莫

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值