OpenVINO 精简 demo 代码:Super Resolution(单图超分辨率)C++ 示例

配置环境:
(截图占位:原文此处为 Visual Studio 附加包含目录的配置截图)

①C:\Program Files (x86)\Intel\openvino_2021.2.185\deployment_tools\inference_engine\include
②D:\opencv\opencv344\opencv\build\install\include\opencv2
③D:\opencv\opencv344\opencv\build\install\include\opencv
④D:\opencv\opencv344\opencv\build\install\include
⑤C:\Program Files (x86)\Intel\openvino_2021.2.185\deployment_tools\inference_engine\samples\cpp\common
(截图占位:原文此处为附加库目录的配置截图)
①C:\Program Files (x86)\Intel\openvino_2021.2.185\deployment_tools\ngraph\lib
②C:\Program Files (x86)\Intel\openvino_2021.2.185\deployment_tools\inference_engine\lib\intel64\Debug
③D:\opencv\opencv344\opencv\build\install\x64\vc15\lib
④C:\Program Files (x86)\Intel\openvino_2021.2.185\deployment_tools\inference_engine\external\hddl\lib
(截图占位:原文此处为附加依赖项的配置截图,以下为需要链接的库列表)
hddlapi.lib
inference_engined.lib
opencv_aruco344d.lib
opencv_bgsegm344d.lib
opencv_bioinspired344d.lib
opencv_calib3d344d.lib
opencv_ccalib344d.lib
opencv_core344d.lib
opencv_datasets344d.lib
opencv_dnn344d.lib
opencv_dnn_objdetect344d.lib
opencv_dpm344d.lib
opencv_face344d.lib
opencv_features2d344d.lib
opencv_flann344d.lib
opencv_fuzzy344d.lib
opencv_hfs344d.lib
opencv_highgui344d.lib
opencv_imgcodecs344d.lib
opencv_imgproc344d.lib
opencv_img_hash344d.lib
opencv_line_descriptor344d.lib
opencv_ml344d.lib
opencv_objdetect344d.lib
opencv_optflow344d.lib
opencv_phase_unwrapping344d.lib
opencv_photo344d.lib
opencv_plot344d.lib
opencv_reg344d.lib
opencv_rgbd344d.lib
opencv_saliency344d.lib
opencv_shape344d.lib
opencv_stereo344d.lib
opencv_stitching344d.lib
opencv_structured_light344d.lib
opencv_superres344d.lib
opencv_surface_matching344d.lib
opencv_text344d.lib
opencv_tracking344d.lib
opencv_video344d.lib
opencv_videoio344d.lib
opencv_videostab344d.lib
opencv_xfeatures2d344d.lib
opencv_ximgproc344d.lib
opencv_xobjdetect344d.lib
opencv_xphoto344d.lib
ngraphd.lib
onnx_importerd.lib

代码:

// ForOpenvinoTest.cpp : 此文件包含 "main" 函数。程序执行将在此处开始并结束。
//
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <vector>
#include <memory>
#include <string>
#include <samples/common.hpp>
#include <windows.h>
#include <wchar.h>
#include <inference_engine.hpp>
#include <samples/ocv_common.hpp>
#include <samples/classification_results.h>

using namespace InferenceEngine;

// Select wide-character or narrow-character helpers at compile time.
// On Windows builds with Unicode path support the OpenVINO samples provide
// wchar_t variants (imreadW, ClassificationResultW); everywhere else the
// plain std::string / cv::imread versions are used.
#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
#define tcout std::wcout
#define file_name_t std::wstring
#define imread_t imreadW
#define ClassificationResult_t ClassificationResultW
#else
#define tcout std::cout
#define file_name_t std::string
#define imread_t cv::imread
#define ClassificationResult_t ClassificationResult
#endif
// NOTE(review): main() below uses cv::imread directly, so these aliases are
// currently unused by this demo; they are kept for parity with the samples.

/// Converts a UTF-8 encoded std::string to a std::wstring via the Win32 API.
/// Returns an empty wstring if the input is empty or the conversion fails.
/// Uses std::wstring itself as the destination buffer, so there is no manual
/// new[]/delete[] (the original leaked if an exception was thrown in between
/// allocation and release) and no behavior change on success.
std::wstring StringToWString(const std::string& str)
{
	if (str.empty()) return std::wstring();
	// First call: query the required buffer length, including the NUL terminator
	// (-1 tells the API the source is NUL-terminated).
	int num = MultiByteToWideChar(CP_UTF8, 0, str.c_str(), -1, NULL, 0);
	if (num <= 0) return std::wstring();  // conversion failed; report "empty"
	std::wstring w_str(static_cast<size_t>(num), L'\0');
	MultiByteToWideChar(CP_UTF8, 0, str.c_str(), -1, &w_str[0], num);
	// Drop the trailing NUL the API wrote; std::wstring manages its own terminator.
	w_str.resize(static_cast<size_t>(num) - 1);
	return w_str;
}
//int wmain(int argc, char *argv[]) {
//#else

int main(int argc) {
	// --------------------------- 1. Load inference engine instance负载推理引擎实例 -------------------------------------
	Core ie;
	// 2. 读模型Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format
	CNNNetwork network = ie.ReadNetwork("C:/Users/sjsys/Desktop/1/single-image-super-resolution-1032.xml");
	// --------------------------- 3. Configure input & output 配置输入输出---------------------------------------------
	// --------------------------- Prepare input blobs 准备输入blob-----------------------------------------------------
	/** Collect images**/
	std::cout<< "Preparing input blobs" << std::endl;
	/** Taking information about all topology inputs **/
	ICNNNetwork::InputShapes inputShapes(network.getInputShapes());
	std::string lrInputBlobName = inputShapes.begin()->first;
	SizeVector lrShape = inputShapes[lrInputBlobName];
	// A model like single-image-super-resolution-???? may take bicubic interpolation of the input image as the
	// second input
	std::string bicInputBlobName;
	if (inputShapes.size() == 2) {
		bicInputBlobName = (++inputShapes.begin())->first;
		SizeVector bicShape = inputShapes[bicInputBlobName];
		if (bicShape.size() != 4) {
			throw std::logic_error("Number of dimensions for both inputs must be 4");
		}
		if (lrShape[2] >= bicShape[2] && lrShape[3] >= bicShape[3]) {
			lrInputBlobName.swap(bicInputBlobName);
			lrShape.swap(bicShape);
		}
		else if (!(lrShape[2] <= bicShape[2] && lrShape[3] <= bicShape[3])) {
			throw std::logic_error("Each spatial dimension of one input must surpass or be equal to a spatial"
				"dimension of another input");
		}
	}
	/** Collect images**/
	std::vector<cv::Mat> inputImages;
	std::string imageNames = "C:/Users/sjsys/Desktop/1/3.bmp";
	cv::Mat img = cv::imread(imageNames, cv::IMREAD_COLOR);
	inputImages.push_back(img);	
	if (inputImages.empty()) throw std::logic_error("Valid input images were not found!");

	/** Setting batch size using image count **/
	inputShapes[lrInputBlobName][0] = inputImages.size();
	if (!bicInputBlobName.empty()) {
		inputShapes[bicInputBlobName][0] = inputImages.size();
	}
	network.reshape(inputShapes);
	std::cout << "Batch size is " << std::to_string(network.getBatchSize()) << std::endl;

	// --------------------------- Prepare output blobs 准备输出blob----------------------------------------------------
	std::cout << "Preparing output blobs" << std::endl;

	OutputsDataMap outputInfo(network.getOutputsInfo());
	// BlobMap outputBlobs;
	std::string firstOutputName;
	for (auto &item : outputInfo) {
		if (firstOutputName.empty()) {
			firstOutputName = item.first;
		}
		DataPtr outputData = item.second;
		if (!outputData) {
			throw std::logic_error("output data pointer is not valid");
		}

		item.second->setPrecision(Precision::FP32);
	}

	// --------------------------- 4. Loading model to the device 加载模型到设备------------------------------------------
	ExecutableNetwork executable_network = ie.LoadNetwork(network, "CPU");
	// --------------------------- 5. Create infer request创建请求 -------------------------------------------------
	InferRequest infer_request = executable_network.CreateInferRequest();
	// --------------------------- 6. Prepare input 准备输入--------------------------------------------------------
	Blob::Ptr lrInputBlob = infer_request.GetBlob(lrInputBlobName);
	for (size_t i = 0; i < inputImages.size(); ++i) {
		cv::Mat img = inputImages[i];
		matU8ToBlob<float_t>(img, lrInputBlob, i);

		if (!bicInputBlobName.empty()) {
			Blob::Ptr bicInputBlob = infer_request.GetBlob(bicInputBlobName);
			int w = bicInputBlob->getTensorDesc().getDims()[3];
			int h = bicInputBlob->getTensorDesc().getDims()[2];

			cv::Mat resized;
			cv::resize(img, resized, cv::Size(w, h), 0, 0, cv::INTER_CUBIC);
			matU8ToBlob<float_t>(resized, bicInputBlob, i);
		}
	}
	// --------------------------- 7. Do inference 做推理--------------------------------------------------------
	infer_request.Infer();
	// --------------------------- 8. Process output流程输出 ------------------------------------------------------
	Blob::Ptr output = infer_request.GetBlob(firstOutputName);
	LockedMemory<const void> outputBlobMapped = as<MemoryBlob>(output)->rmap();
	const auto outputData = outputBlobMapped.as<float*>();
	size_t numOfImages = output->getTensorDesc().getDims()[0];
	size_t numOfChannels = output->getTensorDesc().getDims()[1];
	size_t h = output->getTensorDesc().getDims()[2];
	size_t w = output->getTensorDesc().getDims()[3];
	size_t nunOfPixels = w * h;
	std::cout << "Output size [N,C,H,W]: " << numOfImages << ", " << numOfChannels << ", " << h << ", " << w << std::endl;
	for (size_t i = 0; i < numOfImages; ++i) {
		std::vector<cv::Mat> imgPlanes;
		if (numOfChannels == 3) {
			imgPlanes = std::vector<cv::Mat>{
				  cv::Mat(h, w, CV_32FC1, &(outputData[i * nunOfPixels * numOfChannels])),
				  cv::Mat(h, w, CV_32FC1, &(outputData[i * nunOfPixels * numOfChannels + nunOfPixels])),
				  cv::Mat(h, w, CV_32FC1, &(outputData[i * nunOfPixels * numOfChannels + nunOfPixels * 2])) };
		}
		else {
			imgPlanes = std::vector<cv::Mat>{ cv::Mat(h, w, CV_32FC1, &(outputData[i * nunOfPixels * numOfChannels])) };
			cv::threshold(imgPlanes[0], imgPlanes[0], 0.5f, 1.0f, cv::THRESH_BINARY);
		};

		for (auto & img : imgPlanes)
			img.convertTo(img, CV_8UC1, 255);
		cv::Mat resultImg;
		cv::merge(imgPlanes, resultImg);	
		cv::imshow("result", resultImg);
		
	}
	
	std::cout << "程序完成啦!" << std::endl;
	cv::waitKey(0);
	return EXIT_SUCCESS;
}



  • 2
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
手把手讲授如何搭建成功OpenVINO框架,并且使用预训练模型快速开发超分辨率、道路分割、汽车识别、人脸识别、人体姿态和行人车辆分析。得益于OpenVINO框架的强大能力,这些例子都能够基于CPU达到实时帧率。课程的亮点在于在调通Demo的基础上更进一步:一是在讲Demo的时候,对相关领域问题进行分析(比如介绍什么是超分辨率,有什么作用)、预训练模型的来龙去脉(来自那篇论文,用什么训练的)、如何去查看不同模型的输入输出参数、如何编写对应的接口参数进行详细讲解;二是基本上对所有的代码进行重构,也就是能够让例子独立出来,并且给出了带有较详细注释的代码;三是注重实际运用,将Demo进一步和实时视频处理框架融合,形成能够独立运行的程序,方便模型落地部署;四是重难点突出、注重总结归纳,对OpenVINO基本框架,特别是能够提高视频处理速度的异步机制和能够直接部署解决实际问题的骨骼模型着重讲解,帮助学习理解;五是整个课程准备精细,每一课都避免千篇一律,前一课有对后一课的预告,后一课有对前一课的难点回顾,避免学习过程中出现突兀;六是在适当的时候拓展衍生,不仅讲OpenVINO解决图像处理问题,而且还补充图像处理的软硬选择、如何在手机上开发图像处理程序等内容,帮助拓展视野,增强对行业现状的了解。基本提纲:1、课程综述、环境配置2、OpenVINO范例-超分辨率(super_resolution_demo)3、OpenVINO范例-道路分割(segmentation_demo)4、OpenVINO范例-汽车识别(security_barrier_camera_demo)5、OpenVINO范例-人脸识别(interactive_face_detection_demo)6、OpenVINO范例-人体姿态分析(human_pose_estimation_demo)7、OpenVINO范例-行人车辆分析(pedestrian_tracker_demo)8、NCS和GOMFCTEMPLATE9、课程小结,资源分享
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值