OpenVINO 2020.4 Face Analysis Series (1): Face Detection C++ Demo

Introduction

This face detection C++ code was extracted from the official OpenVINO demos. Model loading, inference, and result post-processing are all implemented in a single main function, which makes the complete OpenVINO inference workflow easy to follow.
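In outline, the demo makes the following Inference Engine 2020.4 calls; every call below appears in the full listing in the next section, and the IR file name here is just a placeholder:

// 1. Create the Inference Engine core object
InferenceEngine::Core ie;
// 2. Read the IR (.xml and the matching .bin)
auto network = ie.ReadNetwork("face-detection-adas-0001.xml");
// 3. Inspect and configure inputs/outputs via network.getInputsInfo() / network.getOutputsInfo()
// 4. Compile the network for the target device
auto executableNetwork = ie.LoadNetwork(network, "CPU");
// 5. Create an infer request
auto inferRequest = executableNetwork.CreateInferRequestPtr();
// 6. Fill the input blob, 7. call inferRequest->Infer(), 8. read the output blob via GetBlob()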

Code

The face detection main function in C++.

#include <iostream>
#include <inference_engine.hpp>
#include <samples/ocv_common.hpp>
#include <time.h>
using namespace InferenceEngine;

// Function "frameToBlob": Transform the CV::Mat image to face 
void frameToBlob(const cv::Mat& frame, InferRequest::Ptr& inferRequest, const std::string& inputName) {
	Blob::Ptr frameBlob = inferRequest->GetBlob(inputName);
	matU8ToBlob<uint8_t>(frame, frameBlob);
}

// Configuration: inference device, IR path, image path and detection threshold
std::string DEVICE = "CPU";
std::string faceDetIRFile = "D:/B.WorkData/face-detection-adas-0001.xml";
std::string imageFile = "D:/B.WorkData/0.jpg";
float faceDetThreshold = 0.5f; // Face detection confidence threshold, in (0, 1)
// Bounding-box enlargement coefficients kept from the official demo
// (not used in this snippet; see the ROI-enlargement sketch after the listing)
float bb_enlarge_coefficient = 1.2f;
float bb_dx_coefficient = 1.0f;
float bb_dy_coefficient = 1.0f;

// Define the face detection result struct
struct FaceResult {
	int label;
	float confidence;
	cv::Rect location;
};

int main(void) {
	std::cout << "Openvino Face detection inference process" << std::endl;
	// --------------------------- 1. Loading Plugin ----------------------------------------------
	Core ie;
	std::cout << "1.Load Plugin..." << std::endl;     
	std::cout << ie.GetVersions(DEVICE) << std::endl; //OpenVINO Inference Engine version information
	// --------------------------------------------------------------------------------------------

	// ------------------- 2. Reading IR files (.xml and .bin files) ------------------------------
	std::cout << "2.Read IR File..." << std::endl;
	auto faceDetNetwork = ie.ReadNetwork(faceDetIRFile);
	size_t batchSize = 1;
	faceDetNetwork.setBatchSize(batchSize);
	// --------------------------------------------------------------------------------------------

	// -------------------- 3. Check the face detection network input and output ------------------
	std::cout << "3.Prepare Input and Output..." << std::endl;
	/** Check input information **/
	InputsDataMap faceInputsInfo(faceDetNetwork.getInputsInfo());

	InputInfo::Ptr faceInputInfoSecond = faceInputsInfo.begin()->second;
	faceInputInfoSecond->setPrecision(Precision::U8);
	const SizeVector inputDims = faceInputInfoSecond->getTensorDesc().getDims(); // NCHW layout: [N, C, H, W]
	float face_network_input_height = inputDims[2];  // network input height
	float face_network_input_width = inputDims[3];   // network input width
	std::string faceInputName = faceInputsInfo.begin()->first;

	/** Check output information **/
	std::string faceOutputName;
	int maxProposalCount = 0;
	int objectSize = 0;
	std::string labels_output;
	OutputsDataMap outputInfo(faceDetNetwork.getOutputsInfo());
	if (outputInfo.size() == 1) {
		DataPtr& _output = outputInfo.begin()->second;
		faceOutputName = outputInfo.begin()->first;
		const SizeVector outputDims = _output->getTensorDesc().getDims();
		if (outputDims.size() != 4) {
			throw std::logic_error("Face Detection network output should have 4 dimensions, but got " +
				std::to_string(outputDims.size()));
		}
		maxProposalCount = outputDims[2];
		objectSize = outputDims[3];
		if (objectSize != 7) {
			throw std::logic_error("Face Detection network output layer should have 7 as its last dimension");
		}
		_output->setPrecision(Precision::FP32);
	}
	else {
		for (const auto& outputLayer : outputInfo) {
			const SizeVector outputDims = outputLayer.second->getTensorDesc().getDims();
			if (outputDims.size() == 2 && outputDims.back() == 5) {
				faceOutputName = outputLayer.first;
				maxProposalCount = outputDims[0];
				objectSize = outputDims.back();
				outputLayer.second->setPrecision(Precision::FP32);
			}
			else if (outputDims.size() == 1 && outputLayer.second->getPrecision() == Precision::I32) {
				labels_output = outputLayer.first;
			}
		}
	}
	// --------------------------------------------------------------------------------------------

	// --------------------------- 4. Loading face detection model to Inference device-------------
	std::cout << "4.Load model into device..." << std::endl;
	std::map<std::string, std::string> config = { };
	config[PluginConfigParams::KEY_CPU_THREADS_NUM] = "1";  // Limit the CPU plugin to a single inference thread
	ExecutableNetwork face_executable_network = ie.LoadNetwork(faceDetNetwork, DEVICE, config);
	// --------------------------------------------------------------------------------------------

	// --------------------------- 5. Create infer request-----------------------------------------
	std::cout << "5.Create Infer Request..." << std::endl;
	InferRequest::Ptr face_infer_request = face_executable_network.CreateInferRequestPtr();
	// --------------------------------------------------------------------------------------------

	// --------------------------- 6. Prepare input data ------------------------------------------
	std::cout << "6.Prepare Input..." << std::endl;
	cv::Mat img = cv::imread(imageFile);
	frameToBlob(img, face_infer_request, faceInputName);
	const size_t inputWidth = (size_t)img.cols;
	const size_t inputHeight = (size_t)img.rows;
	cv::Rect imgRect(0, 0, inputWidth, inputHeight);
	// --------------------------------------------------------------------------------------------

	// --------------------------- 7. Inference ---------------------------------------------------
	std::cout << "7.Start inference..." << std::endl;
	face_infer_request->Infer();
	// --------------------------------------------------------------------------------------------
		
	// --------------------------- 8. Postprocessing ----------------------------------------------
	std::vector<FaceResult> results;
	std::cout << "8.Process output blobs..." << std::endl;
	// Map the output blob for read-only access and get a raw pointer to the detection data
	LockedMemory<const void> outputMapped = as<MemoryBlob>(face_infer_request->GetBlob(faceOutputName))->rmap();
	const float* detections = outputMapped.as<float*>();

	if (!labels_output.empty()) {
		LockedMemory<const void> labelsMapped = as<MemoryBlob>(face_infer_request->GetBlob(labels_output))->rmap();
		const int32_t* labels = labelsMapped.as<int32_t*>();

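		// This branch handles detectors whose boxes come out as an [N, 5] blob
		// ([x_min, y_min, x_max, y_max, confidence] in network-input pixel coordinates)
		// plus a separate I32 "labels" blob; coordinates are rescaled to the original image below.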
		for (int i = 0; i < maxProposalCount && objectSize == 5; i++) {
			FaceResult r;
			r.label = labels[i];
			r.confidence = detections[i * objectSize + 4];

			if (r.confidence <= faceDetThreshold) {
				continue;
			}

			r.location.x = static_cast<int>(detections[i * objectSize + 0] / face_network_input_width * inputWidth);
			r.location.y = static_cast<int>(detections[i * objectSize + 1] / face_network_input_height * inputHeight);
			r.location.width = static_cast<int>(detections[i * objectSize + 2] / face_network_input_width * inputWidth - r.location.x);
			r.location.height = static_cast<int>(detections[i * objectSize + 3] / face_network_input_height * inputHeight - r.location.y);

			results.push_back(r);
		}
	}

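	// SSD-style output ([1, 1, N, 7]): each detection is
	// [image_id, label, confidence, x_min, y_min, x_max, y_max] with coordinates
	// normalized to [0, 1]; an image_id < 0 marks the end of valid detections.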
	for (int i = 0; i < maxProposalCount && objectSize == 7; i++) {
		float image_id = detections[i * objectSize + 0];
		if (image_id < 0) {
			break;
		}
		FaceResult r;
		r.label = static_cast<int>(detections[i * objectSize + 1]);
		r.confidence = detections[i * objectSize + 2];

		if (r.confidence <= faceDetThreshold) {
			continue;
		}

		r.location.x = static_cast<int>(detections[i * objectSize + 3] * inputWidth);
		r.location.y = static_cast<int>(detections[i * objectSize + 4] * inputHeight);
		r.location.width = static_cast<int>(detections[i * objectSize + 5] * inputWidth - r.location.x);
		r.location.height = static_cast<int>(detections[i * objectSize + 6] * inputHeight - r.location.y);

		results.push_back(r);
	}
	// Draw results on input image
	for (size_t i = 0; i < results.size(); ++i)
		cv::rectangle(img, results[i].location & imgRect, cv::Scalar(0, 255, 0), 1, 8, 0); // clip the box to the image before drawing
	cv::namedWindow("face", cv::WINDOW_AUTOSIZE);
	cv::imshow("face", img);
	cv::waitKey(0);
	return 0;
}
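The bb_enlarge_coefficient, bb_dx_coefficient and bb_dy_coefficient parameters declared near the top are not used in this snippet. In the official interactive face detection demo they are used to enlarge (and optionally shift) each detected face rectangle so that downstream networks such as age/gender or head-pose estimation get some context around the face. A minimal sketch of that idea, written here as a hypothetical helper (the name enlargeFaceRect and the exact rounding are mine, not taken from the demo), could look like this:

// Hypothetical helper: enlarge a detected face rectangle into a square box around its center.
// Requires <algorithm> for std::max.
cv::Rect enlargeFaceRect(const cv::Rect& box,
	float enlargeCoeff, float dxCoeff, float dyCoeff) {
	int centerX = box.x + box.width / 2;
	int centerY = box.y + box.height / 2;
	int side = static_cast<int>(enlargeCoeff * std::max(box.width, box.height));
	int x = centerX - static_cast<int>(dxCoeff * side / 2.0f);
	int y = centerY - static_cast<int>(dyCoeff * side / 2.0f);
	return cv::Rect(x, y, side, side);
}

// Example use in the drawing loop above (clipping to the image as before):
// cv::Rect enlarged = enlargeFaceRect(results[i].location,
//                                     bb_enlarge_coefficient, bb_dx_coefficient, bb_dy_coefficient) & imgRect;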

Results

The face detection result is shown below. Set the Visual Studio build configuration to x64 Release and the demo runs remarkably fast on the CPU!
Face detection result
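To put a number on that claim, a simple way to time the synchronous inference is to wrap the Infer() call in step 7 with std::chrono (standard C++, no extra dependencies), for example:

#include <chrono>  // add at the top of the file

// ... inside main, replacing step 7:
auto t0 = std::chrono::steady_clock::now();
face_infer_request->Infer();
auto t1 = std::chrono::steady_clock::now();
double ms = std::chrono::duration<double, std::milli>(t1 - t0).count();
std::cout << "Inference time: " << ms << " ms" << std::endl;

Note that the very first Infer() call may include one-off warm-up costs, so timing a second run usually gives a more representative number.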

References

[1] face-detection-adas-0001 model description: https://docs.openvinotoolkit.org/latest/omz_models_intel_face_detection_adas_0001_description_face_detection_adas_0001.html
