OpenVINO + CNN (C++)

A minimal example of running a ResNet classifier with the OpenVINO Inference Engine C++ API, loading an ONNX model directly:

#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <inference_engine.hpp>
#include <iostream>
#include <string>
#include <ctime>

using namespace std;
using namespace cv;
using namespace InferenceEngine;
int main() {
	string modelPath = "D:\\program\\vs\\OpenVINO\\OpenVINO\\resNet50.onnx";
	string imagePath = "C:\\Users\\chwb\\Downloads\\7335\\B2F.K\\124143165.png";
	// Read and resize the input image
	cv::Mat im = cv::imread(imagePath);
	cv::Mat image;
	cv::resize(im, image, cv::Size(224, 56)); // width 224, height 56
	cout << "image loaded" << endl;
	// Read the network
	Core ie;
	CNNNetwork network = ie.ReadNetwork(modelPath);
	// Query the network's input info
	InputInfo::Ptr input_info = network.getInputsInfo().begin()->second;
	std::string input_name = network.getInputsInfo().begin()->first;
	// Configure preprocessing: the app feeds interleaved U8 images (NHWC)
	// and the plugin resizes and converts them to the network's input
	input_info->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
	input_info->setLayout(Layout::NHWC);
	input_info->setPrecision(Precision::U8);
	cout << input_name << endl;
	// Query the network's output info
	DataPtr output_info = network.getOutputsInfo().begin()->second;
	std::string output_name = network.getOutputsInfo().begin()->first;
	// Set the output precision
	output_info->setPrecision(Precision::FP32);
	// Load the network onto the device, producing an executable network
	ExecutableNetwork executable_network = ie.LoadNetwork(network, "CPU");
	// Create an inference request
	InferRequest infer_request = executable_network.CreateInferRequest();
	// Wrap the resized image in an input blob; cv::Mat stores interleaved
	// BGR pixels, so the blob is described as NHWC to match the layout
	// declared on the input above (dims are still given in NCHW order)
	InferenceEngine::TensorDesc tDesc(
		InferenceEngine::Precision::U8,
		{ 1, 3, (size_t)image.rows, (size_t)image.cols },
		InferenceEngine::Layout::NHWC);
	Blob::Ptr imgBlob = InferenceEngine::make_shared_blob<uint8_t>(tDesc, image.data);
	infer_request.SetBlob(input_name, imgBlob);
	// Run inference and time it
	clock_t time_start = clock();
	infer_request.Infer();
	clock_t time_end = clock();
	cout << "infer time is:" << 1000 * (time_end - time_start) / (double)CLOCKS_PER_SEC << "ms" << endl;
	// Fetch the inference results
	const float* output = infer_request.GetBlob(output_name)->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();
	for (int i = 0; i < 3; i++) {
		cout << output[i] << endl;
	}
	return 0;
}
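
The values printed here are the network's raw outputs (logits); the full sample further below adds a softmax and an argmax to turn them into class probabilities and a label.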

Converting ONNX to IR

The Model Optimizer can bake preprocessing into the IR: --reverse_input_channels swaps BGR/RGB, and --mean_values/--scale_values embed the normalization:

python ./mo.py --input_model models/resnet50.onnx --output_dir models/resnet50_retrival  --reverse_input_channels --input_shape [1,3,224,56] --mean_values  [0.485,0.456,0.406] --scale_values [0.229,0.224,0.225]
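
Once converted, the IR loads the same way as the ONNX file: ReadNetwork() takes the .xml path and picks up the matching .bin automatically. A minimal sketch (the path is illustrative):

	InferenceEngine::Core ie;
	// The .bin weights file next to the .xml is located automatically
	InferenceEngine::CNNNetwork network = ie.ReadNetwork("models/resnet50_retrival/resnet50.xml");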

Classification with manual preprocessing (C++)

#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <inference_engine.hpp>
#include <iostream>
#include <string>
#include <vector>
#include <cmath>
#include <cstdio>
using namespace std;
using namespace cv;
using namespace InferenceEngine;
// Copy a CV_32FC3 image into the network's FP32 NCHW input blob
// (despite the name, this variant reads float pixels, not U8)
void matU8ToBlob(const cv::Mat& orig_image, InferenceEngine::Blob::Ptr& blob) {
	InferenceEngine::SizeVector blobSize = blob->getTensorDesc().getDims();
	const size_t width = blobSize[3];
	const size_t height = blobSize[2];
	const size_t channels = blobSize[1];
	InferenceEngine::MemoryBlob::Ptr mblob = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
	if (!mblob) {
		IE_THROW() << "We expect blob to be inherited from MemoryBlob in matU8ToBlob, "
			<< "but by fact we were not able to cast inputBlob to MemoryBlob";
	}
	// locked memory holder should be alive all time while access to its buffer happens
	auto mblobHolder = mblob->wmap();
	float* blob_data = mblobHolder.as<float*>();
	cv::Mat resized_image(orig_image);
	if (static_cast<int>(width) != orig_image.size().width || static_cast<int>(height) != orig_image.size().height) {
		cv::resize(orig_image, resized_image, cv::Size(width, height));
	}
	size_t image_size = width * height;
	// Interleaved HWC -> planar CHW copy
	for (size_t row = 0; row < height; row++) {
		for (size_t col = 0; col < width; col++) {
			for (size_t ch = 0; ch < channels; ch++) {
				blob_data[image_size * ch + row * width + col] = resized_image.at<cv::Vec3f>(row, col)[ch];
			}
		}
	}
}
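
As an aside, when the input is configured as U8 with resize preprocessing (as in the first sample), the per-pixel copy above can be avoided entirely by wrapping the interleaved Mat as an NHWC blob; a minimal sketch, following the wrapMat2Blob helper from the OpenVINO samples:

	// Zero-copy alternative: wrap an interleaved 8-bit cv::Mat as an NHWC blob.
	// The Mat must stay alive for as long as the blob is used.
	InferenceEngine::Blob::Ptr wrapMat2Blob(const cv::Mat& mat) {
		InferenceEngine::TensorDesc desc(
			InferenceEngine::Precision::U8,
			{ 1, (size_t)mat.channels(), (size_t)mat.rows, (size_t)mat.cols },
			InferenceEngine::Layout::NHWC);
		return InferenceEngine::make_shared_blob<uint8_t>(desc, mat.data);
	}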
// Apply a numerically stable softmax to each row of a row x col array, in place
void softmax(float* x, int row, int col)
{
	for (int j = 0; j < row; ++j)
	{
		float max = x[j * col]; // start from the first element, not 0, so the true max is found
		float sum = 0.0;
		for (int k = 0; k < col; ++k)
			if (max < x[k + j * col])
				max = x[k + j * col];
		for (int k = 0; k < col; ++k)
		{
			x[k + j * col] = exp(x[k + j * col] - max);    // prevent data overflow
			sum += x[k + j * col];
		}
		for (int k = 0; k < col; ++k) x[k + j * col] /= sum;
	}
}
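
A quick sanity check of the routine on made-up logits:

	float logits[2] = { 2.0f, 0.5f };
	softmax(logits, 1, 2);
	// logits is now ~{ 0.8176f, 0.1824f }, summing to 1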

int main() {
	// Configure the inference device, model path, image paths and labels
	string labels[2] = { "B", "C" };
	string DEVICE = "CPU";
	/*string xml = "D:\\program\\vs\\OpenVINO\\OpenVINO\\resnet.xml";
	string bin = "D:\\program\\vs\\OpenVINO\\OpenVINO\\resnet.bin";*/
	string onnx = "D:\\program\\vs\\OpenVINO\\OpenVINO\\resnet.onnx";
	string imgPath = "D:\\Data\\leaf_photos\\B\\072052902.png";
	string imgDir = "D:\\Data\\leaf_photos\\C";
	// Initialize the Inference Engine and list the supported devices
	InferenceEngine::Core ie;
	vector<string> availableDevices = ie.GetAvailableDevices();
	for (const auto& device : availableDevices) {
		printf("supported device name : %s \n", device.c_str());
	}
	// Load the ResNet18 network
	InferenceEngine::CNNNetwork network = ie.ReadNetwork(onnx);
	InferenceEngine::InputsDataMap inputs = network.getInputsInfo();
	InferenceEngine::OutputsDataMap outputs = network.getOutputsInfo();
	// Get the input/output names and set the data formats
	std::string input_name = "";
	for (auto& item : inputs) {
		input_name = item.first;
		auto input_data = item.second;
		input_data->setPrecision(Precision::FP32);
		input_data->setLayout(Layout::NCHW);
		std::cout << "input name: " << input_name << std::endl;
	}
	std::string output_name = "";
	for (auto& item : outputs) {
		output_name = item.first;
		auto output_data = item.second;
		output_data->setPrecision(Precision::FP32);
		std::cout << "output name: " << output_name << std::endl;
	}
	// Create the executable network and an inference request
	auto executable_network = ie.LoadNetwork(network, DEVICE);
	auto infer_request = executable_network.CreateInferRequest();
	// Prepare the input image data
	Blob::Ptr input_blob = infer_request.GetBlob(input_name);
	vector<string> imgPaths, imgNames;
	cv::glob(imgDir, imgPaths, true);
	for (size_t i = 0; i < imgPaths.size(); ++i) {
		cv::Mat image = cv::imread(imgPaths[i]);
		if (image.empty()) {
			cout << "image does not exist: " << imgPaths[i] << endl;
			continue;
		}
		// BGR -> RGB, scale to [0,1], then ImageNet mean/std normalization
		cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
		image.convertTo(image, CV_32F);
		image = image / 255.0;
		cv::subtract(image, cv::Scalar(0.485, 0.456, 0.406), image);
		cv::divide(image, cv::Scalar(0.229, 0.224, 0.225), image);
		matU8ToBlob(image, input_blob);
		// Run inference and read back the raw scores
		infer_request.Infer();
		auto output = infer_request.GetBlob(output_name);
		float* probs = static_cast<PrecisionTrait<Precision::FP32>::value_type*>(output->buffer());
		const SizeVector outputDims = output->getTensorDesc().getDims();
		cout << probs[0] << " " << probs[1] << endl;
		softmax(probs, outputDims[0], outputDims[1]);
		// Argmax over the class probabilities
		float max = probs[0];
		int max_index = 0;
		for (int j = 0; j < (int)outputDims[1]; j++) {
			if (max < probs[j]) {
				max = probs[j];
				max_index = j;
			}
		}
		cout << "pb:" << probs[0] << ", " << "pc:" << probs[1] << " " << "Class:" << labels[max_index] << endl;
		// Optional visualization:
		//cv::putText(image, labels[max_index], cv::Point(50, 50), cv::FONT_HERSHEY_SIMPLEX, 1.0, cv::Scalar(0, 0, 255), 2, 8);
		//cv::imshow("input image", image);
		//cv::waitKey(0);
	}
	return 0;
}
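
Note that the manual OpenCV preprocessing in this sample (BGR-to-RGB conversion, scaling to [0,1], mean/std normalization) duplicates what the --reverse_input_channels, --mean_values and --scale_values flags bake into the IR during conversion, so when loading the converted IR instead of the raw ONNX model these steps should be dropped to avoid normalizing twice.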
