// ForOpenvinoTest.cpp : This file contains the "main" function. Program execution begins and ends there.
//
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <fstream>
#include <iterator>
#include <memory>
#include <string>
#include <vector>
#include <samples/common.hpp>
#ifdef _WIN32
#include <windows.h>
#include <wchar.h>
#endif
#include <inference_engine.hpp>
#include <samples/ocv_common.hpp>
#include <samples/classification_results.h>
using namespace InferenceEngine;
#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
#define tcout std::wcout
#define file_name_t std::wstring
#define imread_t imreadW
#define ClassificationResult_t ClassificationResultW
#else
#define tcout std::cout
#define file_name_t std::string
#define imread_t cv::imread
#define ClassificationResult_t ClassificationResult
#endif
#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
cv::Mat imreadW(std::wstring input_image_path) {
    cv::Mat image;
    std::ifstream input_image_stream;
    input_image_stream.open(
        input_image_path.c_str(),
        std::iostream::binary | std::ios_base::ate | std::ios_base::in);
    if (input_image_stream.is_open()) {
        if (input_image_stream.good()) {
            std::size_t file_size = input_image_stream.tellg();
            input_image_stream.seekg(0, std::ios::beg);
            std::vector<char> buffer;
            buffer.reserve(file_size);
            // istreambuf_iterator (not istream_iterator) must be used here:
            // istream_iterator skips whitespace bytes and would corrupt the image data.
            std::copy(
                std::istreambuf_iterator<char>(input_image_stream),
                std::istreambuf_iterator<char>(),
                std::back_inserter(buffer));
            image = cv::imdecode(cv::Mat(1, static_cast<int>(file_size), CV_8UC1, buffer.data()), cv::IMREAD_COLOR);
        } else {
            tcout << "Input file '" << input_image_path << "' processing error" << std::endl;
        }
        input_image_stream.close();
    } else {
        tcout << "Unable to read input file '" << input_image_path << "'" << std::endl;
    }
    return image;
}
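// Note: cv::imread cannot take a wide-character path, which is why imreadW
// reads the raw bytes through std::ifstream (MSVC's ifstream accepts wchar_t*
// paths) and delegates the actual decoding to cv::imdecode.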
// Naive wide->narrow conversion; adequate here because it is only applied to
// ASCII strings such as the device name "CPU".
std::string simpleConvert(const std::wstring& wstr) {
    std::string str;
    for (auto&& wc : wstr)
        str += static_cast<char>(wc);
    return str;
}
std::wstring StringToWString(const std::string& str) {
    // Query the required buffer size (in wide characters, including the
    // terminating null), then run the actual UTF-8 -> UTF-16 conversion.
    int num = MultiByteToWideChar(CP_UTF8, 0, str.c_str(), -1, NULL, 0);
    if (num <= 0)
        return std::wstring();
    std::vector<wchar_t> wide(num);
    MultiByteToWideChar(CP_UTF8, 0, str.c_str(), -1, wide.data(), num);
    return std::wstring(wide.data());
}
#endif

// The original sample reads the model, image and device names from argv (via
// wmain on Windows); this simplified demo hard-codes them, so main() takes no
// command-line arguments.
int main() {
    try {
#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
        const file_name_t input_model{ StringToWString("C:/Program Files (x86)/Intel/openvino_2021.2.185/deployment_tools/tools/model_downloader/public/alexnet/alexnet.xml") };
        const file_name_t input_image_path{ StringToWString("C:/Users/sjsys/Desktop/1/3.bmp") };
        const std::string device_name = simpleConvert(StringToWString("CPU"));
#else
        const file_name_t input_model{ "C:/Program Files (x86)/Intel/openvino_2021.2.185/deployment_tools/tools/model_downloader/public/alexnet/alexnet.xml" };
        const file_name_t input_image_path{ "C:/Users/sjsys/Desktop/1/3.bmp" };
        const std::string device_name{ "CPU" };
#endif
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 1. Load inference engine instance ---------------------------------------
        Core ie;
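        // Optional sanity check (not part of the original sample): list the
        // devices this Inference Engine build can see, e.g. "CPU", "GPU", ...
        // for (const std::string& dev : ie.GetAvailableDevices())
        //     std::cout << "Found device: " << dev << std::endl;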
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 2. Read a model in IR (.xml and .bin) or ONNX (.onnx) format ------------
        CNNNetwork network = ie.ReadNetwork(input_model);
        if (network.getOutputsInfo().size() != 1) throw std::logic_error("Sample supports topologies with 1 output only");
        if (network.getInputsInfo().size() != 1) throw std::logic_error("Sample supports topologies with 1 input only");
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 3. Configure input & output ---------------------------------------------
        // --------------------------- Prepare input blobs ------------------------------------------------------
        InputInfo::Ptr input_info = network.getInputsInfo().begin()->second;
        std::string input_name = network.getInputsInfo().begin()->first;
        /* Mark input as resizable by setting a resize algorithm.
         * This way an input blob of any shape can be set on the infer request;
         * resize and layout conversions are executed automatically during inference. */
        input_info->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
        input_info->setLayout(Layout::NHWC);
        input_info->setPrecision(Precision::U8);
        // --------------------------- Prepare output blobs -----------------------------------------------------
        DataPtr output_info = network.getOutputsInfo().begin()->second;
        std::string output_name = network.getOutputsInfo().begin()->first;
        output_info->setPrecision(Precision::FP32);
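        // AlexNet produces a single 1x1000 vector of class scores; if needed,
        // the shape can be verified via the tensor descriptor (a sketch):
        // const SizeVector out_dims = output_info->getTensorDesc().getDims();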
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 4. Load the model to the device -----------------------------------------
        ExecutableNetwork executable_network = ie.LoadNetwork(network, device_name);
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 5. Create infer request -------------------------------------------------
        InferRequest infer_request = executable_network.CreateInferRequest();
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 6. Prepare input ---------------------------------------------------------
        /* Read the input image into a blob and set it on the infer request without resize or layout conversion. */
        cv::Mat image = imread_t(input_image_path);
        Blob::Ptr imgBlob = wrapMat2Blob(image);     // just wraps the Mat data in a Blob::Ptr without allocating new memory
        infer_request.SetBlob(input_name, imgBlob);  // infer_request accepts an input blob of any size
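        // Note: wrapMat2Blob only wraps the existing cv::Mat buffer, so `image`
        // must stay alive and unmodified until inference has completed.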
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 7. Do inference ----------------------------------------------------------
        /* Run the request synchronously */
        infer_request.Infer();
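        /* The request could equally be run asynchronously, e.g.:
         *     infer_request.StartAsync();
         *     infer_request.Wait(InferRequest::WaitMode::RESULT_READY);
         * which gives the same result here but lets other work overlap. */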
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- 8. Process output --------------------------------------------------------
        Blob::Ptr output = infer_request.GetBlob(output_name);
        // Print classification results
        ClassificationResult_t classificationResult(output, { input_image_path });
        classificationResult.print();
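        // If the raw scores are needed beyond the printed top results, they can
        // be read from the blob directly (a sketch, assuming the FP32 output
        // precision set above):
        // if (auto moutput = as<MemoryBlob>(output)) {
        //     auto holder = moutput->rmap();
        //     const float* scores = holder.as<const float*>();
        //     // scores[i] is the confidence for class i
        // }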
        // -----------------------------------------------------------------------------------------------------
    }
    catch (const std::exception& ex) {
        std::cerr << ex.what() << std::endl;
        return EXIT_FAILURE;
    }
    std::cout << "This sample is an API example, for any performance measurements "
                 "please use the dedicated benchmark_app tool" << std::endl;
    return EXIT_SUCCESS;
}
// A second, even more minimal skeleton kept for reference (commented out):
//#include <iostream>
//#include <inference_engine.hpp>
//int main()
//{
//    InferenceEngine::Core core;
//    InferenceEngine::CNNNetwork network = core.ReadNetwork("C:/Users/sjsys/Desktop/1/single-image-super-resolution-1033.xml");
//    InferenceEngine::InputsDataMap input_info = network.getInputsInfo();
//    InferenceEngine::OutputsDataMap output_info = network.getOutputsInfo();
//    InferenceEngine::ExecutableNetwork executable_network = core.LoadNetwork(network, "CPU");
//    auto infer_request = executable_network.CreateInferRequest();
//}