OpenVINO C++ Usage

Getting Started with Intel OpenVINO (C++ Integration) - 阿飞__'s CSDN blog

OpenVINO Learning 2 --- Demos - P_B_Y's CSDN blog

1. Official documentation samples

①   /opt/intel/openvino_2021.4.689/inference_engine/samples/cpp/hello_classification

// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <samples/classification_results.h>

#include <inference_engine.hpp>
#include <iterator>
#include <memory>
#include <samples/common.hpp>
#include <samples/ocv_common.hpp>
#include <string>
#include <vector>

using namespace InferenceEngine;

/**
 * @brief Define names depending on Unicode path support
 */
#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
    #define tcout                  std::wcout
    #define file_name_t            std::wstring
    #define imread_t               imreadW
    #define ClassificationResult_t ClassificationResultW
#else
    #define tcout                  std::cout
    #define file_name_t            std::string
    #define imread_t               cv::imread
    #define ClassificationResult_t ClassificationResult
#endif

#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
/**
 * @brief Implementation of cv::imread with Unicode path support
 */
cv::Mat imreadW(std::wstring input_image_path) {
    cv::Mat image;
    std::ifstream input_image_stream;
    input_image_stream.open(input_image_path.c_str(), std::iostream::binary | std::ios_base::ate | std::ios_base::in);
    if (input_image_stream.is_open()) {
        if (input_image_stream.good()) {
            input_image_stream.seekg(0, std::ios::end);
            std::size_t file_size = input_image_stream.tellg();
            input_image_stream.seekg(0, std::ios::beg);
            std::vector<char> buffer(0);
            std::copy(std::istreambuf_iterator<char>(input_image_stream), std::istreambuf_iterator<char>(), std::back_inserter(buffer));
            image = cv::imdecode(cv::Mat(1, file_size, CV_8UC1, &buffer[0]), cv::IMREAD_COLOR);
        } else {
            tcout << "Input file '" << input_image_path << "' processing error" << std::endl;
        }
        input_image_stream.close();
    } else {
        tcout << "Unable to read input file '" << input_image_path << "'" << std::endl;
    }
    return image;
}

/**
 * @brief Convert wstring to string
 * @param ref on wstring
 * @return string
 */
std::string simpleConvert(const std::wstring& wstr) {
    std::string str;
    for (auto&& wc : wstr)
        str += static_cast<char>(wc);
    return str;
}

/**
 * @brief Main function with Unicode path (wide string) support
 */
int wmain(int argc, wchar_t* argv[]) {
#else

int main(int argc, char* argv[]) {
#endif
    try {
        // ------------------------------ Parsing and validation of input arguments
        // ---------------------------------
        if (argc != 4) {
            tcout << "Usage : " << argv[0] << " <path_to_model> <path_to_image> <device_name>" << std::endl;
            return EXIT_FAILURE;
        }

        const file_name_t input_model {argv[1]};
        const file_name_t input_image_path {argv[2]};
#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
        const std::string device_name = simpleConvert(argv[3]);
#else
        const std::string device_name {argv[3]};
#endif
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 1. Initialize inference engine core
        // -------------------------------------
        Core ie;
        // -----------------------------------------------------------------------------------------------------

        // Step 2. Read a model in OpenVINO Intermediate Representation (.xml and
        // .bin files) or ONNX (.onnx file) format
        CNNNetwork network = ie.ReadNetwork(input_model);
        if (network.getOutputsInfo().size() != 1)
            throw std::logic_error("Sample supports topologies with 1 output only");
        if (network.getInputsInfo().size() != 1)
            throw std::logic_error("Sample supports topologies with 1 input only");
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 3. Configure input & output
        // ---------------------------------------------
        // --------------------------- Prepare input blobs
        // -----------------------------------------------------
        InputInfo::Ptr input_info = network.getInputsInfo().begin()->second;
        std::string input_name = network.getInputsInfo().begin()->first;

        /* Mark input as resizable by setting of a resize algorithm.
         * In this case we will be able to set an input blob of any shape to an
         * infer request. Resize and layout conversions are executed automatically
         * during inference */
        input_info->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
        input_info->setLayout(Layout::NHWC);
        input_info->setPrecision(Precision::U8);

        // --------------------------- Prepare output blobs
        // ----------------------------------------------------
        if (network.getOutputsInfo().empty()) {
            std::cerr << "Network outputs info is empty" << std::endl;
            return EXIT_FAILURE;
        }
        DataPtr output_info = network.getOutputsInfo().begin()->second;
        std::string output_name = network.getOutputsInfo().begin()->first;

        output_info->setPrecision(Precision::FP32);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 4. Loading a model to the device
        // ------------------------------------------
        ExecutableNetwork executable_network = ie.LoadNetwork(network, device_name);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 5. Create an infer request
        // -------------------------------------------------
        InferRequest infer_request = executable_network.CreateInferRequest();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 6. Prepare input
        // --------------------------------------------------------
        /* Read input image to a blob and set it to an infer request without resize
         * and layout conversions. */
        cv::Mat image = imread_t(input_image_path);
        Blob::Ptr imgBlob = wrapMat2Blob(image);     // just wrap Mat data by Blob::Ptr
                                                     // without allocating of new memory
        infer_request.SetBlob(input_name, imgBlob);  // infer_request accepts input blob of any size
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 7. Do inference
        // --------------------------------------------------------
        /* Running the request synchronously */
        infer_request.Infer();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 8. Process output
        // ------------------------------------------------------
        Blob::Ptr output = infer_request.GetBlob(output_name);
        // Print classification results
        ClassificationResult_t classificationResult(output, {input_image_path});
        classificationResult.print();
        // -----------------------------------------------------------------------------------------------------
    } catch (const std::exception& ex) {
        std::cerr << ex.what() << std::endl;
        return EXIT_FAILURE;
    }
    std::cout << "This sample is an API example, for any performance measurements "
                 "please use the dedicated benchmark_app tool"
              << std::endl;
    return EXIT_SUCCESS;
}
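
Stripped of argument parsing, preprocessing, and result printing, the sample above boils down to a handful of Inference Engine calls. The following is a minimal sketch of that synchronous flow (model.xml and CPU are placeholders), meant only to show the call order, not to replace the sample:

#include <inference_engine.hpp>
#include <iostream>

// Minimal sketch of the synchronous flow used by hello_classification
// (placeholder model path and device, no preprocessing or result handling).
int main() {
    InferenceEngine::Core ie;                                       // Step 1: core
    auto network = ie.ReadNetwork("model.xml");                     // Step 2: IR or ONNX model
    auto exec_network = ie.LoadNetwork(network, "CPU");             // Step 4: compile for a device
    auto request = exec_network.CreateInferRequest();               // Step 5: per-request state
    request.Infer();                                                // Step 7: synchronous inference
    auto output = request.GetBlob(network.getOutputsInfo().begin()->first);  // Step 8: fetch result
    std::cout << "Output byte size: " << output->byteSize() << std::endl;
    return 0;
}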

②  /opt/intel/openvino_2021.4.689/inference_engine/samples/cpp/hello_query_device

// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <cstdlib>
#include <ie_plugin_config.hpp>
#include <inference_engine.hpp>
#include <iomanip>
#include <memory>
#include <samples/common.hpp>
#include <set>
#include <string>
#include <tuple>
#include <vector>

using namespace InferenceEngine;

namespace {
/**
 * @brief Overload output stream operator to print vectors in pretty form
 * [value1, value2, ...]
 */
template <typename T>
std::ostream& operator<<(std::ostream& stream, const std::vector<T>& v) {
    stream << "[ ";
    for (auto&& value : v)
        stream << value << " ";
    return stream << "]";
}

/**
 * @brief Print IE Parameters
 * @param value reference to an IE Parameter
 * @return void
 */
void printParameterValue(const Parameter& value) {
    if (value.empty()) {
        std::cout << "EMPTY VALUE" << std::endl;
    } else if (value.is<bool>()) {
        std::cout << std::boolalpha << value.as<bool>() << std::noboolalpha << std::endl;
    } else if (value.is<int>()) {
        std::cout << value.as<int>() << std::endl;
    } else if (value.is<unsigned int>()) {
        std::cout << value.as<unsigned int>() << std::endl;
    } else if (value.is<uint64_t>()) {
        std::cout << value.as<uint64_t>() << std::endl;
    } else if (value.is<float>()) {
        std::cout << value.as<float>() << std::endl;
    } else if (value.is<std::string>()) {
        std::string stringValue = value.as<std::string>();
        std::cout << (stringValue.empty() ? "\"\"" : stringValue) << std::endl;
    } else if (value.is<std::vector<std::string>>()) {
        std::cout << value.as<std::vector<std::string>>() << std::endl;
    } else if (value.is<std::vector<int>>()) {
        std::cout << value.as<std::vector<int>>() << std::endl;
    } else if (value.is<std::vector<float>>()) {
        std::cout << value.as<std::vector<float>>() << std::endl;
    } else if (value.is<std::vector<unsigned int>>()) {
        std::cout << value.as<std::vector<unsigned int>>() << std::endl;
    } else if (value.is<std::tuple<unsigned int, unsigned int, unsigned int>>()) {
        auto values = value.as<std::tuple<unsigned int, unsigned int, unsigned int>>();
        std::cout << "{ ";
        std::cout << std::get<0>(values) << ", ";
        std::cout << std::get<1>(values) << ", ";
        std::cout << std::get<2>(values);
        std::cout << " }";
        std::cout << std::endl;
    } else if (value.is<Metrics::DeviceType>()) {
        auto v = value.as<Metrics::DeviceType>();
        std::cout << v << std::endl;
    } else if (value.is<std::map<InferenceEngine::Precision, float>>()) {
        auto values = value.as<std::map<InferenceEngine::Precision, float>>();
        std::cout << "{ ";
        for (auto& kv : values) {
            std::cout << kv.first << ": " << kv.second << "; ";
        }
        std::cout << " }";
        std::cout << std::endl;
    } else if (value.is<std::tuple<unsigned int, unsigned int>>()) {
        auto values = value.as<std::tuple<unsigned int, unsigned int>>();
        std::cout << "{ ";
        std::cout << std::get<0>(values) << ", ";
        std::cout << std::get<1>(values);
        std::cout << " }";
        std::cout << std::endl;
    } else {
        std::cout << "UNSUPPORTED TYPE" << std::endl;
    }
}

}  // namespace

int main(int argc, char* argv[]) {
    try {
        // ------------------------------ Parsing and validation of input arguments
        // ---------------------------------
        if (argc != 1) {
            std::cout << "Usage : " << argv[0] << std::endl;
            return EXIT_FAILURE;
        }

        // --------------------------- Step 1. Initialize inference engine core
        // -------------------------------------
        std::cout << "Loading Inference Engine" << std::endl;
        Core ie;

        // --------------------------- Get list of available devices
        // -------------------------------------

        std::vector<std::string> availableDevices = ie.GetAvailableDevices();

        // --------------------------- Query and print supported metrics and config
        // keys--------------------

        std::cout << "Available devices: " << std::endl;
        for (auto&& device : availableDevices) {
            std::cout << device << std::endl;

            std::cout << "\tSUPPORTED_METRICS: " << std::endl;
            std::vector<std::string> supportedMetrics = ie.GetMetric(device, METRIC_KEY(SUPPORTED_METRICS));
            for (auto&& metricName : supportedMetrics) {
                if (metricName != METRIC_KEY(SUPPORTED_METRICS) && metricName != METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
                    std::cout << "\t\t" << metricName << " : " << std::flush;
                    printParameterValue(ie.GetMetric(device, metricName));
                }
            }

            if (std::find(supportedMetrics.begin(), supportedMetrics.end(), METRIC_KEY(SUPPORTED_CONFIG_KEYS)) != supportedMetrics.end()) {
                std::cout << "\tSUPPORTED_CONFIG_KEYS (default values): " << std::endl;
                std::vector<std::string> supportedConfigKeys = ie.GetMetric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS));
                for (auto&& configKey : supportedConfigKeys) {
                    std::cout << "\t\t" << configKey << " : " << std::flush;
                    printParameterValue(ie.GetConfig(device, configKey));
                }
            }

            std::cout << std::endl;
        }
    } catch (const std::exception& ex) {
        std::cerr << std::endl << "Exception occurred: " << ex.what() << std::endl << std::flush;
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
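
hello_query_device enumerates every device and every metric; if only a single value is needed, the same Core API can be queried directly. A minimal sketch, assuming the CPU plugin is available (FULL_DEVICE_NAME is one of the standard metrics the sample prints):

#include <ie_plugin_config.hpp>
#include <inference_engine.hpp>
#include <iostream>

// Query one metric of one device instead of iterating over all of them.
int main() {
    InferenceEngine::Core ie;
    auto full_name = ie.GetMetric("CPU", METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
    std::cout << "CPU: " << full_name << std::endl;
    return 0;
}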

③ /opt/intel/openvino_2021.4.689/inference_engine/samples/cpp/hello_nv12_input_classification

// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <samples/classification_results.h>
#include <sys/stat.h>

#include <fstream>
#include <inference_engine.hpp>
#include <iostream>
#include <memory>
#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#ifdef _WIN32
    #include <samples/os/windows/w_dirent.h>
#else
    #include <dirent.h>
#endif

using namespace InferenceEngine;

/**
 * \brief Parse image size provided as string in format WIDTHxHEIGHT
 * @param size_string image size as a string in WIDTHxHEIGHT format
 * @return parsed width and height
 */
std::pair<size_t, size_t> parseImageSize(const std::string& size_string) {
    auto delimiter_pos = size_string.find("x");
    if (delimiter_pos == std::string::npos || delimiter_pos >= size_string.size() - 1 || delimiter_pos == 0) {
        std::stringstream err;
        err << "Incorrect format of image size parameter, expected WIDTHxHEIGHT, "
               "actual: "
            << size_string;
        throw std::runtime_error(err.str());
    }

    size_t width = static_cast<size_t>(std::stoull(size_string.substr(0, delimiter_pos)));
    size_t height = static_cast<size_t>(std::stoull(size_string.substr(delimiter_pos + 1, size_string.size())));

    if (width == 0 || height == 0) {
        throw std::runtime_error("Incorrect format of image size parameter, width "
                                 "and height must not be equal to 0");
    }

    if (width % 2 != 0 || height % 2 != 0) {
        throw std::runtime_error("Unsupported image size, width and height must be even numbers");
    }

    return {width, height};
}

// Comparing to samples/args_helper.hpp, this version filters files by ".yuv"
// extension
/**
 * @brief This function collects input files: either a single file path or all
 * .yuv files in a given folder
 * @param path path to a file or folder to be checked for existence
 * @return vector of verified input file paths
 */
std::vector<std::string> readInputFileNames(const std::string& path) {
    struct stat sb;
    if (stat(path.c_str(), &sb) != 0) {
        slog::warn << "File " << path << " cannot be opened!" << slog::endl;
        return {};
    }

    std::vector<std::string> files;

    if (S_ISDIR(sb.st_mode)) {
        DIR* dp = opendir(path.c_str());
        if (dp == nullptr) {
            slog::warn << "Directory " << path << " cannot be opened!" << slog::endl;
            return {};
        }

        for (struct dirent* ep = readdir(dp); ep != nullptr; ep = readdir(dp)) {
            std::string fileName = ep->d_name;
            if (fileName == "." || fileName == ".." || fileName.substr(fileName.size() - 4) != ".yuv")
                continue;
            files.push_back(path + "/" + ep->d_name);
        }
        closedir(dp);
    } else {
        files.push_back(path);
    }

    size_t max_files = 20;
    if (files.size() < max_files) {
        slog::info << "Files were added: " << files.size() << slog::endl;
        for (std::string filePath : files) {
            slog::info << "    " << filePath << slog::endl;
        }
    } else {
        slog::info << "Files were added: " << files.size() << ". Too many to display each of them." << slog::endl;
    }

    return files;
}

using UString = std::basic_string<uint8_t>;

/**
 * \brief Read image data from file
 * @param files vector of file paths
 * @param size number of bytes to read from each file (the NV12 frame size)
 * @return buffers containing the images data
 */
std::vector<UString> readImagesDataFromFiles(const std::vector<std::string>& files, size_t size) {
    std::vector<UString> result;

    for (const auto& image_path : files) {
        std::ifstream file(image_path, std::ios_base::ate | std::ios_base::binary);
        if (!file.good() || !file.is_open()) {
            std::stringstream err;
            err << "Cannot access input image file. File path: " << image_path;
            throw std::runtime_error(err.str());
        }

        const size_t file_size = file.tellg();
        if (file_size < size) {
            std::stringstream err;
            err << "Invalid read size provided. File size: " << file_size << ", to read: " << size;
            throw std::runtime_error(err.str());
        }
        file.seekg(0);

        UString data(size, 0);
        file.read(reinterpret_cast<char*>(&data[0]), size);
        result.push_back(std::move(data));
    }
    return result;
}

/**
 * @brief Read input image to blob
 * @param data reference to the input image data buffers
 * @param width input image width
 * @param height input image height
 * @return blobs holding the NV12 input data
 */
std::vector<Blob::Ptr> readInputBlobs(std::vector<UString>& data, size_t width, size_t height) {
    // read image with size converted to NV12 data size: height(NV12) = 3 / 2 *
    // logical height

    // Create tensor descriptors for Y and UV blobs
    const InferenceEngine::TensorDesc y_plane_desc(InferenceEngine::Precision::U8, {1, 1, height, width}, InferenceEngine::Layout::NHWC);
    const InferenceEngine::TensorDesc uv_plane_desc(InferenceEngine::Precision::U8, {1, 2, height / 2, width / 2}, InferenceEngine::Layout::NHWC);
    const size_t offset = width * height;

    std::vector<Blob::Ptr> blobs;
    for (auto& buf : data) {
        // --------------------------- Create a blob to hold the NV12 input data
        // -------------------------------
        auto ptr = &buf[0];

        // Create blob for Y plane from raw data
        Blob::Ptr y_blob = make_shared_blob<uint8_t>(y_plane_desc, ptr);
        // Create blob for UV plane from raw data
        Blob::Ptr uv_blob = make_shared_blob<uint8_t>(uv_plane_desc, ptr + offset);
        // Create NV12Blob from Y and UV blobs
        blobs.emplace_back(make_shared_blob<NV12Blob>(y_blob, uv_blob));
    }

    return blobs;
}

/**
 * @brief Check whether the device supports batched blobs
 * @param ie IE Core object
 * @param device_name device name
 * @return true if BATCHED_BLOB is supported, false otherwise
 */
bool isBatchedBlobSupported(const Core& ie, const std::string& device_name) {
    const std::vector<std::string> supported_metrics = ie.GetMetric(device_name, METRIC_KEY(SUPPORTED_METRICS));

    if (std::find(supported_metrics.begin(), supported_metrics.end(), METRIC_KEY(OPTIMIZATION_CAPABILITIES)) == supported_metrics.end()) {
        return false;
    }

    const std::vector<std::string> optimization_caps = ie.GetMetric(device_name, METRIC_KEY(OPTIMIZATION_CAPABILITIES));

    return std::find(optimization_caps.begin(), optimization_caps.end(), METRIC_VALUE(BATCHED_BLOB)) != optimization_caps.end();
}

/**
 * @brief The entry point of the Inference Engine sample application
 */
int main(int argc, char* argv[]) {
    try {
        // ------------------------------ Parsing and validation input
        // arguments------------------------------
        if (argc != 5) {
            std::cout << "Usage : " << argv[0] << " <path_to_model> <path_to_image(s)> <image_size> <device_name>" << std::endl;
            return EXIT_FAILURE;
        }

        const std::string input_model {argv[1]};
        const std::string input_image_path {argv[2]};
        size_t input_width = 0, input_height = 0;
        std::tie(input_width, input_height) = parseImageSize(argv[3]);
        const std::string device_name {argv[4]};
        // -----------------------------------------------------------------------------------------------------

        // ------------------------------ Read image names
        // -----------------------------------------------------
        auto image_names = readInputFileNames(input_image_path);

        if (image_names.empty()) {
            throw std::invalid_argument("images not found");
        }
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 1. Initialize inference engine core
        // ------------------------------------------------
        Core ie;
        // -----------------------------------------------------------------------------------------------------

        // Step 2. Read a model in OpenVINO Intermediate Representation (.xml and
        // .bin files) or ONNX (.onnx file) format
        CNNNetwork network = ie.ReadNetwork(input_model);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Reshape model
        // -------------------------------------------------
        size_t netInputSize = isBatchedBlobSupported(ie, device_name) ? image_names.size() : 1;
        ICNNNetwork::InputShapes inputShapes = network.getInputShapes();
        for (auto& shape : inputShapes) {
            auto& dims = shape.second;
            if (dims.empty()) {
                throw std::runtime_error("Network's input shapes have empty dimensions");
            }
            dims[0] = netInputSize;
        }
        network.reshape(inputShapes);
        size_t batchSize = network.getBatchSize();
        std::cout << "Batch size is " << batchSize << std::endl;
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 3. Configure input and output
        // -------------------------------------------
        // --------------------------- Prepare input blobs
        // -----------------------------------------------------
        if (network.getInputsInfo().empty()) {
            std::cerr << "Network inputs info is empty" << std::endl;
            return EXIT_FAILURE;
        }
        InputInfo::Ptr input_info = network.getInputsInfo().begin()->second;
        std::string input_name = network.getInputsInfo().begin()->first;

        input_info->setLayout(Layout::NCHW);
        input_info->setPrecision(Precision::U8);
        // set input resize algorithm to enable input autoresize
        input_info->getPreProcess().setResizeAlgorithm(ResizeAlgorithm::RESIZE_BILINEAR);
        // set input color format to ColorFormat::NV12 to enable automatic input
        // color format pre-processing
        input_info->getPreProcess().setColorFormat(ColorFormat::NV12);

        // --------------------------- Prepare output blobs
        // ----------------------------------------------------
        if (network.getOutputsInfo().empty()) {
            std::cerr << "Network outputs info is empty" << std::endl;
            return EXIT_FAILURE;
        }
        DataPtr output_info = network.getOutputsInfo().begin()->second;
        std::string output_name = network.getOutputsInfo().begin()->first;

        output_info->setPrecision(Precision::FP32);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 4. Loading a model to the device
        // ----------------------------------------
        ExecutableNetwork executable_network = ie.LoadNetwork(network, device_name);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 5. Create an infer request
        // ----------------------------------------------
        InferRequest infer_request = executable_network.CreateInferRequest();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 6. Prepare input
        // --------------------------------------------------------
        auto image_bufs = readImagesDataFromFiles(image_names, input_width * (input_height * 3 / 2));

        auto inputs = readInputBlobs(image_bufs, input_width, input_height);

        // If batch_size > 1 => batched blob supported => replace all inputs by a
        // BatchedBlob
        if (netInputSize > 1) {
            assert(netInputSize == inputs.size());
            std::cout << "Infer using BatchedBlob of NV12 images." << std::endl;
            Blob::Ptr batched_input = make_shared_blob<BatchedBlob>(inputs);
            inputs = {batched_input};
        }

        /** Read labels from file (e.g. AlexNet.labels) **/
        std::string labelFileName = fileNameNoExt(input_model) + ".labels";
        std::vector<std::string> labels;

        std::ifstream inputFile;
        inputFile.open(labelFileName, std::ios::in);
        if (inputFile.is_open()) {
            std::string strLine;
            while (std::getline(inputFile, strLine)) {
                trim(strLine);
                labels.push_back(strLine);
            }
        }

        for (size_t i = 0; i < inputs.size(); i++) {
            const auto& input = inputs[i];
            // --------------------------- Set the input blob to the InferRequest
            // ------------------------------
            infer_request.SetBlob(input_name, input);
            // -------------------------------------------------------------------------------------------------

            // --------------------------- Step 7. Do inference
            // -----------------------------------------------------
            /* Running the request synchronously */
            infer_request.Infer();
            // -------------------------------------------------------------------------------------------------

            // --------------------------- Step 8. Process output
            // ---------------------------------------------------
            Blob::Ptr output = infer_request.GetBlob(output_name);

            // Print classification results
            const auto names_offset = image_names.begin() + netInputSize * i;
            std::vector<std::string> names(names_offset, names_offset + netInputSize);

            ClassificationResult classificationResult(output, names, netInputSize, 10, labels);
            classificationResult.print();
            // -------------------------------------------------------------------------------------------------
        }
    } catch (const std::exception& ex) {
        std::cerr << ex.what() << std::endl;
        return EXIT_FAILURE;
    }
    std::cout << "This sample is an API example, for any performance measurements "
                 "please use the dedicated benchmark_app tool"
              << std::endl;
    return EXIT_SUCCESS;
}
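
The sample expects raw NV12 input: width * height bytes of Y followed by width * height / 2 bytes of interleaved UV, which is why readImagesDataFromFiles is asked for input_width * (input_height * 3 / 2) bytes per image. If only ordinary images are at hand, the sketch below is one possible way to produce a test .yuv file with OpenCV; input.jpg and input.yuv are placeholder names and the image must have even width and height, matching the check in parseImageSize:

#include <opencv2/opencv.hpp>
#include <cstring>
#include <fstream>
#include <vector>

// Convert a BGR image to a raw NV12 buffer and dump it to a .yuv file the
// sample can consume. OpenCV produces I420 (planar U then V); NV12 keeps the
// same Y plane but interleaves U and V, so the chroma bytes are re-packed.
int main() {
    cv::Mat bgr = cv::imread("input.jpg");                 // placeholder path
    if (bgr.empty())
        return 1;
    const size_t w = bgr.cols, h = bgr.rows;               // must both be even
    cv::Mat i420;
    cv::cvtColor(bgr, i420, cv::COLOR_BGR2YUV_I420);       // (h*3/2) x w, single channel

    std::vector<uint8_t> nv12(w * h * 3 / 2);
    const uint8_t* y = i420.data;
    std::memcpy(nv12.data(), y, w * h);                    // Y plane is identical
    const uint8_t* u = y + w * h;
    const uint8_t* v = u + w * h / 4;
    for (size_t i = 0; i < w * h / 4; ++i) {               // interleave U/V -> NV12
        nv12[w * h + 2 * i] = u[i];
        nv12[w * h + 2 * i + 1] = v[i];
    }

    std::ofstream out("input.yuv", std::ios::binary);
    out.write(reinterpret_cast<const char*>(nv12.data()), nv12.size());
    return 0;
}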

④ /opt/intel/openvino_2021.4.689/inference_engine/samples/cpp/hello_reshape_ssd

// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <inference_engine.hpp>
#include <memory>
#include <ngraph/ngraph.hpp>
#include <samples/ocv_common.hpp>
#include <string>
#include <vector>

#include "reshape_ssd_extension.hpp"

using namespace InferenceEngine;

int main(int argc, char* argv[]) {
    try {
        // ------------------------------ Parsing and validation of input arguments
        // ---------------------------------
        if (argc != 5) {
            std::cout << "Usage : " << argv[0] << " <path_to_model> <path_to_image> <device> <batch>" << std::endl;
            return EXIT_FAILURE;
        }
        const std::string input_model {argv[1]};
        const std::string input_image_path {argv[2]};
        const std::string device_name {argv[3]};
        const size_t batch_size {std::stoul(argv[4])};
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 1. Initialize inference engine core
        // -------------------------------------
        Core ie;

        IExtensionPtr inPlaceExtension;
        if (device_name.find("CPU") != std::string::npos) {
            inPlaceExtension = std::make_shared<InPlaceExtension>();
            // register sample's custom kernel (CustomReLU)
            ie.AddExtension(inPlaceExtension);
        }
        // -----------------------------------------------------------------------------------------------------

        // Step 2. Read a model in OpenVINO Intermediate Representation (.xml and
        // .bin files) or ONNX (.onnx file) format
        CNNNetwork network = ie.ReadNetwork(input_model);

        OutputsDataMap outputs_info(network.getOutputsInfo());
        InputsDataMap inputs_info(network.getInputsInfo());
        if (inputs_info.size() != 1 || outputs_info.size() != 1)
            throw std::logic_error("Sample supports clean SSD network with one input and one output");

        // --------------------------- Resize network to match image sizes and given
        // batch----------------------
        auto input_shapes = network.getInputShapes();
        std::string input_name;
        SizeVector input_shape;
        std::tie(input_name, input_shape) = *input_shapes.begin();
        cv::Mat image = cv::imread(input_image_path);
        input_shape[0] = batch_size;
        input_shape[2] = static_cast<size_t>(image.rows);
        input_shape[3] = static_cast<size_t>(image.cols);
        input_shapes[input_name] = input_shape;
        std::cout << "Resizing network to the image size = [" << image.rows << "x" << image.cols << "] "
                  << "with batch = " << batch_size << std::endl;
        network.reshape(input_shapes);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 3. Configure input & output
        // ---------------------------------------------
        // --------------------------- Prepare input blobs
        // -----------------------------------------------------
        InputInfo::Ptr input_info;
        std::tie(input_name, input_info) = *inputs_info.begin();
        // Set input layout and precision
        input_info->setLayout(Layout::NCHW);
        input_info->setPrecision(Precision::U8);
        // --------------------------- Prepare output blobs
        // ----------------------------------------------------
        DataPtr output_info;
        std::string output_name;
        std::tie(output_name, output_info) = *outputs_info.begin();
        // SSD has an additional post-processing DetectionOutput layer
        // that simplifies output filtering, try to find it.
        if (auto ngraphFunction = network.getFunction()) {
            for (const auto& op : ngraphFunction->get_ops()) {
                if (op->get_type_info() == ngraph::op::DetectionOutput::type_info) {
                    if (output_info->getName() != op->get_friendly_name()) {
                        throw std::logic_error("Detection output op does not produce a network output");
                    }
                    break;
                }
            }
        }

        if (output_info == nullptr) {
            IE_THROW() << "[SAMPLES] internal error - output information is empty";
        }

        // Validate the output shape before indexing into it
        const SizeVector output_shape = output_info->getTensorDesc().getDims();
        if (output_shape.size() != 4) {
            throw std::logic_error("Incorrect output dimensions for SSD model");
        }
        const size_t max_proposal_count = output_shape[2];
        const size_t object_size = output_shape[3];
        if (object_size != 7) {
            throw std::logic_error("Output item should have 7 as a last dimension");
        }

        output_info->setPrecision(Precision::FP32);

        auto dumpVec = [](const SizeVector& vec) -> std::string {
            if (vec.empty())
                return "[]";
            std::stringstream oss;
            oss << "[" << vec[0];
            for (size_t i = 1; i < vec.size(); i++)
                oss << "," << vec[i];
            oss << "]";
            return oss.str();
        };
        std::cout << "Resulting input shape = " << dumpVec(input_shape) << std::endl;
        std::cout << "Resulting output shape = " << dumpVec(output_shape) << std::endl;
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 4. Loading a model to the device
        // ------------------------------------------
        ExecutableNetwork executable_network = ie.LoadNetwork(network, device_name);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 5. Create an infer request
        // -------------------------------------------------
        InferRequest infer_request = executable_network.CreateInferRequest();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 6. Prepare input
        // --------------------------------------------------------
        Blob::Ptr input = infer_request.GetBlob(input_name);
        for (size_t b = 0; b < batch_size; b++) {
            matU8ToBlob<uint8_t>(image, input, b);
        }
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 7. Do inference
        // --------------------------------------------------------
        infer_request.Infer();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 8. Process output
        // ------------------------------------------------------
        Blob::Ptr output = infer_request.GetBlob(output_name);
        MemoryBlob::CPtr moutput = as<MemoryBlob>(output);
        if (!moutput) {
            throw std::logic_error("We expect output to be inherited from MemoryBlob, "
                                   "but by fact we were not able to cast output to MemoryBlob");
        }
        // locked memory holder should be alive all time while access to its buffer
        // happens
        auto moutputHolder = moutput->rmap();
        const float* detection = moutputHolder.as<const float*>();

        /* Each detection has image_id that denotes processed image */
        for (size_t cur_proposal = 0; cur_proposal < max_proposal_count; cur_proposal++) {
            float image_id = detection[cur_proposal * object_size + 0];
            float label = detection[cur_proposal * object_size + 1];
            float confidence = detection[cur_proposal * object_size + 2];
            /* CPU and GPU devices differ in the DetectionOutput layer, so we
             * need both checks */
            if (image_id < 0 || confidence == 0.0f) {
                continue;
            }

            float xmin = detection[cur_proposal * object_size + 3] * image.cols;
            float ymin = detection[cur_proposal * object_size + 4] * image.rows;
            float xmax = detection[cur_proposal * object_size + 5] * image.cols;
            float ymax = detection[cur_proposal * object_size + 6] * image.rows;

            if (confidence > 0.5f) {
                /** Drawing only objects with >50% probability **/
                std::ostringstream conf;
                conf << ":" << std::fixed << std::setprecision(3) << confidence;
                cv::rectangle(image, cv::Point2f(xmin, ymin), cv::Point2f(xmax, ymax), cv::Scalar(0, 0, 255));
                std::cout << "[" << cur_proposal << "," << label << "] element, prob = " << confidence << ", bbox = (" << xmin << "," << ymin << ")-(" << xmax
                          << "," << ymax << ")"
                          << ", batch id = " << image_id << std::endl;
            }
        }

        cv::imwrite("hello_reshape_ssd_output.jpg", image);
        std::cout << "The resulting image was saved in the file: "
                     "hello_reshape_ssd_output.jpg"
                  << std::endl;
        // -----------------------------------------------------------------------------------------------------
    } catch (const std::exception& ex) {
        std::cerr << ex.what() << std::endl;
        return EXIT_FAILURE;
    }
    std::cout << std::endl
              << "This sample is an API example, for any performance measurements "
                 "please use the dedicated benchmark_app tool"
              << std::endl;
    return EXIT_SUCCESS;
}
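
matU8ToBlob comes from samples/ocv_common.hpp and is not reproduced above. Roughly, it resizes the cv::Mat to the blob's spatial size when needed and copies the interleaved HWC BGR bytes into one batch slot of the NCHW U8 blob. The helper below is only an approximation of that behaviour for reference, not the actual implementation:

#include <inference_engine.hpp>
#include <opencv2/opencv.hpp>

// Approximation of matU8ToBlob from samples/ocv_common.hpp: copy an HWC BGR
// image into batch slot `batchIndex` of an NCHW U8 blob, resizing if needed.
template <typename T>
void fillBlobFromMat(const cv::Mat& img, InferenceEngine::Blob::Ptr& blob, size_t batchIndex) {
    const auto dims = blob->getTensorDesc().getDims();    // {N, C, H, W}
    const size_t channels = dims[1], height = dims[2], width = dims[3];

    cv::Mat resized = img;
    if (static_cast<size_t>(img.rows) != height || static_cast<size_t>(img.cols) != width)
        cv::resize(img, resized, cv::Size(static_cast<int>(width), static_cast<int>(height)));

    auto mblob = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
    auto holder = mblob->wmap();                          // keep alive while writing
    T* data = holder.as<T*>() + batchIndex * channels * height * width;

    for (size_t c = 0; c < channels; ++c)                 // HWC (BGR) -> CHW
        for (size_t h = 0; h < height; ++h)
            for (size_t w = 0; w < width; ++w)
                data[c * height * width + h * width + w] =
                    resized.at<cv::Vec3b>(static_cast<int>(h), static_cast<int>(w))[c];
}

With that assumption, the call matU8ToBlob<uint8_t>(image, input, b) in Step 6 above could be replaced by fillBlobFromMat<uint8_t>(image, input, b).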

⑤ /opt/intel/openvino_2021.4.689/inference_engine/samples/cpp/object_detection_sample_ssd

// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <format_reader_ptr.h>
#include <gflags/gflags.h>

#include <algorithm>
#include <inference_engine.hpp>
#include <iostream>
#include <map>
#include <memory>
#include <ngraph/ngraph.hpp>
#include <samples/args_helper.hpp>
#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <string>
#include <vector>

#include "object_detection_sample_ssd.h"

using namespace InferenceEngine;

/**
 * @brief Checks input args
 * @param argc number of args
 * @param argv list of input arguments
 * @return bool status true(Success) or false(Fail)
 */
bool ParseAndCheckCommandLine(int argc, char* argv[]) {
    gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true);
    if (FLAGS_h) {
        showUsage();
        showAvailableDevices();
        return false;
    }

    slog::info << "Parsing input parameters" << slog::endl;

    if (FLAGS_m.empty()) {
        showUsage();
        throw std::logic_error("Model is required but not set. Please set -m option.");
    }

    if (FLAGS_i.empty()) {
        showUsage();
        throw std::logic_error("Input is required but not set. Please set -i option.");
    }

    return true;
}

/**
 * \brief The entry point for the Inference Engine object_detection sample application
 * \file object_detection_sample_ssd/main.cpp
 * \example object_detection_sample_ssd/main.cpp
 */
int main(int argc, char* argv[]) {
    try {
        /** This sample covers certain topology and cannot be generalized for any
         * object detection one **/
        // ------------------------------ Get Inference Engine version
        // ------------------------------------------------------
        slog::info << "InferenceEngine: " << GetInferenceEngineVersion() << "\n";

        // --------------------------- Parsing and validation of input arguments
        // ---------------------------------
        if (!ParseAndCheckCommandLine(argc, argv)) {
            return 0;
        }
        // -----------------------------------------------------------------------------------------------------

        // ------------------------------ Read input
        // -----------------------------------------------------------
        /** This vector stores paths to the processed images **/
        std::vector<std::string> images;
        parseInputFilesArguments(images);
        if (images.empty())
            throw std::logic_error("No suitable images were found");
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 1. Initialize inference engine core
        // -------------------------------------
        slog::info << "Loading Inference Engine" << slog::endl;
        Core ie;
        // ------------------------------ Get Available Devices
        // ------------------------------------------------------
        slog::info << "Device info: " << slog::endl;
        std::cout << ie.GetVersions(FLAGS_d) << std::endl;

        if (!FLAGS_l.empty()) {
            // Custom CPU extension is loaded as a shared library and passed as a
            // pointer to base extension
            IExtensionPtr extension_ptr = std::make_shared<Extension>(FLAGS_l);
            ie.AddExtension(extension_ptr);
            slog::info << "Custom extension loaded: " << FLAGS_l << slog::endl;
        }

        if (!FLAGS_c.empty() && (FLAGS_d == "GPU" || FLAGS_d == "MYRIAD" || FLAGS_d == "HDDL")) {
            // Config for device plugin custom extension is loaded from an .xml
            // description
            ie.SetConfig({{PluginConfigParams::KEY_CONFIG_FILE, FLAGS_c}}, FLAGS_d);
            slog::info << "Config for " << FLAGS_d << " device plugin custom extension loaded: " << FLAGS_c << slog::endl;
        }
        // -----------------------------------------------------------------------------------------------------

        // Step 2. Read a model in OpenVINO Intermediate Representation (.xml and
        // .bin files) or ONNX (.onnx file) format
        slog::info << "Loading network files:" << slog::endl << FLAGS_m << slog::endl;

        /** Read network model **/
        CNNNetwork network = ie.ReadNetwork(FLAGS_m);
        // -----------------------------------------------------------------------------------------------------
        // --------------------------- Step 3. Configure input & output
        // ---------------------------------------------
        // -------------------------------- Prepare input blobs
        // --------------------------------------------------
        slog::info << "Preparing input blobs" << slog::endl;

        /** Taking information about all topology inputs **/
        InputsDataMap inputsInfo(network.getInputsInfo());

        /**
         * Some networks have SSD-like output format (ending with DetectionOutput
         * layer), but having 2 inputs as Faster-RCNN: one for image and one for
         * "image info".
         *
         * Although object_detection_sample_ssd's main task is to support clean SSD,
         * it could score the networks with two inputs as well. For such networks
         * imInfoInputName will contain the "second" input name.
         */
        if (inputsInfo.size() != 1 && inputsInfo.size() != 2)
            throw std::logic_error("Sample supports topologies only with 1 or 2 inputs");

        std::string imageInputName, imInfoInputName;

        InputInfo::Ptr inputInfo = nullptr;

        SizeVector inputImageDims;
        /** Stores input image **/

        /** Iterating over all input blobs **/
        for (auto& item : inputsInfo) {
            /** Working with first input tensor that stores image **/
            if (item.second->getInputData()->getTensorDesc().getDims().size() == 4) {
                imageInputName = item.first;

                inputInfo = item.second;

                slog::info << "Batch size is " << std::to_string(network.getBatchSize()) << slog::endl;

                /** Creating first input blob **/
                Precision inputPrecision = Precision::U8;
                item.second->setPrecision(inputPrecision);
            } else if (item.second->getInputData()->getTensorDesc().getDims().size() == 2) {
                imInfoInputName = item.first;

                Precision inputPrecision = Precision::FP32;
                item.second->setPrecision(inputPrecision);
                if ((item.second->getTensorDesc().getDims()[1] != 3 && item.second->getTensorDesc().getDims()[1] != 6)) {
                    throw std::logic_error("Invalid input info. Should be 3 or 6 values length");
                }
            }
        }

        if (inputInfo == nullptr) {
            inputInfo = inputsInfo.begin()->second;
        }
        // --------------------------- Prepare output blobs
        // -------------------------------------------------
        slog::info << "Preparing output blobs" << slog::endl;

        OutputsDataMap outputsInfo(network.getOutputsInfo());

        std::string outputName;
        DataPtr outputInfo;

        outputInfo = outputsInfo.begin()->second;
        outputName = outputInfo->getName();
        // SSD has an additional post-processing DetectionOutput layer
        // that simplifies output filtering, try to find it.
        if (auto ngraphFunction = network.getFunction()) {
            for (const auto& out : outputsInfo) {
                for (const auto& op : ngraphFunction->get_ops()) {
                    if (op->get_type_info() == ngraph::op::DetectionOutput::type_info && op->get_friendly_name() == out.second->getName()) {
                        outputName = out.first;
                        outputInfo = out.second;
                        break;
                    }
                }
            }
        }

        if (outputInfo == nullptr) {
            throw std::logic_error("Can't find a DetectionOutput layer in the topology");
        }

        const SizeVector outputDims = outputInfo->getTensorDesc().getDims();

        // Validate the output shape before indexing into it
        if (outputDims.size() != 4) {
            throw std::logic_error("Incorrect output dimensions for SSD model");
        }

        const int maxProposalCount = outputDims[2];
        const int objectSize = outputDims[3];

        if (objectSize != 7) {
            throw std::logic_error("Output item should have 7 as a last dimension");
        }

        /** Set the precision of output data provided by the user, should be called
         * before load of the network to the device **/
        outputInfo->setPrecision(Precision::FP32);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 4. Loading model to the device
        // ------------------------------------------
        slog::info << "Loading model to the device" << slog::endl;

        ExecutableNetwork executable_network = ie.LoadNetwork(network, FLAGS_d, parseConfig(FLAGS_config));
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 5. Create infer request
        // -------------------------------------------------
        slog::info << "Create infer request" << slog::endl;
        InferRequest infer_request = executable_network.CreateInferRequest();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 6. Prepare input
        // --------------------------------------------------------
        /** Collect images data ptrs **/
        std::vector<std::shared_ptr<unsigned char>> imagesData, originalImagesData;
        std::vector<size_t> imageWidths, imageHeights;
        for (auto& i : images) {
            FormatReader::ReaderPtr reader(i.c_str());
            if (reader.get() == nullptr) {
                slog::warn << "Image " + i + " cannot be read!" << slog::endl;
                continue;
            }
            /** Store image data **/
            std::shared_ptr<unsigned char> originalData(reader->getData());
            std::shared_ptr<unsigned char> data(reader->getData(inputInfo->getTensorDesc().getDims()[3], inputInfo->getTensorDesc().getDims()[2]));
            if (data.get() != nullptr) {
                originalImagesData.push_back(originalData);
                imagesData.push_back(data);
                imageWidths.push_back(reader->width());
                imageHeights.push_back(reader->height());
            }
        }
        if (imagesData.empty())
            throw std::logic_error("Valid input images were not found!");

        size_t batchSize = network.getBatchSize();
        slog::info << "Batch size is " << std::to_string(batchSize) << slog::endl;
        if (batchSize != imagesData.size()) {
            slog::warn << "Number of images " + std::to_string(imagesData.size()) + " doesn't match batch size " + std::to_string(batchSize) << slog::endl;
            batchSize = std::min(batchSize, imagesData.size());
            slog::warn << "Number of images to be processed is " << std::to_string(batchSize) << slog::endl;
        }

        /** Creating input blob **/
        Blob::Ptr imageInput = infer_request.GetBlob(imageInputName);

        /** Filling input tensor with images. First b channel, then g and r channels
         * **/
        MemoryBlob::Ptr mimage = as<MemoryBlob>(imageInput);
        if (!mimage) {
            slog::err << "We expect image blob to be inherited from MemoryBlob, but "
                         "by fact we were not able "
                         "to cast imageInput to MemoryBlob"
                      << slog::endl;
            return 1;
        }
        // locked memory holder should be alive all time while access to its buffer
        // happens
        auto minputHolder = mimage->wmap();

        size_t num_channels = mimage->getTensorDesc().getDims()[1];
        size_t image_size = mimage->getTensorDesc().getDims()[3] * mimage->getTensorDesc().getDims()[2];

        unsigned char* data = minputHolder.as<unsigned char*>();

        /** Iterate over all input images limited by batch size  **/
        for (size_t image_id = 0; image_id < std::min(imagesData.size(), batchSize); ++image_id) {
            /** Iterate over all pixel in image (b,g,r) **/
            for (size_t pid = 0; pid < image_size; pid++) {
                /** Iterate over all channels **/
                for (size_t ch = 0; ch < num_channels; ++ch) {
                    /**          [images stride + channels stride + pixel id ] all in
                     * bytes            **/
                    data[image_id * image_size * num_channels + ch * image_size + pid] = imagesData.at(image_id).get()[pid * num_channels + ch];
                }
            }
        }

        if (imInfoInputName != "") {
            Blob::Ptr input2 = infer_request.GetBlob(imInfoInputName);
            auto imInfoDim = inputsInfo.find(imInfoInputName)->second->getTensorDesc().getDims()[1];

            /** Fill input tensor with values **/
            MemoryBlob::Ptr minput2 = as<MemoryBlob>(input2);
            if (!minput2) {
                slog::err << "We expect input2 blob to be inherited from MemoryBlob, "
                             "but by fact we were not able "
                             "to cast input2 to MemoryBlob"
                          << slog::endl;
                return 1;
            }
            // locked memory holder should be alive all time while access to its
            // buffer happens
            auto minput2Holder = minput2->wmap();
            float* p = minput2Holder.as<PrecisionTrait<Precision::FP32>::value_type*>();

            for (size_t image_id = 0; image_id < std::min(imagesData.size(), batchSize); ++image_id) {
                p[image_id * imInfoDim + 0] = static_cast<float>(inputsInfo[imageInputName]->getTensorDesc().getDims()[2]);
                p[image_id * imInfoDim + 1] = static_cast<float>(inputsInfo[imageInputName]->getTensorDesc().getDims()[3]);
                for (size_t k = 2; k < imInfoDim; k++) {
                    p[image_id * imInfoDim + k] = 1.0f;  // all scale factors are set to 1.0
                }
            }
        }
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 7. Do inference
        // ---------------------------------------------------------
        slog::info << "Start inference" << slog::endl;
        infer_request.Infer();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 8. Process output
        // -------------------------------------------------------
        slog::info << "Processing output blobs" << slog::endl;

        const Blob::Ptr output_blob = infer_request.GetBlob(outputName);
        MemoryBlob::CPtr moutput = as<MemoryBlob>(output_blob);
        if (!moutput) {
            throw std::logic_error("We expect output to be inherited from MemoryBlob, "
                                   "but by fact we were not able to cast output to MemoryBlob");
        }
        // locked memory holder should be alive all time while access to its buffer
        // happens
        auto moutputHolder = moutput->rmap();
        const float* detection = moutputHolder.as<const PrecisionTrait<Precision::FP32>::value_type*>();

        std::vector<std::vector<int>> boxes(batchSize);
        std::vector<std::vector<int>> classes(batchSize);

        /* Each detection has image_id that denotes processed image */
        for (int curProposal = 0; curProposal < maxProposalCount; curProposal++) {
            auto image_id = static_cast<int>(detection[curProposal * objectSize + 0]);
            if (image_id < 0) {
                break;
            }

            float confidence = detection[curProposal * objectSize + 2];
            auto label = static_cast<int>(detection[curProposal * objectSize + 1]);
            auto xmin = static_cast<int>(detection[curProposal * objectSize + 3] * imageWidths[image_id]);
            auto ymin = static_cast<int>(detection[curProposal * objectSize + 4] * imageHeights[image_id]);
            auto xmax = static_cast<int>(detection[curProposal * objectSize + 5] * imageWidths[image_id]);
            auto ymax = static_cast<int>(detection[curProposal * objectSize + 6] * imageHeights[image_id]);

            std::cout << "[" << curProposal << "," << label << "] element, prob = " << confidence << "    (" << xmin << "," << ymin << ")-(" << xmax << ","
                      << ymax << ")"
                      << " batch id : " << image_id;

            if (confidence > 0.5) {
                /** Drawing only objects with >50% probability **/
                classes[image_id].push_back(label);
                boxes[image_id].push_back(xmin);
                boxes[image_id].push_back(ymin);
                boxes[image_id].push_back(xmax - xmin);
                boxes[image_id].push_back(ymax - ymin);
                std::cout << " WILL BE PRINTED!";
            }
            std::cout << std::endl;
        }

        for (size_t batch_id = 0; batch_id < batchSize; ++batch_id) {
            addRectangles(originalImagesData[batch_id].get(), imageHeights[batch_id], imageWidths[batch_id], boxes[batch_id], classes[batch_id],
                          BBOX_THICKNESS);
            const std::string image_path = "out_" + std::to_string(batch_id) + ".bmp";
            if (writeOutputBmp(image_path, originalImagesData[batch_id].get(), imageHeights[batch_id], imageWidths[batch_id])) {
                slog::info << "Image " + image_path + " created!" << slog::endl;
            } else {
                throw std::logic_error(std::string("Can't create a file: ") + image_path);
            }
        }
        // -----------------------------------------------------------------------------------------------------
    } catch (const std::exception& error) {
        slog::err << error.what() << slog::endl;
        return 1;
    } catch (...) {
        slog::err << "Unknown/internal exception happened." << slog::endl;
        return 1;
    }

    slog::info << "Execution successful" << slog::endl;
    slog::info << slog::endl
               << "This sample is an API example, for any performance measurements "
                  "please use the dedicated benchmark_app tool"
               << slog::endl;
    return 0;
}
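
The loop above walks an SSD-style detection output: each proposal is a 7-float record [image_id, label, confidence, xmin, ymin, xmax, ymax], the coordinates are normalized to [0,1], and an image_id of -1 terminates the valid proposals. The following standalone sketch (not part of the Intel sample; parseSsdOutput and the dummy data are illustrative assumptions) condenses that parsing into a small helper:

// Minimal sketch of parsing an SSD-style [1,1,N,7] FP32 detection output.
#include <cstddef>
#include <iostream>
#include <vector>

struct Detection {
    int imageId;
    int label;
    float confidence;
    int xmin, ymin, xmax, ymax;  // pixel coordinates
};

// Hypothetical helper mirroring the loop in the object detection sample:
// converts the raw FP32 buffer into pixel-space boxes above a threshold.
std::vector<Detection> parseSsdOutput(const float* data, std::size_t maxProposals,
                                      int imageWidth, int imageHeight, float threshold) {
    const std::size_t objectSize = 7;  // fields per proposal
    std::vector<Detection> result;
    for (std::size_t p = 0; p < maxProposals; ++p) {
        const float* rec = data + p * objectSize;
        int imageId = static_cast<int>(rec[0]);
        if (imageId < 0)  // -1 marks the end of valid detections
            break;
        float conf = rec[2];
        if (conf < threshold)
            continue;
        result.push_back({imageId, static_cast<int>(rec[1]), conf,
                          static_cast<int>(rec[3] * imageWidth),
                          static_cast<int>(rec[4] * imageHeight),
                          static_cast<int>(rec[5] * imageWidth),
                          static_cast<int>(rec[6] * imageHeight)});
    }
    return result;
}

int main() {
    // Two dummy proposals followed by the -1 terminator, just to exercise the parser.
    std::vector<float> raw = {
        0, 1, 0.9f, 0.1f, 0.1f, 0.5f, 0.5f,
        0, 2, 0.3f, 0.2f, 0.2f, 0.4f, 0.4f,
        -1, 0, 0,   0,    0,    0,    0,
    };
    for (const auto& d : parseSsdOutput(raw.data(), raw.size() / 7, 640, 480, 0.5f)) {
        std::cout << "label " << d.label << " prob " << d.confidence << " ("
                  << d.xmin << "," << d.ymin << ")-(" << d.xmax << "," << d.ymax << ")\n";
    }
    return 0;
}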

⑥ /opt/intel/openvino_2021.4.689/inference_engine/samples/cpp/style_transfer_sample

// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <format_reader_ptr.h>

#include <inference_engine.hpp>
#include <memory>
#include <samples/args_helper.hpp>
#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <string>
#include <vector>

#include "style_transfer_sample.h"

using namespace InferenceEngine;

/**
 * @brief Checks input args
 * @param argc number of args
 * @param argv list of input arguments
 * @return bool status true(Success) or false(Fail)
 */
bool ParseAndCheckCommandLine(int argc, char* argv[]) {
    slog::info << "Parsing input parameters" << slog::endl;

    gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true);
    if (FLAGS_h) {
        showUsage();
        showAvailableDevices();
        return false;
    }

    if (FLAGS_m.empty()) {
        showUsage();
        throw std::logic_error("Model is required but not set. Please set -m option.");
    }

    if (FLAGS_i.empty()) {
        showUsage();
        throw std::logic_error("Input is required but not set. Please set -i option.");
    }

    return true;
}

/**
 * @brief The entry point for the Inference Engine style transfer sample application
 * @file style_transfer_sample/main.cpp
 * @example style_transfer_sample/main.cpp
 */
int main(int argc, char* argv[]) {
    try {
        // ------------------------------ Get Inference Engine version
        // ------------------------------------------------------
        slog::info << "InferenceEngine: " << GetInferenceEngineVersion() << slog::endl;
        // ------------------------------ Parsing and validation of input arguments
        // ---------------------------------
        if (!ParseAndCheckCommandLine(argc, argv)) {
            return 0;
        }

        /** This vector stores paths to the processed images **/
        std::vector<std::string> imageNames;
        parseInputFilesArguments(imageNames);
        if (imageNames.empty())
            throw std::logic_error("No suitable images were found");
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 1. Initialize inference engine core
        // -------------------------------------
        slog::info << "Loading Inference Engine" << slog::endl;
        Core ie;

        // ------------------------------ Get Available Devices
        // ------------------------------------------------------
        slog::info << "Device info: " << slog::endl;
        std::cout << ie.GetVersions(FLAGS_d) << std::endl;

        if (!FLAGS_l.empty()) {
            // Custom CPU extension is loaded as a shared library and passed as a
            // pointer to base extension
            IExtensionPtr extension_ptr = std::make_shared<Extension>(FLAGS_l);
            ie.AddExtension(extension_ptr);
            slog::info << "Custom Extension loaded: " << FLAGS_l << slog::endl;
        }
        if (!FLAGS_c.empty() && (FLAGS_d == "GPU" || FLAGS_d == "MYRIAD" || FLAGS_d == "HDDL")) {
            // Config for device plugin custom extension is loaded from an .xml
            // description
            ie.SetConfig({{PluginConfigParams::KEY_CONFIG_FILE, FLAGS_c}}, FLAGS_d);
            slog::info << "Config for " << FLAGS_d << " device plugin custom extension loaded: " << FLAGS_c << slog::endl;
        }
        // -----------------------------------------------------------------------------------------------------

        // Step 2. Read a model in OpenVINO Intermediate Representation (.xml and
        // .bin files) or ONNX (.onnx file) format
        slog::info << "Loading network files:" << slog::endl << FLAGS_m << slog::endl;

        /** Read network model **/
        CNNNetwork network = ie.ReadNetwork(FLAGS_m);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 3. Configure input & output
        // ---------------------------------------------

        // --------------------------- Prepare input blobs
        // -----------------------------------------------------
        slog::info << "Preparing input blobs" << slog::endl;

        /** Taking information about all topology inputs **/
        InputsDataMap inputInfo(network.getInputsInfo());

        if (inputInfo.size() != 1)
            throw std::logic_error("Sample supports topologies only with 1 input");
        auto inputInfoItem = *inputInfo.begin();

        /** Iterate over all the input blobs **/
        std::vector<std::shared_ptr<uint8_t>> imagesData;

        /** Specifying the precision of input data.
         * This should be called before load of the network to the device **/
        inputInfoItem.second->setPrecision(Precision::FP32);

        /** Collect images data ptrs **/
        for (auto& i : imageNames) {
            FormatReader::ReaderPtr reader(i.c_str());
            if (reader.get() == nullptr) {
                slog::warn << "Image " + i + " cannot be read!" << slog::endl;
                continue;
            }
            /** Store image data **/
            std::shared_ptr<unsigned char> data(
                reader->getData(inputInfoItem.second->getTensorDesc().getDims()[3], inputInfoItem.second->getTensorDesc().getDims()[2]));
            if (data.get() != nullptr) {
                imagesData.push_back(data);
            }
        }
        if (imagesData.empty())
            throw std::logic_error("Valid input images were not found!");

        /** Setting batch size using image count **/
        network.setBatchSize(imagesData.size());
        slog::info << "Batch size is " << std::to_string(network.getBatchSize()) << slog::endl;

        // ------------------------------ Prepare output blobs
        // -------------------------------------------------
        slog::info << "Preparing output blobs" << slog::endl;

        OutputsDataMap outputInfo(network.getOutputsInfo());
        // BlobMap outputBlobs;
        std::string firstOutputName;

        const float meanValues[] = {static_cast<const float>(FLAGS_mean_val_r), static_cast<const float>(FLAGS_mean_val_g),
                                    static_cast<const float>(FLAGS_mean_val_b)};

        for (auto& item : outputInfo) {
            if (firstOutputName.empty()) {
                firstOutputName = item.first;
            }
            DataPtr outputData = item.second;
            if (!outputData) {
                throw std::logic_error("output data pointer is not valid");
            }

            item.second->setPrecision(Precision::FP32);
        }
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 4. Loading model to the device
        // ------------------------------------------
        slog::info << "Loading model to the device" << slog::endl;
        ExecutableNetwork executable_network = ie.LoadNetwork(network, FLAGS_d);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 5. Create infer request
        // -------------------------------------------------
        slog::info << "Create infer request" << slog::endl;
        InferRequest infer_request = executable_network.CreateInferRequest();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 6. Prepare input
        // --------------------------------------------------------
        /** Iterate over all the input blobs **/
        for (const auto& item : inputInfo) {
            MemoryBlob::Ptr minput = as<MemoryBlob>(infer_request.GetBlob(item.first));
            if (!minput) {
                slog::err << "We expect input blob to be inherited from MemoryBlob, "
                          << "but by fact we were not able to cast it to MemoryBlob" << slog::endl;
                return 1;
            }
            // locked memory holder should be alive all time while access to its
            // buffer happens
            auto ilmHolder = minput->wmap();

            /** Filling input tensor with images. First b channel, then g and r
             * channels **/
            size_t num_channels = minput->getTensorDesc().getDims()[1];
            size_t image_size = minput->getTensorDesc().getDims()[3] * minput->getTensorDesc().getDims()[2];

            auto data = ilmHolder.as<PrecisionTrait<Precision::FP32>::value_type*>();
            if (data == nullptr)
                throw std::runtime_error("Input blob has not allocated buffer");
            /** Iterate over all input images **/
            for (size_t image_id = 0; image_id < imagesData.size(); ++image_id) {
                /** Iterate over all pixel in image (b,g,r) **/
                for (size_t pid = 0; pid < image_size; pid++) {
                    /** Iterate over all channels **/
                    for (size_t ch = 0; ch < num_channels; ++ch) {
                        /**          [images stride + channels stride + pixel id ] all in
                         * bytes            **/
                        data[image_id * image_size * num_channels + ch * image_size + pid] =
                            imagesData.at(image_id).get()[pid * num_channels + ch] - meanValues[ch];
                    }
                }
            }
        }
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 7. Do inference
        // ---------------------------------------------------------
        slog::info << "Start inference" << slog::endl;
        infer_request.Infer();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 8. Process output
        // -------------------------------------------------------
        MemoryBlob::CPtr moutput = as<MemoryBlob>(infer_request.GetBlob(firstOutputName));
        if (!moutput) {
            throw std::logic_error("We expect output to be inherited from MemoryBlob, "
                                   "but by fact we were not able to cast it to MemoryBlob");
        }
        // locked memory holder should be alive all time while access to its buffer
        // happens
        auto lmoHolder = moutput->rmap();
        const auto output_data = lmoHolder.as<const PrecisionTrait<Precision::FP32>::value_type*>();

        size_t num_images = moutput->getTensorDesc().getDims()[0];
        size_t num_channels = moutput->getTensorDesc().getDims()[1];
        size_t H = moutput->getTensorDesc().getDims()[2];
        size_t W = moutput->getTensorDesc().getDims()[3];
        size_t nPixels = W * H;

        slog::info << "Output size [N,C,H,W]: " << num_images << ", " << num_channels << ", " << H << ", " << W << slog::endl;

        {
            std::vector<float> data_img(nPixels * num_channels);

            for (size_t n = 0; n < num_images; n++) {
                for (size_t i = 0; i < nPixels; i++) {
                    data_img[i * num_channels] = static_cast<float>(output_data[i + n * nPixels * num_channels] + meanValues[0]);
                    data_img[i * num_channels + 1] = static_cast<float>(output_data[(i + nPixels) + n * nPixels * num_channels] + meanValues[1]);
                    data_img[i * num_channels + 2] = static_cast<float>(output_data[(i + 2 * nPixels) + n * nPixels * num_channels] + meanValues[2]);

                    float temp = data_img[i * num_channels];
                    data_img[i * num_channels] = data_img[i * num_channels + 2];
                    data_img[i * num_channels + 2] = temp;

                    if (data_img[i * num_channels] < 0)
                        data_img[i * num_channels] = 0;
                    if (data_img[i * num_channels] > 255)
                        data_img[i * num_channels] = 255;

                    if (data_img[i * num_channels + 1] < 0)
                        data_img[i * num_channels + 1] = 0;
                    if (data_img[i * num_channels + 1] > 255)
                        data_img[i * num_channels + 1] = 255;

                    if (data_img[i * num_channels + 2] < 0)
                        data_img[i * num_channels + 2] = 0;
                    if (data_img[i * num_channels + 2] > 255)
                        data_img[i * num_channels + 2] = 255;
                }
                std::string out_img_name = std::string("out" + std::to_string(n + 1) + ".bmp");
                std::ofstream outFile;
                outFile.open(out_img_name.c_str(), std::ios_base::binary);
                if (!outFile.is_open()) {
                    throw std::runtime_error("Cannot create " + out_img_name);
                }
                std::vector<unsigned char> data_img2;
                for (float i : data_img) {
                    data_img2.push_back(static_cast<unsigned char>(i));
                }
                writeOutputBmp(data_img2.data(), H, W, outFile);
                outFile.close();
                slog::info << "Image " << out_img_name << " created!" << slog::endl;
            }
        }
        // -----------------------------------------------------------------------------------------------------
    } catch (const std::exception& error) {
        slog::err << error.what() << slog::endl;
        return 1;
    } catch (...) {
        slog::err << "Unknown/internal exception happened" << slog::endl;
        return 1;
    }

    slog::info << "Execution successful" << slog::endl;
    slog::info << slog::endl
               << "This sample is an API example, for any performance measurements "
                  "please use the dedicated benchmark_app tool"
               << slog::endl;
    return 0;
}
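
Step 6 of this sample converts the interleaved HWC bytes returned by FormatReader into the planar NCHW FP32 layout the network expects, subtracting the per-channel mean values on the way in (and adding them back when writing the output). A minimal, self-contained sketch of just that conversion (fillNchwWithMean and the mean values are illustrative assumptions, not part of the sample):

// Sketch: interleaved 8-bit BGR (HWC) -> planar FP32 NCHW with mean subtraction.
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical helper mirroring the sample's input fill loop for a single image.
void fillNchwWithMean(const std::uint8_t* hwc, float* nchw,
                      std::size_t width, std::size_t height,
                      std::size_t channels, const float* meanValues) {
    const std::size_t imageSize = width * height;
    for (std::size_t pid = 0; pid < imageSize; ++pid) {      // pixel index
        for (std::size_t ch = 0; ch < channels; ++ch) {      // channel index
            // planar destination: channel plane stride + pixel offset
            nchw[ch * imageSize + pid] =
                static_cast<float>(hwc[pid * channels + ch]) - meanValues[ch];
        }
    }
}

int main() {
    const std::size_t w = 4, h = 2, c = 3;
    std::vector<std::uint8_t> bgr(w * h * c, 128);     // dummy mid-gray image
    std::vector<float> blob(w * h * c);
    const float mean[] = {103.94f, 116.78f, 123.68f};  // example mean values
    fillNchwWithMean(bgr.data(), blob.data(), w, h, c, mean);
    return 0;
}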

⑦ /opt/intel/openvino_2021.4.689/inference_engine/samples/cpp/classification_sample_async

// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

/**
 * @brief The entry point the Inference Engine sample application
 * @file classification_sample_async/main.cpp
 * @example classification_sample_async/main.cpp
 */

#include <format_reader_ptr.h>
#include <samples/classification_results.h>
#include <sys/stat.h>

#include <condition_variable>
#include <fstream>
#include <inference_engine.hpp>
#include <map>
#include <memory>
#include <mutex>
#include <samples/args_helper.hpp>
#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <string>
#include <vector>

#include "classification_sample_async.h"

using namespace InferenceEngine;

/**
 * @brief Checks input args
 * @param argc number of args
 * @param argv list of input arguments
 * @return bool status true(Success) or false(Fail)
 */
bool ParseAndCheckCommandLine(int argc, char* argv[]) {
    gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true);
    if (FLAGS_h) {
        showUsage();
        showAvailableDevices();
        return false;
    }
    slog::info << "Parsing input parameters" << slog::endl;

    if (FLAGS_nt <= 0) {
        throw std::logic_error("Incorrect value for nt argument. It should be greater than 0.");
    }

    if (FLAGS_m.empty()) {
        showUsage();
        throw std::logic_error("Model is required but not set. Please set -m option.");
    }

    if (FLAGS_i.empty()) {
        showUsage();
        throw std::logic_error("Input is required but not set. Please set -i option.");
    }

    return true;
}

int main(int argc, char* argv[]) {
    try {
        // ------------------------------ Get Inference Engine version
        // ------------------------------------------------------
        slog::info << "InferenceEngine: " << GetInferenceEngineVersion() << slog::endl;

        // ------------------------------ Parsing and validation of input arguments
        // ---------------------------------
        if (!ParseAndCheckCommandLine(argc, argv)) {
            return 0;
        }
        // ------------------------------ Read input
        // -----------------------------------------------------------
        /** This vector stores paths to the processed images **/
        std::vector<std::string> imageNames;
        parseInputFilesArguments(imageNames);
        if (imageNames.empty())
            throw std::logic_error("No suitable images were found");
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 1. Initialize inference engine core
        // -------------------------------------
        slog::info << "Loading Inference Engine" << slog::endl;
        Core ie;
        // ------------------------------ Get Available Devices
        // ------------------------------------------------------
        slog::info << "Device info: " << slog::endl;
        std::cout << ie.GetVersions(FLAGS_d) << std::endl;

        if (!FLAGS_l.empty()) {
            // Custom CPU extension is loaded as a shared library and passed as a
            // pointer to base extension
            IExtensionPtr extension_ptr = std::make_shared<Extension>(FLAGS_l);
            ie.AddExtension(extension_ptr);
            slog::info << "CPU Extension loaded: " << FLAGS_l << slog::endl;
        }
        if (!FLAGS_c.empty() && (FLAGS_d == "GPU" || FLAGS_d == "MYRIAD" || FLAGS_d == "HDDL")) {
            // Config for device plugin custom extension is loaded from an .xml
            // description
            ie.SetConfig({{PluginConfigParams::KEY_CONFIG_FILE, FLAGS_c}}, FLAGS_d);
            slog::info << "Config for " << FLAGS_d << " device plugin custom extension loaded: " << FLAGS_c << slog::endl;
        }
        // -----------------------------------------------------------------------------------------------------

        // Step 2. Read a model in OpenVINO Intermediate Representation (.xml and
        // .bin files) or ONNX (.onnx file) format
        slog::info << "Loading network files:" << slog::endl << FLAGS_m << slog::endl;

        /** Read network model **/
        CNNNetwork network = ie.ReadNetwork(FLAGS_m);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 3. Configure input & output
        // ---------------------------------------------
        if (network.getOutputsInfo().size() != 1)
            throw std::logic_error("Sample supports topologies with 1 output only");

        // --------------------------- Prepare input blobs
        // -----------------------------------------------------
        slog::info << "Preparing input blobs" << slog::endl;

        /** Taking information about all topology inputs **/
        InputsDataMap inputInfo(network.getInputsInfo());
        if (inputInfo.size() != 1)
            throw std::logic_error("Sample supports topologies with 1 input only");

        auto inputInfoItem = *inputInfo.begin();

        /** Specifying the precision and layout of input data provided by the user.
         * This should be called before load of the network to the device **/
        inputInfoItem.second->setPrecision(Precision::U8);
        inputInfoItem.second->setLayout(Layout::NCHW);

        std::vector<std::shared_ptr<unsigned char>> imagesData = {};
        std::vector<std::string> validImageNames = {};
        for (const auto& i : imageNames) {
            FormatReader::ReaderPtr reader(i.c_str());
            if (reader.get() == nullptr) {
                slog::warn << "Image " + i + " cannot be read!" << slog::endl;
                continue;
            }
            /** Store image data **/
            std::shared_ptr<unsigned char> data(
                reader->getData(inputInfoItem.second->getTensorDesc().getDims()[3], inputInfoItem.second->getTensorDesc().getDims()[2]));
            if (data != nullptr) {
                imagesData.push_back(data);
                validImageNames.push_back(i);
            }
        }
        if (imagesData.empty() || validImageNames.empty())
            throw std::logic_error("Valid input images were not found!");

        /** Setting batch size using image count **/
        network.setBatchSize(imagesData.size());
        size_t batchSize = network.getBatchSize();
        slog::info << "Batch size is " << std::to_string(batchSize) << slog::endl;

        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 4. Loading model to the device
        // ------------------------------------------
        slog::info << "Loading model to the device" << slog::endl;
        ExecutableNetwork executable_network = ie.LoadNetwork(network, FLAGS_d);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 5. Create infer request
        // -------------------------------------------------
        slog::info << "Create infer request" << slog::endl;
        InferRequest inferRequest = executable_network.CreateInferRequest();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 6. Prepare input
        // --------------------------------------------------------
        for (auto& item : inputInfo) {
            Blob::Ptr inputBlob = inferRequest.GetBlob(item.first);
            SizeVector dims = inputBlob->getTensorDesc().getDims();
            /** Fill input tensor with images. First b channel, then g and r channels
             * **/
            size_t num_channels = dims[1];
            size_t image_size = dims[3] * dims[2];

            MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
            if (!minput) {
                slog::err << "We expect MemoryBlob from inferRequest, but by fact we "
                             "were not able to cast inputBlob to MemoryBlob"
                          << slog::endl;
                return 1;
            }
            // locked memory holder should be alive all time while access to its
            // buffer happens
            auto minputHolder = minput->wmap();

            auto data = minputHolder.as<PrecisionTrait<Precision::U8>::value_type*>();
            if (data == nullptr)
                throw std::runtime_error("Input blob has not allocated buffer");
            /** Iterate over all input images **/
            for (size_t image_id = 0; image_id < imagesData.size(); ++image_id) {
                /** Iterate over all pixel in image (b,g,r) **/
                for (size_t pid = 0; pid < image_size; pid++) {
                    /** Iterate over all channels **/
                    for (size_t ch = 0; ch < num_channels; ++ch) {
                        /**          [images stride + channels stride + pixel id ] all in
                         * bytes            **/
                        data[image_id * image_size * num_channels + ch * image_size + pid] = imagesData.at(image_id).get()[pid * num_channels + ch];
                    }
                }
            }
        }

        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 7. Do inference
        // ---------------------------------------------------------
        size_t numIterations = 10;
        size_t curIteration = 0;
        std::condition_variable condVar;

        inferRequest.SetCompletionCallback([&] {
            curIteration++;
            slog::info << "Completed " << curIteration << " async request execution" << slog::endl;
            if (curIteration < numIterations) {
                /* here a user can read output containing inference results and put new
                   input to repeat async request again */
                inferRequest.StartAsync();
            } else {
                /* continue sample execution after last Asynchronous inference request
                 * execution */
                condVar.notify_one();
            }
        });

        /* Start async request for the first time */
        slog::info << "Start inference (" << numIterations << " asynchronous executions)" << slog::endl;
        inferRequest.StartAsync();

        /* Wait all repetitions of the async request */
        std::mutex mutex;
        std::unique_lock<std::mutex> lock(mutex);
        condVar.wait(lock, [&] {
            return curIteration == numIterations;
        });

        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 8. Process output
        // -------------------------------------------------------
        slog::info << "Processing output blobs" << slog::endl;
        OutputsDataMap outputInfo(network.getOutputsInfo());
        if (outputInfo.empty())
            throw std::runtime_error("Can't get output blobs");
        Blob::Ptr outputBlob = inferRequest.GetBlob(outputInfo.begin()->first);

        /** Validating -nt value **/
        const size_t resultsCnt = outputBlob->size() / batchSize;
        if (FLAGS_nt > resultsCnt || FLAGS_nt < 1) {
            slog::warn << "-nt " << FLAGS_nt << " is not available for this network (-nt should be less than " << resultsCnt + 1
                       << " and more than 0)\n            Maximal value " << resultsCnt << " will be used." << slog::endl;
            FLAGS_nt = resultsCnt;
        }

        /** Read labels from file (e.g. AlexNet.labels) **/
        std::string labelFileName = fileNameNoExt(FLAGS_m) + ".labels";
        std::vector<std::string> labels;

        std::ifstream inputFile;
        inputFile.open(labelFileName, std::ios::in);
        if (inputFile.is_open()) {
            std::string strLine;
            while (std::getline(inputFile, strLine)) {
                trim(strLine);
                labels.push_back(strLine);
            }
        }
        // Prints formatted classification results
        ClassificationResult classificationResult(outputBlob, validImageNames, batchSize, FLAGS_nt, labels);
        classificationResult.print();
        // -----------------------------------------------------------------------------------------------------
    } catch (const std::exception& error) {
        slog::err << error.what() << slog::endl;
        return 1;
    } catch (...) {
        slog::err << "Unknown/internal exception happened." << slog::endl;
        return 1;
    }

    slog::info << "Execution successful" << slog::endl;
    slog::info << slog::endl
               << "This sample is an API example, for any performance measurements "
                  "please use the dedicated benchmark_app tool"
               << slog::endl;
    return 0;
}
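
The core of this sample is the asynchronous pattern in Step 7: the completion callback re-submits StartAsync() until the requested number of iterations has run, then signals a condition variable so the main thread can continue. A condensed sketch of just that pattern (the model path, the CPU device, and the omitted input preparation are placeholders, not part of the sample):

// Sketch of the SetCompletionCallback + condition_variable pattern from Step 7.
#include <condition_variable>
#include <cstddef>
#include <mutex>

#include <inference_engine.hpp>

int main() {
    using namespace InferenceEngine;
    Core ie;
    CNNNetwork network = ie.ReadNetwork("model.xml");            // placeholder path
    ExecutableNetwork exeNetwork = ie.LoadNetwork(network, "CPU");
    InferRequest request = exeNetwork.CreateInferRequest();
    // NOTE: input blob preparation is omitted here for brevity.

    const std::size_t numIterations = 10;
    std::size_t curIteration = 0;
    std::condition_variable condVar;
    std::mutex mutex;

    request.SetCompletionCallback([&] {
        std::lock_guard<std::mutex> guard(mutex);
        ++curIteration;
        if (curIteration < numIterations) {
            request.StartAsync();   // re-submit the request from the callback
        } else {
            condVar.notify_one();   // last iteration: wake the waiting main thread
        }
    });

    request.StartAsync();           // first submission
    std::unique_lock<std::mutex> lock(mutex);
    condVar.wait(lock, [&] { return curIteration == numIterations; });
    return 0;
}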

⑧ /opt/intel/openvino_2021.4.689/inference_engine/samples/cpp/ngraph_function_creation_sample

// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <format_reader_ptr.h>
#include <gflags/gflags.h>
#include <samples/classification_results.h>

#include <inference_engine.hpp>
#include <limits>
#include <memory>
#include <samples/args_helper.hpp>
#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <string>
#include <vector>

#include "ngraph/ngraph.hpp"
#include "ngraph_function_creation_sample.hpp"

using namespace InferenceEngine;
using namespace ngraph;

/**
 * @brief Checks input args
 * @param argc number of args
 * @param argv list of input arguments
 * @return bool status true(Success) or false(Fail)
 */
bool ParseAndCheckCommandLine(int argc, char* argv[]) {
    slog::info << "Parsing input parameters" << slog::endl;

    gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true);
    if (FLAGS_h) {
        showUsage();
        showAvailableDevices();
        return false;
    }

    if (FLAGS_nt <= 0 || FLAGS_nt > 10) {
        throw std::logic_error("Incorrect value for nt argument. It should be "
                               "greater than 0 and less than 10.");
    }

    if (FLAGS_m.empty()) {
        showUsage();
        throw std::logic_error("Path to a .bin file with weights for the trained model is required "
                               "but not set. Please set -m option.");
    }

    if (FLAGS_i.empty()) {
        showUsage();
        throw std::logic_error("Path to an image is required but not set. Please set -i option.");
    }

    return true;
}

/**
 * @brief Read file to the buffer
 * @param file_name string
 * @param buffer to store file content
 * @param maxSize length of file
 * @return none
 */
void readFile(const std::string& file_name, void* buffer, size_t maxSize) {
    std::ifstream inputFile;

    inputFile.open(file_name, std::ios::binary | std::ios::in);
    if (!inputFile.is_open()) {
        throw std::logic_error("Cannot open weights file");
    }

    if (!inputFile.read(reinterpret_cast<char*>(buffer), maxSize)) {
        inputFile.close();
        throw std::logic_error("Cannot read bytes from weights file");
    }

    inputFile.close();
}

/**
 * @brief Read .bin file with weights for the trained model
 * @param filepath string
 * @return weightsPtr tensor blob
 */
TBlob<uint8_t>::CPtr ReadWeights(std::string filepath) {
    std::ifstream weightFile(filepath, std::ifstream::ate | std::ifstream::binary);
    int64_t fileSize = weightFile.tellg();

    if (fileSize < 0) {
        throw std::logic_error("Incorrect weights file");
    }

    size_t ulFileSize = static_cast<size_t>(fileSize);

    TBlob<uint8_t>::Ptr weightsPtr(new TBlob<uint8_t>({Precision::FP32, {ulFileSize}, Layout::C}));
    weightsPtr->allocate();
    readFile(filepath, weightsPtr->buffer(), ulFileSize);

    return weightsPtr;
}

/**
 * @brief Create ngraph function
 * @return Ptr to ngraph function
 */
std::shared_ptr<Function> createNgraphFunction() {
    TBlob<uint8_t>::CPtr weightsPtr = ReadWeights(FLAGS_m);

    if (weightsPtr->byteSize() != 1724336)
        IE_THROW() << "Incorrect weights file. This sample works only with LeNet "
                      "classification network.";

    // -------input------
    std::vector<ptrdiff_t> padBegin {0, 0};
    std::vector<ptrdiff_t> padEnd {0, 0};

    auto paramNode = std::make_shared<op::Parameter>(element::Type_t::f32, Shape(std::vector<size_t> {{64, 1, 28, 28}}));
    paramNode->set_friendly_name("Parameter");

    // -------convolution 1----
    auto convFirstShape = Shape {20, 1, 5, 5};
    std::shared_ptr<Node> convolutionFirstConstantNode =
        std::make_shared<op::Constant>(element::Type_t::f32, convFirstShape, weightsPtr->cbuffer().as<uint8_t*>());

    std::shared_ptr<Node> convolutionNodeFirst =
        std::make_shared<op::v1::Convolution>(paramNode->output(0), convolutionFirstConstantNode->output(0), Strides(SizeVector {1, 1}),
                                              CoordinateDiff(padBegin), CoordinateDiff(padEnd), Strides(SizeVector {1, 1}));

    // -------Add--------------
    auto addFirstShape = Shape {1, 20, 1, 1};
    auto offset = shape_size(convFirstShape) * sizeof(float);
    std::shared_ptr<Node> addFirstConstantNode =
        std::make_shared<op::Constant>(element::Type_t::f32, addFirstShape, (weightsPtr->cbuffer().as<uint8_t*>() + offset));

    std::shared_ptr<Node> addNodeFirst = std::make_shared<op::v1::Add>(convolutionNodeFirst->output(0), addFirstConstantNode->output(0));

    // -------MAXPOOL----------
    Shape padBeginShape {0, 0};
    Shape padEndShape {0, 0};

    std::shared_ptr<Node> maxPoolingNodeFirst =
        std::make_shared<op::v1::MaxPool>(addNodeFirst->output(0), std::vector<size_t> {2, 2}, padBeginShape, padEndShape, std::vector<size_t> {2, 2},
                                          op::RoundingType::CEIL, op::PadType::EXPLICIT);

    // -------convolution 2----
    auto convSecondShape = Shape {50, 20, 5, 5};
    offset += shape_size(addFirstShape) * sizeof(float);
    std::shared_ptr<Node> convolutionSecondConstantNode =
        std::make_shared<op::Constant>(element::Type_t::f32, convSecondShape, (weightsPtr->cbuffer().as<uint8_t*>() + offset));

    std::shared_ptr<Node> convolutionNodeSecond =
        std::make_shared<op::v1::Convolution>(maxPoolingNodeFirst->output(0), convolutionSecondConstantNode->output(0), Strides({1, 1}),
                                              CoordinateDiff(padBegin), CoordinateDiff(padEnd), Strides({1, 1}));

    // -------Add 2------------
    auto addSecondShape = Shape {1, 50, 1, 1};
    offset += shape_size(convSecondShape) * sizeof(float);
    std::shared_ptr<Node> addSecondConstantNode =
        std::make_shared<op::Constant>(element::Type_t::f32, addSecondShape, (weightsPtr->cbuffer().as<uint8_t*>() + offset));

    std::shared_ptr<Node> addNodeSecond = std::make_shared<op::v1::Add>(convolutionNodeSecond->output(0), addSecondConstantNode->output(0));

    // -------MAXPOOL 2--------
    std::shared_ptr<Node> maxPoolingNodeSecond = std::make_shared<op::v1::MaxPool>(addNodeSecond->output(0), Strides {2, 2}, padBeginShape, padEndShape,
                                                                                   Shape {2, 2}, op::RoundingType::CEIL, op::PadType::EXPLICIT);

    // -------Reshape----------
    auto reshapeFirstShape = Shape {2};
    auto reshapeOffset = shape_size(addSecondShape) * sizeof(float) + offset;
    std::shared_ptr<Node> reshapeFirstConstantNode =
        std::make_shared<op::Constant>(element::Type_t::i64, reshapeFirstShape, (weightsPtr->cbuffer().as<uint8_t*>() + reshapeOffset));

    std::shared_ptr<Node> reshapeFirstNode = std::make_shared<op::v1::Reshape>(maxPoolingNodeSecond->output(0), reshapeFirstConstantNode->output(0), true);

    // -------MatMul 1---------
    auto matMulFirstShape = Shape {500, 800};
    offset = shape_size(reshapeFirstShape) * sizeof(int64_t) + reshapeOffset;
    std::shared_ptr<Node> matMulFirstConstantNode =
        std::make_shared<op::Constant>(element::Type_t::f32, matMulFirstShape, (weightsPtr->cbuffer().as<uint8_t*>() + offset));

    std::shared_ptr<Node> matMulFirstNode = std::make_shared<op::MatMul>(reshapeFirstNode->output(0), matMulFirstConstantNode->output(0), false, true);

    // -------Add 3------------
    auto addThirdShape = Shape {1, 500};
    offset += shape_size(matMulFirstShape) * sizeof(float);
    std::shared_ptr<Node> addThirdConstantNode =
        std::make_shared<op::Constant>(element::Type_t::f32, addThirdShape, (weightsPtr->cbuffer().as<uint8_t*>() + offset));

    std::shared_ptr<Node> addThirdNode = std::make_shared<op::v1::Add>(matMulFirstNode->output(0), addThirdConstantNode->output(0));

    // -------Relu-------------
    std::shared_ptr<Node> reluNode = std::make_shared<op::Relu>(addThirdNode->output(0));

    // -------Reshape 2--------
    auto reshapeSecondShape = Shape {2};
    std::shared_ptr<Node> reshapeSecondConstantNode =
        std::make_shared<op::Constant>(element::Type_t::i64, reshapeSecondShape, (weightsPtr->cbuffer().as<uint8_t*>() + reshapeOffset));

    std::shared_ptr<Node> reshapeSecondNode = std::make_shared<op::v1::Reshape>(reluNode->output(0), reshapeSecondConstantNode->output(0), true);

    // -------MatMul 2---------
    auto matMulSecondShape = Shape {10, 500};
    offset += shape_size(addThirdShape) * sizeof(float);
    std::shared_ptr<Node> matMulSecondConstantNode =
        std::make_shared<op::Constant>(element::Type_t::f32, matMulSecondShape, (weightsPtr->cbuffer().as<uint8_t*>() + offset));

    std::shared_ptr<Node> matMulSecondNode = std::make_shared<op::MatMul>(reshapeSecondNode->output(0), matMulSecondConstantNode->output(0), false, true);

    // -------Add 4------------
    auto add4Shape = Shape {1, 10};
    offset += shape_size(matMulSecondShape) * sizeof(float);
    std::shared_ptr<Node> add4ConstantNode = std::make_shared<op::Constant>(element::Type_t::f32, add4Shape, (weightsPtr->cbuffer().as<uint8_t*>() + offset));

    std::shared_ptr<Node> add4Node = std::make_shared<op::v1::Add>(matMulSecondNode->output(0), add4ConstantNode->output(0));

    // -------softMax----------
    std::shared_ptr<Node> softMaxNode = std::make_shared<op::v1::Softmax>(add4Node->output(0), 1);

    // -------ngraph function--
    auto result_full = std::make_shared<op::Result>(softMaxNode->output(0));

    std::shared_ptr<ngraph::Function> fnPtr = std::make_shared<ngraph::Function>(result_full, ngraph::ParameterVector {paramNode}, "lenet");

    return fnPtr;
}

/**
 * @brief The entry point for inference engine automatic ngraph function
 * creation sample
 * @file ngraph_function_creation_sample/main.cpp
 * @example ngraph_function_creation_sample/main.cpp
 */
int main(int argc, char* argv[]) {
    try {
        // ------------------------------ Get Inference Engine version
        // ------------------------------------------------------
        slog::info << "InferenceEngine: " << GetInferenceEngineVersion() << slog::endl;
        // ------------------------------ Parsing and validation of input arguments
        // ---------------------------------
        if (!ParseAndCheckCommandLine(argc, argv)) {
            return 0;
        }
        // ------------------------------ Read input
        // -----------------------------------------------------------
        /** This vector stores paths to the processed images **/
        std::vector<std::string> images;
        parseInputFilesArguments(images);
        if (images.empty()) {
            throw std::logic_error("No suitable images were found");
        }
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 1. Initialize inference engine core
        // -------------------------------------
        slog::info << "Loading Inference Engine" << slog::endl;
        Core ie;
        // ------------------------------ Get Available Devices
        // ------------------------------------------------------
        slog::info << "Device info: " << slog::endl;
        std::cout << ie.GetVersions(FLAGS_d) << std::endl;
        // -----------------------------------------------------------------------------------------------------

        //--------------------------- Step 2. Create network using ngraph function
        //-----------------------------------

        CNNNetwork network(createNgraphFunction());
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 3. Configure input & output
        // ---------------------------------------------
        // --------------------------- Prepare input blobs
        // -----------------------------------------------------
        slog::info << "Preparing input blobs" << slog::endl;

        InputsDataMap inputInfo = network.getInputsInfo();
        if (inputInfo.size() != 1) {
            throw std::logic_error("Sample supports topologies only with 1 input");
        }

        auto inputInfoItem = *inputInfo.begin();

        /** Specifying the precision and layout of input data provided by the user.
         * Call this before loading the network to the device **/
        inputInfoItem.second->setPrecision(Precision::FP32);
        inputInfoItem.second->setLayout(Layout::NCHW);

        std::vector<std::shared_ptr<unsigned char>> imagesData;
        for (auto& i : images) {
            FormatReader::ReaderPtr reader(i.c_str());
            if (reader.get() == nullptr) {
                slog::warn << "Image " + i + " cannot be read!" << slog::endl;
                continue;
            }
            /** Store image data **/
            std::shared_ptr<unsigned char> data(
                reader->getData(inputInfoItem.second->getTensorDesc().getDims()[3], inputInfoItem.second->getTensorDesc().getDims()[2]));
            if (data.get() != nullptr) {
                imagesData.push_back(data);
            }
        }

        if (imagesData.empty()) {
            throw std::logic_error("Valid input images were not found");
        }

        /** Setting batch size using image count **/
        network.setBatchSize(imagesData.size());
        size_t batchSize = network.getBatchSize();
        slog::info << "Batch size is " << std::to_string(batchSize) << slog::endl;

        // --------------------------- Prepare output blobs
        // -----------------------------------------------------
        slog::info << "Checking that the outputs are as the sample expects" << slog::endl;
        OutputsDataMap outputInfo(network.getOutputsInfo());
        std::string firstOutputName;

        for (auto& item : outputInfo) {
            if (firstOutputName.empty()) {
                firstOutputName = item.first;
            }
            DataPtr outputData = item.second;
            if (!outputData) {
                throw std::logic_error("Output data pointer is not valid");
            }

            item.second->setPrecision(Precision::FP32);
        }

        if (outputInfo.size() != 1) {
            throw std::logic_error("This demo accepts networks with a single output");
        }

        DataPtr& output = outputInfo.begin()->second;
        auto outputName = outputInfo.begin()->first;

        const SizeVector outputDims = output->getTensorDesc().getDims();
        const int classCount = outputDims[1];

        if (classCount > 10) {
            throw std::logic_error("Incorrect number of output classes for LeNet network");
        }

        if (outputDims.size() != 2) {
            throw std::logic_error("Incorrect output dimensions for LeNet");
        }
        output->setPrecision(Precision::FP32);
        output->setLayout(Layout::NC);

        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 4. Loading model to the device
        // ------------------------------------------
        slog::info << "Loading model to the device" << slog::endl;
        ExecutableNetwork exeNetwork = ie.LoadNetwork(network, FLAGS_d);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 5. Create infer request
        // -------------------------------------------------
        slog::info << "Create infer request" << slog::endl;
        InferRequest infer_request = exeNetwork.CreateInferRequest();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 6. Prepare input
        // --------------------------------------------------------
        /** Iterate over all the input blobs **/
        for (const auto& item : inputInfo) {
            /** Creating input blob **/
            Blob::Ptr input = infer_request.GetBlob(item.first);

            /** Filling input tensor with images. First b channel, then g and r
             * channels **/
            size_t num_channels = input->getTensorDesc().getDims()[1];
            size_t image_size = input->getTensorDesc().getDims()[2] * input->getTensorDesc().getDims()[3];

            auto data = input->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();

            /** Iterate over all input images **/
            for (size_t image_id = 0; image_id < imagesData.size(); ++image_id) {
                /** Iterate over all pixels in image (b,g,r) **/
                for (size_t pid = 0; pid < image_size; pid++) {
                    /** Iterate over all channels **/
                    for (size_t ch = 0; ch < num_channels; ++ch) {
                        /**          [images stride + channels stride + pixel id ] all in
                         * bytes            **/
                        data[image_id * image_size * num_channels + ch * image_size + pid] = imagesData.at(image_id).get()[pid * num_channels + ch];
                    }
                }
            }
        }
        inputInfo = {};
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 7. Do inference
        // ---------------------------------------------------------
        slog::info << "Start inference" << slog::endl;
        infer_request.Infer();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- Step 8. Process output
        // -------------------------------------------------------
        slog::info << "Processing output blobs" << slog::endl;

        const Blob::Ptr outputBlob = infer_request.GetBlob(firstOutputName);

        /** Validating -nt value **/
        const size_t resultsCnt = outputBlob->size() / batchSize;
        if (FLAGS_nt > resultsCnt || FLAGS_nt < 1) {
            slog::warn << "-nt " << FLAGS_nt << " is not available for this network (-nt should be less than " << resultsCnt + 1
                       << " and more than 0).\n           Maximal value " << resultsCnt << " will be used.";
            FLAGS_nt = resultsCnt;
        }

        /** Read labels from file (e.g. LeNet.labels) **/
        std::string labelFileName = fileNameNoExt(FLAGS_m) + ".labels";
        std::vector<std::string> labels;

        std::ifstream inputFile;
        inputFile.open(labelFileName, std::ios::in);
        if (inputFile.is_open()) {
            std::string strLine;
            while (std::getline(inputFile, strLine)) {
                trim(strLine);
                labels.push_back(strLine);
            }
            inputFile.close();
        }
        // Prints formatted classification results
        ClassificationResult classificationResult(outputBlob, images, batchSize, FLAGS_nt, labels);
        classificationResult.print();
        // -----------------------------------------------------------------------------------------------------
    } catch (const std::exception& ex) {
        slog::err << ex.what() << slog::endl;
        return EXIT_FAILURE;
    }
    slog::info << "This sample is an API example, for performance measurements, "
                  "use the dedicated benchmark_app tool"
               << slog::endl;
    return 0;
}
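
createNgraphFunction() above rebuilds the whole LeNet topology from a raw weights file. As a much smaller illustration of the same ngraph API, the sketch below (Parameter -> Relu -> Result; the names and the CPU device are illustrative assumptions) builds a trivial function and wraps it in a CNNNetwork exactly as Step 2 does:

// Sketch: build a tiny ngraph::Function and load it like Step 2 + Step 4 above.
#include <memory>

#include <inference_engine.hpp>
#include <ngraph/ngraph.hpp>

std::shared_ptr<ngraph::Function> createTinyFunction() {
    using namespace ngraph;
    // 1x3x224x224 FP32 input
    auto param = std::make_shared<op::Parameter>(element::f32, Shape{1, 3, 224, 224});
    param->set_friendly_name("input");
    auto relu = std::make_shared<op::Relu>(param->output(0));
    auto result = std::make_shared<op::Result>(relu->output(0));
    return std::make_shared<Function>(ResultVector{result}, ParameterVector{param}, "tiny_relu");
}

int main() {
    InferenceEngine::Core ie;
    InferenceEngine::CNNNetwork network(createTinyFunction());
    auto exeNetwork = ie.LoadNetwork(network, "CPU");   // device is a placeholder
    return 0;
}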

 
