提示:假设读者OpenVINO环境已经配置好
环境配置可参考:https://nickhuang1996.blog.csdn.net/article/details/81391468
pytorch模型不能直接转成OpenVINO需要的IR文件, 需要先转成onnx中间模型。
可参考pytorch官网实例:https://pytorch.org/docs/master/onnx.html#torch-onnx
# Export a PyTorch model to ONNX so it can later be converted to an OpenVINO IR.
# NOTE(review, bug fix): the model must be constructed BEFORE model.eval() and
# torch.onnx.export are called; the original snippet assigned `model` after use.
model = "自己构建的模型"  # placeholder: replace with your instantiated nn.Module
model_name = "导出的onnx模型名字"

# Dummy input fixing the network's input layout (N, C, H, W).
dummy_input = torch.rand(1, 3, 80, 80)
model.eval()  # inference mode: disables dropout, uses running BN statistics

output_onnx = '{}.onnx'.format(model_name)
print("==> Exporting model to onnx format at '{}'".format(output_onnx))
input_names = ["input"]
output_names = ["output"]
# dynamic_axes marks dimension 0 (the batch dimension) of both input and
# output as variable, so the exported model accepts any batch size.
# torch.onnx.export returns None, so its result is not captured.
torch.onnx.export(model, dummy_input, output_onnx, verbose=True,
                  input_names=input_names,
                  output_names=output_names,
                  dynamic_axes={"input": {0: "batch_size"},
                                "output": {0: "batch_size"}})

print("==> Loading and checking exported model from '{}'".format(output_onnx))
onnx_model = onnx.load(output_onnx)
onnx.checker.check_model(onnx_model)  # raises if the exported graph is invalid
print("==> Passed")
我们首先进入c盘下的:
C:\Program Files (x86)\Intel\openvino_2021.2.185\deployment_tools\model_optimizer\install_prerequisites
找到这个文件夹进入,按住shift,鼠标右键打开PowerShell,就可以打开一个在此文件夹命令窗口。
根据需要安装依赖环境,这样可以减少安装时间,我这里需要onnx环境,就在该目录下运行 .\install_prerequisites_onnx.bat 脚本
进入C盘下的这个目录,使用mo_onnx.py,对onnx模型进行转换。
运行
python mo_onnx.py --input_model <自己的onnx模型的绝对路径> --output_dir <输出模型的绝对路径>
--batch 1(如果导出模型设置的是动态batch_size,这里根据需要指定,不然报错;注意是两个短横线 --batch)
测试生成的OpenVINO生成的IR文件(此处引用贾志刚老师的课程代码)
#include <inference_engine.hpp>
#include <opencv2/opencv.hpp>
#include <fstream>
// Bring Inference Engine names (Precision, Layout, ColorFormat, ...) into scope.
using namespace InferenceEngine;
// Path to a text file with one class label per line (line index == class id).
std::string labels_txt_file = "分类类别txt文件";
// Forward declaration; defined below main().
std::vector<std::string> readClassNames();
// Load an OpenVINO IR (xml + bin), run one classification inference on the
// CPU device, print the top-1 class, and overlay its label on the image.
int main(int argc, char** argv) {
    InferenceEngine::Core ie;

    // Enumerate every device the OpenVINO runtime can see.
    std::vector<std::string> devices = ie.GetAvailableDevices();
    for (const std::string& name : devices) {
        std::cout << "device name: " << name << std::endl;
    }
    std::string cpuName = ie.GetMetric("CPU", METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
    std::cout << "cpu full name: " << cpuName << std::endl;

    std::string xml = "xml文件路径";
    std::string bin = "bin文件路径";
    std::vector<std::string> labels = readClassNames();

    cv::Mat src = cv::imread("图片路径");
    // Fail fast on a bad image path instead of crashing later in cv::resize().
    if (src.empty()) {
        std::cerr << "could not read input image" << std::endl;
        return -1;
    }

    InferenceEngine::CNNNetwork network = ie.ReadNetwork(xml, bin);
    InferenceEngine::InputsDataMap inputs = network.getInputsInfo();
    InferenceEngine::OutputsDataMap outputs = network.getOutputsInfo();

    // Configure input precision/layout; iterate by const reference so the
    // map entries are not copied on every iteration.
    std::string input_name = "";
    for (const auto& item : inputs) {
        input_name = item.first;
        auto input_data = item.second;
        input_data->setPrecision(Precision::FP32);
        input_data->setLayout(Layout::NCHW);
        input_data->getPreProcess().setColorFormat(ColorFormat::RGB);
        std::cout << "input name: " << input_name << std::endl;
    }
    std::string output_name = "";
    for (const auto& item : outputs) {
        output_name = item.first;
        auto output_data = item.second;
        output_data->setPrecision(Precision::FP32);
        std::cout << "output name: " << output_name << std::endl;
    }

    auto executable_network = ie.LoadNetwork(network, "CPU");
    auto infer_request = executable_network.CreateInferRequest();

    // Input blob dims are NCHW; index 0 is the batch dimension.
    auto input = infer_request.GetBlob(input_name);
    size_t num_channels = input->getTensorDesc().getDims()[1];
    size_t h = input->getTensorDesc().getDims()[2];
    size_t w = input->getTensorDesc().getDims()[3];
    size_t image_size = h * w;

    // Preprocess: resize to the network input size, BGR->RGB, scale to [0,1],
    // then apply the ImageNet mean/std normalization used at training time.
    cv::Mat blob_image;
    cv::resize(src, blob_image, cv::Size(w, h));
    cv::cvtColor(blob_image, blob_image, cv::COLOR_BGR2RGB);
    blob_image.convertTo(blob_image, CV_32F);
    blob_image = blob_image / 255.0;
    cv::subtract(blob_image, cv::Scalar(0.485, 0.456, 0.406), blob_image);
    cv::divide(blob_image, cv::Scalar(0.229, 0.224, 0.225), blob_image);

    // HWC -> NCHW: copy the interleaved OpenCV pixels into planar layout.
    float* data = static_cast<float*>(input->buffer());
    for (size_t row = 0; row < h; row++) {
        for (size_t col = 0; col < w; col++) {
            for (size_t ch = 0; ch < num_channels; ch++) {
                data[image_size * ch + row * w + col] = blob_image.at<cv::Vec3f>(row, col)[ch];
            }
        }
    }

    infer_request.Infer();

    // Argmax over the class scores; output dims are [N, num_classes].
    auto output = infer_request.GetBlob(output_name);
    const float* probs = static_cast<PrecisionTrait<Precision::FP32>::value_type*>(output->buffer());
    const SizeVector outputDims = output->getTensorDesc().getDims();
    std::cout << outputDims[0] << "x" << outputDims[1] << std::endl;
    float max = probs[0];
    size_t max_index = 0;  // size_t avoids the signed/unsigned comparison in the loop
    for (size_t i = 1; i < outputDims[1]; i++) {
        if (max < probs[i]) {
            max = probs[i];
            max_index = i;
        }
    }
    std::cout << "class index : " << max_index << std::endl;
    // Guard the label lookup: the label file may have fewer lines than classes.
    if (max_index < labels.size()) {
        std::cout << "class name : " << labels[max_index] << std::endl;
        cv::putText(src, labels[max_index], cv::Point(50, 50), cv::FONT_HERSHEY_SIMPLEX, 1.0, cv::Scalar(0, 0, 255), 2, 8);
    }
    cv::imshow("输入图像", src);
    cv::waitKey(0);
    return 0;
}
// Read one class label per line from labels_txt_file, skipping empty lines.
// The returned vector is indexed by class id. Exits the process if the file
// cannot be opened (matches the original tutorial behavior).
std::vector<std::string> readClassNames()
{
    std::vector<std::string> classNames;
    std::ifstream fp(labels_txt_file);
    if (!fp.is_open())
    {
        printf("could not open file...\n");
        exit(-1);
    }
    std::string name;
    // Use getline as the loop condition: the classic `while (!fp.eof())`
    // pattern processes the last line twice (or an empty phantom line)
    // because eof() only turns true AFTER a read fails.
    while (std::getline(fp, name))
    {
        if (name.length())
            classNames.push_back(name);
    }
    // No explicit close needed: std::ifstream releases the file in its destructor.
    return classNames;
}
小伙伴们,赶快动手试一试吧!!!!!!!!!!!!
如有错误,欢迎评论区指正!!