接上一次搭建的 YOLOX 环境,这次要使用 OpenVINO 对其进行 Python 推理加速。
本文的前提是已经实现在本地配置好openvino环境,可去openvino专栏查看。
推理的代码如下:
import argparse
import logging as log
import os
import sys
import cv2
import numpy as np
from openvino.inference_engine import IECore
from yolox_x.yolox.data.data_augment import preproc as preprocess
from yolox_x.yolox.data.datasets import COCO_CLASSES
from yolox_x.yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the OpenVINO YOLOX demo.

    Uses ``add_help=False`` plus an explicit ``-h/--help`` action so that
    help appears inside the custom 'Options' argument group.

    Returns:
        argparse.Namespace with attributes: model, input, output_dir,
        score_thr, device, labels, number_top.
    """
    parser = argparse.ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument(
        '-h',
        '--help',
        action='help',
        help='Show this help message and exit.'
    )
    args.add_argument(
        '-m',
        '--model',
        default='./yolox_x/yolox_10.xml',
        type=str,
        help='Required. Path to an .xml or .onnx file with a trained model.'
    )
    args.add_argument(
        '-i',
        '--input',
        default='./image/1.png',
        type=str,
        help='Required. Path to an image file.'
    )
    args.add_argument(
        '-o',
        '--output_dir',
        type=str,
        default='demo_output',
        help='Path to your output dir.'
    )
    args.add_argument(
        '-s',
        '--score_thr',
        type=float,
        default=0.3,
        help='Score threshold to visualize the result.'
    )
    args.add_argument(
        '-d',
        '--device',
        default='CPU',
        type=str,
        help='Optional. Specify the target device to infer on; CPU, GPU, \
             MYRIAD, HDDL or HETERO: is acceptable. The sample will look \
             for a suitable plugin for device specified. Default value \
             is CPU.'
    )
    args.add_argument(
        '--labels',
        default=None,
        type=str,
        help='Optional. Path to a labels mapping file.'
    )
    args.add_argument(
        '-nt',
        '--number_top',
        default=10,
        type=int,
        help='Optional. Number of top results.'
    )
    return parser.parse_args()
def main():
    """Run one synchronous YOLOX inference with OpenVINO and save the result.

    Reads model/image paths from the command line, runs a single synchronous
    inference request on the chosen device, applies YOLOX post-processing and
    multiclass NMS, draws the surviving detections and writes the annotated
    image into the output directory.

    Returns:
        -1 if the model has more than one input or output blob; None on success.
    """
    log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
    args = parse_args()
    # Step 1: initialize the inference engine.
    log.info('Creating Inference Engine')
    ie = IECore()
    # Step 2: read the OpenVINO IR (.xml) or ONNX model.
    log.info(f'Reading the network:{args.model}')
    net = ie.read_network(model=args.model)
    if len(net.input_info) != 1:
        log.error('Sample supports only single input topologies')
        return -1
    if len(net.outputs) != 1:
        log.error('Sample supports only single output topologies')
        return -1
    # Step 3: configure the input and output blobs.
    log.info('Configuring input and output blobs')
    input_blob = next(iter(net.input_info))
    out_blob = next(iter(net.outputs))
    # Manually set the input/output precisions.
    net.input_info[input_blob].precision = 'FP32'
    net.outputs[out_blob].precision = 'FP16'
    # Step 4: load the model onto the target device.
    log.info('Loading the model to the plugin')
    exec_net = ie.load_network(network=net, device_name=args.device)
    # Step 5: load_network() already created the inference request(s)
    # (default num_requests=1), so no explicit request creation is needed.
    # Step 6: prepare the input (resize + normalize to the network shape).
    origin_img = cv2.imread(args.input)
    _, _, h, w = net.input_info[input_blob].input_data.shape
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    image, ratio = preprocess(origin_img, (h, w), mean, std)
    # Step 7: run inference in synchronous mode.
    log.info('Starting inference in synchronous mode')
    res = exec_net.infer(inputs={input_blob: image})
    # Step 8: decode the raw output into boxes and per-class scores.
    res = res[out_blob]
    predictions = demo_postprocess(res, (h, w), p6=False)[0]
    boxes = predictions[:, :4]  # (cx, cy, w, h) per candidate
    scores = predictions[:, 4, None] * predictions[:, 5:]  # obj_conf * cls_conf
    # Convert center-format boxes to corner format (x1, y1, x2, y2).
    # BUG FIX: the bottom-right corner must ADD half the width/height; the
    # original code subtracted, collapsing every box to a single point.
    boxes_xyxy = np.ones_like(boxes)
    boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.
    boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.
    boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.
    boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.
    # Undo the preprocessing resize so boxes map back onto the original image.
    boxes_xyxy /= ratio
    dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
    if dets is not None:
        final_boxes = dets[:, :4]
        final_scores, final_cls_inds = dets[:, 4], dets[:, 5]
        origin_img = vis(origin_img, final_boxes, final_scores, final_cls_inds,
                         conf=args.score_thr, class_names=COCO_CLASSES)
    mkdir(args.output_dir)
    output_path = os.path.join(args.output_dir, args.input.split('/')[-1])
    cv2.imwrite(output_path, origin_img)
# Script entry point: propagate main()'s return value as the process exit code
# (None -> 0 on success, -1 on unsupported model topology).
if __name__ == '__main__':
    sys.exit(main())
其余部分代码和 YOLOX 的中间件(IR 模型)已上传至网盘:
链接:https://pan.baidu.com/s/1Ws-RlHF8qMmfgjEtk9yjvQ
提取码:ljko