onnx模型推理
为了方便使用,这里自定义了配置的消息文件,没有使用json。主要是因为json解析bool型的值时会将其解析为string,需要手动转换,相对比较麻烦;而protobuf的扩展性更好,因此使用protobuf作为配置文件格式。
// Schema for the inference demo's text-format configuration file.
// Regenerate Python bindings after edits: protoc model_config.proto --python_out=.
syntax = "proto3";
package inference;
// Locations of the model and test data used by the inference scripts.
message DataConfig {
string modelPaths = 1;  // directory that holds the ONNX model (and demo images)
string testDataPath = 2;  // directory of test data
string modelName = 3;  // ONNX model file name, joined onto modelPaths
repeated string filenames = 4;  // image file names under modelPaths; inference uses the first
}
生成python代码:protoc model_config.proto --python_out=.
OpenCV推理
import cv2
import os
import numpy as np
from google.protobuf import text_format,json_format
from model_config_pb2 import DataConfig
import time
import onnx
import onnxruntime
def parse_config(config_filename):
    """Parse a text-format DataConfig proto file into a plain dict.

    Args:
        config_filename: Path to a protobuf text-format config file.

    Returns:
        dict produced by ``json_format.MessageToDict`` — note the keys use
        lowerCamelCase field names ('modelPaths', 'modelName', 'filenames', ...).
    """
    config_proto = DataConfig()
    # Open in text mode with an explicit encoding instead of reading raw
    # bytes and decoding by hand.
    with open(config_filename, 'r', encoding='utf-8') as f:
        text_format.Parse(f.read(), config_proto)
    return json_format.MessageToDict(config_proto)
def inference_with_cv(config):
    """Run ONNX model inference via OpenCV's DNN module and print the result.

    Args:
        config: dict from parse_config; must contain 'modelPaths' (directory)
            and 'modelName' (ONNX file name).

    Raises:
        ValueError: if the model file does not exist or the demo image
            cannot be read.
    """
    onnx_model = os.path.join(config['modelPaths'], config['modelName'])
    if not os.path.exists(onnx_model):
        raise ValueError("Path:{} not exists!".format(onnx_model))
    # Reuse the path built above instead of joining it a second time.
    net = cv2.dnn.readNetFromONNX(onnx_model)
    image_path = os.path.join(config['modelPaths'], 'demo.png')
    image = cv2.imread(image_path)
    if image is None:
        # cv2.imread returns None on failure rather than raising.
        raise ValueError("Path:{} not exists!".format(image_path))
    # NOTE(review): blobFromImage subtracts `mean` from the raw 0-255 pixel
    # values before applying scalefactor, so 0-1-range ImageNet means like
    # [0.485, 0.456, 0.406] are effectively no-ops here, and no std division
    # is applied. If the model was trained with standard ImageNet
    # normalization, the means likely need to be scaled by 255 — confirm
    # against the training-time preprocessing before changing.
    blob = cv2.dnn.blobFromImage(image, scalefactor=1 / 255, size=(224, 224),
                                 mean=[0.485, 0.456, 0.406], swapRB=True,
                                 crop=False)
    net.setInput(blob)
    start_time = time.time()
    output = net.forward()
    end_time = time.time()
    print("推理花费时间:{:.4f} s predict result:{}".format(end_time - start_time, np.argmax(output)))
Onnxruntime推理
def inference_with_onnx(config):
    """Run ONNX model inference via onnxruntime (CPU) and print the class.

    Args:
        config: dict from parse_config; must contain 'modelPaths' (directory),
            'modelName' (ONNX file name) and 'filenames' (image file names
            under modelPaths; only the first is used).

    Raises:
        ValueError: if the model file does not exist or the image cannot
            be read.
    """
    onnx_model = os.path.join(config['modelPaths'], config['modelName'])
    # Fail early on a missing model, matching inference_with_cv's behavior.
    if not os.path.exists(onnx_model):
        raise ValueError("Path:{} not exists!".format(onnx_model))
    session = onnxruntime.InferenceSession(onnx_model, providers=['CPUExecutionProvider'])
    # get_outputs()/get_inputs() return node metadata, not tensors.
    output_names = [node.name for node in session.get_outputs()]
    input_name = session.get_inputs()[0].name
    image_path = os.path.join(config['modelPaths'], config['filenames'][0])
    image = cv2.imread(image_path)
    if image is None:
        # cv2.imread returns None on failure rather than raising.
        raise ValueError("Path:{} not exists!".format(image_path))
    # NOTE(review): blobFromImage subtracts `mean` from raw 0-255 pixels
    # before scaling, so these 0-1-range ImageNet means are effectively
    # no-ops and no std division is applied — verify against the model's
    # training preprocessing before changing.
    data = cv2.dnn.blobFromImage(image, scalefactor=1 / 255, size=(224, 224),
                                 mean=[0.485, 0.456, 0.406], swapRB=True,
                                 crop=False)
    output_result = session.run(output_names, input_feed={input_name: data})
    print("Class:{}".format(np.argmax(output_result[0])))