使用opencv调用onnx文件,对待测图像进行预测
一、图像分类
import cv2
from PIL import Image
import numpy as np
import torch

# --- Image classification with an ONNX model via OpenCV's DNN module ---
img_path = './model_v1/1.bmp'
onnx_path = './model_v1/classify_model.onnx'  # binary (2-class) classifier
model = cv2.dnn.readNet(onnx_path)

# Load the image and resize it to the network's expected spatial size.
# NOTE(review): the image is not converted to a fixed mode, so the blob's
# channel count follows the source file (the recorded run shows 1 channel).
img = Image.open(img_path)
img = np.array(img)
img = cv2.resize(img, (224, 224))

# Build an NCHW float blob and scale pixel values to [0, 1].
input_blob = cv2.dnn.blobFromImage(img)
input_blob = input_blob / 255

model.setInput(input_blob)
logits = model.forward()  # raw class scores, shape (1, num_classes)
print('input_blob.shape:', input_blob.shape)
print('logits.shape:', logits.shape)
# BUG FIX: this print was labelled 'logits.shape:' while printing the values.
print('logits:', logits)

# Convert raw scores to probabilities with softmax (each row sums to 1).
logits = torch.from_numpy(logits)
preout = torch.softmax(logits, dim=1)
print('preout:', preout)
print('preout.shape:', preout.shape)
print('preout和:', torch.sum(preout))
运行结果:
input_blob.shape: (1, 1, 224, 224)
logits.shape: (1, 2)
logits.shape: [[-1.1753345 1.2695603]]
preout: tensor([[0.0798, 0.9202]])
preout.shape: torch.Size([1, 2])
preout和: tensor(1.)
Process finished with exit code 0
二、语义分割
import cv2
from PIL import Image
import numpy as np
import torch

# --- Semantic segmentation with an ONNX model via OpenCV's DNN module ---
img_path = './model_v1/1.bmp'
onnx_path = './model_v1/segment_model.onnx'  # 5-class segmentation model
model = cv2.dnn.readNet(onnx_path)

# Load the image and resize it to the network's input size.
img = Image.open(img_path)
img = np.array(img)
img = cv2.resize(img, (224, 224))

# Build an NCHW float blob and scale pixel values to [0, 1].
input_blob = cv2.dnn.blobFromImage(img)
input_blob = input_blob / 255

model.setInput(input_blob)
logits = model.forward()  # per-pixel class scores, shape (1, C, H, W)
print('input_blob.shape:', input_blob.shape)
print('logits.shape:', logits.shape)

logits = torch.from_numpy(logits)
# argmax over the channel axis picks the predicted class for every pixel.
# The softmax that used to precede it was redundant: softmax is monotonic
# per position, so argmax(softmax(x), dim=1) == argmax(x, dim=1).
preout = torch.argmax(logits, dim=1)
pre_mask = np.array(preout[0, :, :])  # (H, W) class-index mask
print('logits.shape:', logits.shape)
print('pre_mask.shape', pre_mask.shape)
运行结果:
input_blob.shape: (1, 1, 224, 224)
logits.shape: (1, 5, 224, 224)
logits.shape: torch.Size([1, 5, 224, 224])
pre_mask.shape (224, 224)
Process finished with exit code 0
三、目标检测
1.使用opencv调用(失败)
曾尝试参考上述方法,使用opencv进行调用,但发现模型输出的logits并不完整:cv2.dnn中model.forward()不带参数时只返回网络的第一个输出层,对于多输出的检测模型,需要改用model.forward(model.getUnconnectedOutLayersNames())才能取回全部输出。
import cv2
from PIL import Image
import numpy as np
import onnx

# --- Object detection with an ONNX model via OpenCV's DNN module ---
img_path = './model_v1/2.jpg'
onnx_path = './model_v1/detect.onnx'  # 3-channel input, 2-class detector
model = cv2.dnn.readNet(onnx_path)

# Load as RGB (the detector expects 3 channels) and resize to 416x416.
img = Image.open(img_path).convert('RGB')
img = np.array(img)
img = cv2.resize(img, (416, 416))

# Build an NCHW float blob and scale pixel values to [0, 1].
input_blob = cv2.dnn.blobFromImage(img)
input_blob = input_blob / 255
model.setInput(input_blob)

# BUG FIX: forward() with no arguments returns only the network's FIRST
# output layer, which is why the result looked incomplete for this
# multi-output detector. Requesting every unconnected output layer by name
# returns all feature maps, matching the onnxruntime result.
out_names = model.getUnconnectedOutLayersNames()
outputs = model.forward(out_names)
print('input_blob.shape:', input_blob.shape)
for i, out in enumerate(outputs):
    print(f'outputs[{i}].shape:', out.shape)
使用opencv调用结果:
input_blob.shape: (1, 3, 416, 416)
logits.shape: (1, 21, 52, 52)
Process finished with exit code 0
2.使用onnxruntime调用(成功)
import cv2
from PIL import Image
import numpy as np
import onnx
import onnxruntime

# --- Object detection with an ONNX model via onnxruntime ---
img_path = './model_v1/2.jpg'
onnx_path = './model_v1/detect.onnx'  # 3-channel input, 2-class detector

# Validate the model file before creating the inference session.
onnx_model = onnx.load(onnx_path)
onnx.checker.check_model(onnx_model)
ort_session = onnxruntime.InferenceSession(onnx_path)

# Load as RGB (the detector expects 3 channels) and resize to 416x416.
img = Image.open(img_path).convert('RGB')
img = np.array(img)
img = cv2.resize(img, (416, 416))

# Build an NCHW float blob and scale pixel values to [0, 1].
input_blob = cv2.dnn.blobFromImage(img)
input_blob = input_blob / 255

# ROBUSTNESS: look the input name up from the session instead of
# hard-coding 'images', so the script keeps working for models exported
# with a different input tensor name.
input_name = ort_session.get_inputs()[0].name
ori_inputs = {input_name: input_blob}
logits = ort_session.run(None, ori_inputs)  # list: one array per output

print(type(logits), len(logits))
print('input_blob.shape:', input_blob.shape)
for i, out in enumerate(logits):
    print(f'logits[{i}].shape:', out.shape)
使用onnxruntime调用结果:
<class 'list'> 3
input_blob.shape: (1, 3, 416, 416)
logits[0].shape: (1, 21, 13, 13)
logits[1].shape: (1, 21, 26, 26)
logits[2].shape: (1, 21, 52, 52)
Process finished with exit code 0
参考链接:
load_onnx_test