Classification model: ONNX Runtime inference

data_process.py

from PIL import Image
import numpy as np
import numbers

# Resize an image so that its shorter side equals `size`, preserving the
# aspect ratio; `interpolation` selects the PIL resampling filter.
def img_resize(img, size, interpolation=Image.BILINEAR):
    if isinstance(size, int):
        w, h = img.size
        if (w <= h and w == size) or (h <= w and h == size):
            return img
        if w < h:
            ow = size
            oh = int(size * h / w)
            return img.resize((ow, oh), interpolation)
        else:
            oh = size
            ow = int(size * w / h)
            return img.resize((ow, oh), interpolation)
    else:
        return img.resize(size, interpolation)
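
# Example: a 400x600 image with size=256 becomes 256x384; the shorter side is
# scaled to `size` and the aspect ratio is preserved.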


# Crop a rectangular region from an image, given the top-left corner and size.
def img_crop(img, top, left, height, width):
    """
    Checks parameter types to ensure all arguments are integers, and checks
    the crop bounds to ensure the region lies entirely within the image.
    """
    if not (isinstance(top, int) and isinstance(left, int) and isinstance(height, int) and isinstance(width, int)):
        raise ValueError("All parameters must be integers.")

    if top < 0 or left < 0 or height <= 0 or width <= 0:
        raise ValueError("top/left must be non-negative and height/width must be positive.")

    if top + height > img.height or left + width > img.width:
        raise ValueError("Crop dimensions exceed image boundaries.")

    # PIL's crop box is (left, upper, right, lower)
    return img.crop((left, top, left + width, top + height))


def softmax(x):
    x = x.reshape(-1)
    e_x = np.exp(x - np.max(x))  # subtract the max for numerical stability
    return e_x / e_x.sum(axis=0)


def postprocess(result):
    # Flatten the raw logits and convert them to probabilities
    #return np.array(result).reshape(-1).tolist()
    return softmax(np.array(result)).tolist()

# Center-crop the resized image: the crop window is centered on the image.
def img_center_crop(img, output_size):
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    image_width, image_height = img.size

    crop_height, crop_width = output_size

    crop_top = int((image_height - crop_height) / 2)
    crop_left = int((image_width - crop_width) / 2)

    return img_crop(img, crop_top, crop_left, crop_height, crop_width)
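
# A minimal usage sketch (assumes the dog.JPEG file used later in this post):
# resize the shorter side to 256, center-crop to 224x224, and sanity-check
# that softmax output sums to 1.
if __name__ == "__main__":
    img = Image.open("dog.JPEG").convert("RGB")
    img = img_center_crop(img_resize(img, 256), (224, 224))
    print(img.size)  # (224, 224)
    print(softmax(np.array([1.0, 2.0, 3.0])).sum())  # 1.0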

run_resnet50.py

from data_process import *
import onnxruntime as ort
import heapq

def run_resnet50(image):

    # Read the ImageNet class labels, one per line
    labels = []
    with open("imagenet_lable.txt", 'r') as file:
        for line in file:
            labels.append(line.strip())

    image = img_resize(image, 256)              # for Inception V3, use 342 instead
    image = img_center_crop(image, (224, 224))  # for Inception V3, use (299, 299)

    # HWC uint8 image -> CHW float32 tensor
    image_data = np.array(image, dtype='float32').transpose(2, 0, 1)
   
    # Normalize with the per-channel ImageNet mean and standard deviation
    mean_vec = np.array([0.485, 0.456, 0.406])
    stddev_vec = np.array([0.229, 0.224, 0.225])
    norm_image_data = np.zeros(image_data.shape).astype('float32')

    for i in range(image_data.shape[0]):
        norm_image_data[i, :, :] = (image_data[i, :, :] / 255 - mean_vec[i]) / stddev_vec[i]
    norm_image_data = norm_image_data.reshape(1, 3, 224, 224).astype('float32')
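
    # A vectorized alternative (sketch): NumPy broadcasting replaces the
    # per-channel loop above and yields the same result.
    # norm_image_data = ((image_data / 255.0 - mean_vec[:, None, None])
    #                    / stddev_vec[:, None, None]
    #                    ).reshape(1, 3, 224, 224).astype('float32')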


    # Load the model and create an InferenceSession
    model_path = "resnet50.onnx"
    session = ort.InferenceSession(model_path)
    # Run inference on the preprocessed input tensor
    input_data = norm_image_data
    outputs = session.run(None, {"input": input_data})

    res = postprocess(outputs)

    # Indices of the five largest probabilities (top-5 predictions)
    indices = heapq.nlargest(5, range(len(res)), res.__getitem__)
    # print(indices)
    pred = np.argmax(res)  # top-1 class index
    return labels[pred]

if __name__ == "__main__":
    image = Image.open("dog.JPEG").convert("RGB")
    print(run_resnet50(image))
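
# Note: run_resnet50 rebuilds the InferenceSession on every call, which is
# wasteful when serving many requests. A sketch of the usual fix (hypothetical
# name _session) is to create the session once at module load:
#
#   _session = ort.InferenceSession("resnet50.onnx")
#
# and reuse it inside the function instead of calling ort.InferenceSession there.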

resnet_serve.py

from run_resnet50 import *
import gradio as gr

gr.close_all()
with gr.Blocks() as demo:
    with gr.Tab("Image Classification"):
        gr.Markdown("Demonstration of image classification")
        with gr.Row():
            input_image = gr.Image(sources=["upload"], label="Upload a picture", type="pil")
            output_text = gr.Text()
        gr.Examples(examples=["./dog.JPEG", "./dog.jpg", "erp.png", "./horses.jpg"], inputs=[input_image])
        button = gr.Button(value="Classification", variant="primary")
        button.click(run_resnet50, inputs=input_image, outputs=output_text)

demo.launch()
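
# launch() serves on localhost by default; if a temporary public URL is needed,
# Gradio supports demo.launch(share=True).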

export_onnx.py

import torch
import torch.onnx
from torchvision.models import resnet50, vgg16  # pretrained weights downloadable from torchvision

# Instantiate the model and load pretrained weights (ResNet-50 here, to match
# the resnet50.onnx file used by the inference script)
model = resnet50(pretrained=True)
#model = vgg16(pretrained=True)
#model = torchvision.models.quantization.resnet.QuantizableResNet(pretrained=True)
model.eval()

# Dummy input for the ONNX exporter. The classification networks here all take
# (224, 224); the one exception is Inception V3, which takes 299x299 (with a 342 resize)
x = torch.randn(1, 3, 224, 224, requires_grad=True)

# Export the model to an ONNX file
torch.onnx.export(model, x, "resnet50.onnx", verbose=True, input_names=["input"], output_names=["output"], dynamic_axes={"input": {0: "batch_size"}, "output": {0: "batch_size"}})

print("Model exported successfully to resnet50.onnx")

"""
import onnxruntime as ort
# Load the model and create InferenceSession
model_path = "resnet50.onnx"
session = ort.InferenceSession(model_path)
# Load and preprocess the input image inputTensor

import numpy as np

# 生成一个2x3的随机数数组
input_data = np.random.rand(5, 3, 224, 224).astype("float32")
print(input_data.shape)
# Run inference
outputs = session.run(None, {"input": input_data})
print(outputs[0].shape)
"""