Deploying a Self-Trained ResNet (Python version)

Pipeline: xx.pth -> xx.onnx -> xx.trt

Environment used: torch 1.10.0; tensorrt 7.2.3.4; onnx 1.8.1

1. pth2onnx.py # Converts the PyTorch-trained model xx.pth to xx.onnx.

import torch
from model import resnet50   # import the model definition
import onnx
print(torch.__version__)

pth_filename = './resNet50.pth'  # trained weights
onnx_filename = './resNet50.onnx'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = resnet50(num_classes=10).to(device)  # adjust num_classes to your number of classes
net.load_state_dict(torch.load(pth_filename, map_location=device))
net.eval()
dummy_input = torch.randn(1, 3, 224, 224, device=device)
# Note: output_names must match the keys used in dynamic_axes,
# otherwise the dynamic batch dimension is silently ignored for that tensor.
torch.onnx.export(net, dummy_input, onnx_filename,
                  input_names=['input'], output_names=['output'],
                  export_params=True, verbose=False, opset_version=12,
                  dynamic_axes={'input': {0: "batch_size"},
                                'output': {0: "batch_size"}})

# Sanity-check the generated ONNX model.
test = onnx.load(onnx_filename)
onnx.checker.check_model(test)
print("==> Passed")

2. onnx2trt2infer.py # Converts to xx.trt and runs inference

import os
# This sample uses an ONNX ResNet50 model to create a TensorRT inference engine.
import numpy as np

# This import causes pycuda to automatically manage CUDA context creation and cleanup.
import pycuda.autoinit
import tensorrt as trt
from PIL import Image
import common
import json
import time

class ModelData(object):
    MODEL_PATH = "resNet50.onnx"
    INPUT_SHAPE = (3, 224, 224)
    # We can convert TensorRT data types to numpy types with trt.nptype()
    DTYPE = trt.float32

# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

# The ONNX path is used for ONNX models.
def get_engine_onnx(engine_file_path, model_file):
    def build_engine_onnx(model_file):
        builder = trt.Builder(TRT_LOGGER)
        network = builder.create_network(common.EXPLICIT_BATCH)
        config = builder.create_builder_config()
        parser = trt.OnnxParser(network, TRT_LOGGER)

        config.max_workspace_size = common.GiB(1)
        # Load the ONNX model and parse it in order to populate the TensorRT network.
        with open(model_file, 'rb') as model:
            if not parser.parse(model.read()):
                print('ERROR: Failed to parse the ONNX file.')
                for error in range(parser.num_errors):
                    print(parser.get_error(error))
                return None
        print("num layers:", network.num_layers)
        # The ONNX model was exported with a dynamic batch dimension; pin it to 1 here.
        network.get_input(0).shape = [1, 3, 224, 224]
        # Use build_engine(network, config) so the builder config (workspace size)
        # takes effect; build_cuda_engine(network) would ignore it.
        engine = builder.build_engine(network, config)
        if engine is None:
            print('ERROR: Failed to build the engine.')
            return None
        print("Completed creating engine")
        with open(engine_file_path, "wb") as f:
            f.write(engine.serialize())
        return engine

    if os.path.exists(engine_file_path):
        # If a serialized engine exists, load it instead of building a new one.
        print("Reading engine from file {}".format(engine_file_path))
        with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())
    return build_engine_onnx(model_file)

def load_normalized_test_case(test_image, pagelocked_buffer):
    # Converts the input image to a CHW Numpy array
    def normalize_image(image):
        # Resize, antialias and transpose the image to CHW.
        c, h, w = ModelData.INPUT_SHAPE
        image_arr = np.asarray(image.resize((w, h), Image.ANTIALIAS)).transpose([2, 0, 1]).astype(trt.nptype(ModelData.DTYPE)).ravel()
        # This particular ResNet50 model requires some preprocessing, specifically, mean normalization.
        return (image_arr / 255.0 - 0.45) / 0.225

    # Normalize the image and copy to pagelocked memory.
    np.copyto(pagelocked_buffer, normalize_image(Image.open(test_image)))
    return test_image


def main():
    test_image = "xx/xx.jpg"
    onnx_model_file = "./resNet50.onnx"
    engine_file_path = './resNet50.trt'

    # Read the class index -> class name mapping.
    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(json_path)
    with open(json_path, "r") as json_file:
        class_indict = json.load(json_file)

    # Build (or load) a TensorRT engine.
    engine = get_engine_onnx(engine_file_path, onnx_model_file)
    # Inference is the same regardless of which parser is used to build the engine, since the model architecture is the same.
    # Allocate buffers and create a CUDA stream.
    inputs, outputs, bindings, stream = common.allocate_buffers(engine)
    # Contexts are used to perform inference.
    context = engine.create_execution_context()

    # Load a normalized test case into the host input page-locked buffer.
    test_case = load_normalized_test_case(test_image, inputs[0].host)
    # Run the engine. The output is a 1D tensor of length num_classes, where each value
    # is the raw score that the image corresponds to that label.
    t1 = time.time()
    trt_outputs = common.do_inference_v2(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
    t2 = time.time()
    # Softmax over the raw scores; the index of the highest probability is the predicted label.
    predict = np.exp(trt_outputs[0]) / sum(np.exp(trt_outputs[0]))
    pred = class_indict[str(np.argmax(trt_outputs[0]))]
    print(test_image, np.argmax(predict), max(predict), pred)
    print("Inference speed (fps) with the TensorRT engine: {}".format(1 / (t2 - t1)))

    # The image filename is expected to contain the class name.
    if "_".join(pred.split()) in os.path.splitext(os.path.basename(test_case))[0]:
        print("Correctly recognized " + test_case + " as " + pred)
    else:
        print("Incorrectly recognized " + test_case + " as " + pred)


if __name__ == '__main__':
    main()
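
A single timed call, as in main(), is a noisy measurement: the first invocation in particular pays one-off CUDA and engine warm-up costs. The helper below is a minimal sketch (benchmark, n_warmup, and n_runs are hypothetical names/values, not part of the original script) that can be dropped into onnx2trt2infer.py and called with the objects created in main():

def benchmark(context, bindings, inputs, outputs, stream, n_warmup=10, n_runs=100):
    # Warm-up: exclude one-off initialization costs from the measurement.
    for _ in range(n_warmup):
        common.do_inference_v2(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
    # Timed runs: average over many calls for a stable latency/fps estimate.
    t1 = time.time()
    for _ in range(n_runs):
        common.do_inference_v2(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
    elapsed = time.time() - t1
    print("avg latency: {:.3f} ms, throughput: {:.1f} fps".format(
        1000.0 * elapsed / n_runs, n_runs / elapsed))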

2.1 Note: class_indices.json has the form

{
    "0": "class1",
    "1": "class2",
    ...
    "9": "class10"
}

It can be generated with the following code:

import os
import json
from torchvision import datasets

# image_path and data_transform come from your training script.
train_dataset = datasets.ImageFolder(root=os.path.join(image_path, "train"),
                                     transform=data_transform["train"])
# class_to_idx maps class name -> index; invert it to index -> class name.
cla_dict = dict((val, key) for key, val in train_dataset.class_to_idx.items())
# Write the dict into a JSON file.
json_str = json.dumps(cla_dict, indent=4)  # indent=4 for readability
with open('class_indices.json', 'w') as json_file:
    json_file.write(json_str)

2.2 common.py is the helper module from the official TensorRT samples

import argparse
import os

import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt

try:
    # Python 2 compatibility: FileNotFoundError does not exist there.
    FileNotFoundError
except NameError:
    FileNotFoundError = IOError

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

def GiB(val):
    return val * 1 << 30


def add_help(description):
    parser = argparse.ArgumentParser(description=description, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    args, _ = parser.parse_known_args()


def find_sample_data(description="Runs a TensorRT Python sample", subfolder="", find_files=[], err_msg=""):
    '''
    Parses sample arguments.
    Args:
        description (str): Description of the sample.
        subfolder (str): The subfolder containing data relevant to this sample
        find_files (List[str]): A list of filenames to find. Each filename will be replaced with an absolute path.
    Returns:
        str: Path of data directory.
    '''

    # Standard command-line arguments for all samples.
    kDEFAULT_DATA_ROOT = os.path.join(os.sep, "usr", "src", "tensorrt", "data")
    parser = argparse.ArgumentParser(description=description, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-d", "--datadir", help="Location of the TensorRT sample data directory, and any additional data directories.", action="append", default=[kDEFAULT_DATA_ROOT])
    args, _ = parser.parse_known_args()

    def get_data_path(data_dir):
        # If the subfolder exists, append it to the path, otherwise use the provided path as-is.
        data_path = os.path.join(data_dir, subfolder)
        if not os.path.exists(data_path):
            if data_dir != kDEFAULT_DATA_ROOT:
                print("WARNING: " + data_path + " does not exist. Trying " + data_dir + " instead.")
            data_path = data_dir
        # Make sure data directory exists.
        if not (os.path.exists(data_path)) and data_dir != kDEFAULT_DATA_ROOT:
            print("WARNING: {:} does not exist. Please provide the correct data path with the -d option.".format(data_path))
        return data_path

    data_paths = [get_data_path(data_dir) for data_dir in args.datadir]
    print(data_paths)
    return data_paths, locate_files(data_paths, find_files, err_msg)

def locate_files(data_paths, filenames, err_msg=""):
    """
    Locates the specified files in the specified data directories.
    If a file exists in multiple data directories, the first directory is used.
    Args:
        data_paths (List[str]): The data directories.
        filenames (List[str]): The names of the files to find.
    Returns:
        List[str]: The absolute paths of the files.
    Raises:
        FileNotFoundError if a file could not be located.
    """
    found_files = [None] * len(filenames)
    for data_path in data_paths:
        # Find all requested files.
        for index, (found, filename) in enumerate(zip(found_files, filenames)):
            if not found:
                file_path = os.path.abspath(os.path.join(data_path, filename))
                if os.path.exists(file_path):
                    found_files[index] = file_path

    # Check that all files were found
    for f, filename in zip(found_files, filenames):
        if not f or not os.path.exists(f):
            raise FileNotFoundError("Could not find {:}. Searched in data paths: {:}\n{:}".format(filename, data_paths, err_msg))
    return found_files

# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
    def __init__(self, host_mem, device_mem):
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)

    def __repr__(self):
        return self.__str__()

# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
    inputs = []
    outputs = []
    bindings = []
    stream = cuda.Stream()
    for binding in engine:
        size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Allocate host and device buffers
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to device bindings.
        bindings.append(int(device_mem))
        # Append to the appropriate list.
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
    return inputs, outputs, bindings, stream

# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
    # Transfer input data to the GPU.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Run inference.
    context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back from the GPU.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]

# This function is generalized for multiple inputs/outputs for full dimension networks.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference_v2(context, bindings, inputs, outputs, stream):
    # Transfer input data to the GPU.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Run inference.
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back from the GPU.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]
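
As a side note, TensorRT also ships the trtexec command-line tool, which can perform the ONNX-to-engine conversion step on its own, e.g. trtexec --onnx=resNet50.onnx --saveEngine=resNet50.trt (verify the flags against your installation). The Python script above is still needed for the actual preprocessing, inference, and postprocessing.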
