pytorch模型转onnx
有torch.jit.trace和torch.jit.script两种方式
trace模式使用用户提供的测试数据输入进去执行模型,然后跟踪执行过程构图,然后保存构建的模型。比较简单,成功率高,缺点是无法支持与输入数据内容相关的动态特性。比如说如果代码有个分支或者循环根据tensor内容而不是python常数选择走哪个分支和确定循环次数,trace根据用户提供的这一个数据执行结果来静态决定。因此如果代码有这种特性,即使导出了模型,模型本身可能也无法得到正确的结果!遇到这种情况,导出过程ONNX只会给一些警告,如:
TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
if self.pe.size(0) < step + emb.size(0):
script方式需要把module改造成script模型,有些模型代码的编写方式导致script不能直接支持转换,需要修改代码,因此转换难度更大,成功率较低。但是script模式能解决trace无法支持与输入数据内容相关的动态特性的问题。
jit trace模式 example 1
# Super Resolution model definition in PyTorch
import torch.nn as nn
import torch.nn.init as init
import torch.onnx
import torch.utils.model_zoo as model_zoo
class SuperResolutionNet(nn.Module):
    """Sub-pixel super-resolution network (ESPCN-style).

    Four conv layers extract features from a single-channel image, then
    ``nn.PixelShuffle`` rearranges the ``upscale_factor**2`` output channels
    into an image upscaled by ``upscale_factor`` along each spatial axis.
    """

    def __init__(self, upscale_factor, inplace=False):
        super(SuperResolutionNet, self).__init__()
        self.relu = nn.ReLU(inplace=inplace)
        # Kernel/stride/padding are chosen so every conv preserves the
        # spatial size; only the channel count changes until pixel_shuffle.
        self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
        self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
        self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
        self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))
        self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
        self._initialize_weights()

    def forward(self, x):
        out = self.relu(self.conv1(x))
        out = self.relu(self.conv2(out))
        out = self.relu(self.conv3(out))
        return self.pixel_shuffle(self.conv4(out))

    def _initialize_weights(self):
        # Orthogonal init; hidden layers are scaled by the ReLU gain.
        for conv in (self.conv1, self.conv2, self.conv3):
            init.orthogonal_(conv.weight, init.calculate_gain('relu'))
        init.orthogonal_(self.conv4.weight)
# Instantiate the model defined above.
torch_model = SuperResolutionNet(upscale_factor=3)
print("torch_model:", torch_model)

# Optionally restore pretrained weights before exporting:
# model_url = 'https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth'
# map_location = None
# torch_model.load_state_dict(model_zoo.load_url(model_url, map_location=map_location))

# Export must run in inference mode (disables dropout / batch-norm updates).
torch_model.eval()

batch_size = 1  # just a random number

# Dummy input: tracing executes the model once on this tensor.
x = torch.randn(batch_size, 1, 224, 224, requires_grad=True)
torch_out = torch_model(x)

# Trace-and-export to ONNX. input_names/output_names are the names stored in
# the ONNX graph (not PyTorch node names), and dynamic_axes marks the batch
# dimension of both tensors as variable-length.
torch.onnx.export(
    torch_model,
    x,
    "super_resolution.onnx",
    export_params=True,        # store the trained weights inside the model file
    opset_version=10,          # the ONNX opset version to target
    do_constant_folding=True,  # fold constant subgraphs at export time
    input_names=['input'],
    output_names=['output'],
    dynamic_axes={'input': {0: 'batch_size'},
                  'output': {0: 'batch_size'}},
)
输入输出的名称不需要像TF那样指定实际模型节点的名称,而是保存出模型onnx的输入输出名称,因此是可以改成其他名称的。
export的第二个参数(args)的最后一个元素也可以传入字典,例如:{"x": x, "beta": 2},此时字典中的键值对会作为模型forward的命名参数(关键字参数)传入。
多输入的一个特殊example
# Super Resolution model definition in PyTorch
import torch.nn as nn
import torch.nn.init as init
import torch.onnx
import torch.utils.model_zoo as model_zoo
class SuperResolutionNet(nn.Module):
    """Multi-input variant of the super-resolution network.

    ``forward`` takes a primary image plus a list of two extra images; all
    three pass through the shared ``conv1`` and are summed before the rest
    of the pipeline. Used to demonstrate exporting a model whose inputs
    include a list of tensors.
    """

    def __init__(self, upscale_factor, inplace=False):
        super(SuperResolutionNet, self).__init__()
        self.relu = nn.ReLU(inplace=inplace)
        # Every conv preserves spatial size; pixel_shuffle does the upscale.
        self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
        self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
        self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
        self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))
        self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
        self._initialize_weights()

    def forward(self, x, datas):
        # Run each input through the shared first conv, then merge by summing.
        extra1 = datas[0]
        extra2 = datas[1]
        merged = (self.relu(self.conv1(x))
                  + self.relu(self.conv1(extra1))
                  + self.relu(self.conv1(extra2)))
        merged = self.relu(self.conv2(merged))
        merged = self.relu(self.conv3(merged))
        return self.pixel_shuffle(self.conv4(merged))

    def _initialize_weights(self):
        # Orthogonal init; hidden layers are scaled by the ReLU gain.
        for conv in (self.conv1, self.conv2, self.conv3):
            init.orthogonal_(conv.weight, init.calculate_gain('relu'))
        init.orthogonal_(self.conv4.weight)
# Instantiate the multi-input model defined above.
torch_model = SuperResolutionNet(upscale_factor=3)
print("torch_model:", torch_model)

# Optionally restore pretrained weights before exporting:
# model_url = 'https://s3.amazonaws.com/pytorch/test_data/export/superres_epoch100-44c6958e.pth'
# map_location = None
# torch_model.load_state_dict(model_zoo.load_url(model_url, map_location=map_location))

# Export must run in inference mode.
torch_model.eval()

batch_size = 1  # just a random number

# Three dummy inputs: one primary image and two extras passed in a list.
x = torch.randn(batch_size, 1, 224, 224, requires_grad=True)
x1 = torch.randn(batch_size, 1, 224, 224, requires_grad=True)
x2 = torch.randn(batch_size, 1, 224, 224, requires_grad=True)

# The args tuple mirrors forward(x, datas): the nested list is flattened
# into separate graph inputs, hence three entries in input_names.
torch.onnx.export(
    torch_model,
    (x, [x1, x2]),
    "super_resolution.onnx",
    export_params=True,        # store the trained weights inside the model file
    opset_version=10,          # the ONNX opset version to target
    do_constant_folding=True,  # fold constant subgraphs at export time
    input_names=['input', 'input1', 'input2'],
    output_names=['output'],
    dynamic_axes={'input': {0: 'batch_size'},
                  'output': {0: 'batch_size'}},
)
以trace方式导出clip模型的example
https://github.com/OFA-Sys/Chinese-CLIP
import torch
from PIL import Image
import torch.nn as nn
import cn_clip.clip as clip
from cn_clip.clip import load_from_name, available_models
# Demo: run Chinese-CLIP (RN50) on one image against four candidate captions.
print("Available models:", available_models())
# Available models: ['ViT-B-16', 'ViT-L-14', 'ViT-L-14-336', 'ViT-H-14', 'RN50']

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = load_from_name("RN50", device=device, download_root='./')
model.eval()

# Preprocess one image into a (1, C, H, W) batch; tokenize the captions.
image = preprocess(Image.open("pokemon.jpeg")).unsqueeze(0).to(device)
text = clip.tokenize(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"]).to(device)

with torch.no_grad():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    # Normalize the features; downstream tasks should use the normalized
    # image/text features.
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)

    logits_per_image, logits_per_text = model.get_similarity(image, text)
    probs = logits_per_image.softmax(dim=-1).cpu().numpy()

print("Label probs:", probs)  # [[1.268734e-03 5.436878e-02 6.795761e-04 9.436829e-01]]
class ImgModelWrapper(nn.Module):
    """Wraps Chinese-CLIP so forward() maps an image batch directly to
    L2-normalized image embeddings — the single-input/single-output shape
    that torch.onnx.export needs."""

    def __init__(self):
        super(ImgModelWrapper, self).__init__()
        self.model, self.preprocess = load_from_name("RN50", device=device, download_root='./')
        self.model.eval()

    def forward(self, image):
        feats = self.model.encode_image(image)
        # Normalize so downstream similarity is a plain dot product.
        return feats / feats.norm(dim=-1, keepdim=True)
class TxtModelWrapper(nn.Module):
    """Wraps Chinese-CLIP so forward() maps tokenized text directly to
    L2-normalized text embeddings for torch.onnx.export.

    Bug fix: the original forward() ignored its argument and encoded the
    module-level global ``text`` instead, so the traced ONNX graph had no
    real data dependency on the model input and could silently reproduce
    the capture-time tokens for every input.
    """

    def __init__(self):
        super(TxtModelWrapper, self).__init__()
        self.model, self.preprocess = load_from_name("RN50", device=device, download_root='./')
        self.model.eval()

    def forward(self, text):
        # Encode the *argument*, not the global, so tracing records the
        # input tensor as a true graph input.
        text_features = self.model.encode_text(text)
        text_features /= text_features.norm(dim=-1, keepdim=True)
        return text_features
# Re-preprocess the demo image, then trace-export both wrappers to ONNX.
image = preprocess(Image.open("pokemon.jpeg")).unsqueeze(0).to(device)
img_model = ImgModelWrapper()
img_model.eval()
txt_model = TxtModelWrapper()
txt_model.eval()

# Image tower: spatial dims are marked dynamic so other resolutions work.
# NOTE(review): the axis label 'hight' is a typo for 'height'; it is only a
# name stored in the ONNX graph, left unchanged here to preserve the output.
torch.onnx.export(img_model,                 # model being run
                  image,                     # model input (or a tuple for multiple inputs)
                  "vit_cn_img.onnx",         # where to save the model (can be a file or file-like object)
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=15,          # the ONNX version to export the model to
                  do_constant_folding=False, # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {2: 'hight', 3: 'width'}})

# Text tower: the token-sequence dimension is marked dynamic.
torch.onnx.export(txt_model,                 # model being run
                  text,                      # model input (or a tuple for multiple inputs)
                  "vit_cn_txt.onnx",         # where to save the model (can be a file or file-like object)
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=15,          # the ONNX version to export the model to
                  do_constant_folding=False, # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {1: 'seq_len'}})
scaled_dot_product_attention不支持错误
exporting the operator aten::scaled_dot_product_attention to onnx opset version 14 is not supported
docs/source/en/optimization/torch2.0.mdx:33: PyTorch 2.0 includes an optimized and memory-efficient attention implementation through the [`torch.nn.functional.scaled_dot_product_attention
尝试把pytorch版本降低为1.13.1
或者使用最新的 Preview (Nightly) 版本,该版本已修复这个问题。