Creating an MLP model in PyTorch and exporting PyTorch models to ONNX: a summary

While converting a PyTorch model to ONNX recently, I trained an MLP model from scratch, saved it as .pth files in different ways, and then successfully exported it to ONNX with torch.onnx. Note that this example does not cover the dynamic shape case; I may write about that separately later.
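
Before the full script, here is a minimal sketch of the two saving styles used below (the file names here are placeholders): saving the whole model object versus saving only the state_dict, and how each one is loaded back.

import torch
from torch import nn

net = nn.Linear(4, 2)  # any nn.Module will do

# Style 1: save the whole model object (pickles the object, so the class must be importable at load time)
torch.save(net, 'whole.pth')
net_loaded = torch.load('whole.pth')

# Style 2: save only the weights (state_dict); the model must be re-created before loading
torch.save(net.state_dict(), 'weights.pth')
net_restored = nn.Linear(4, 2)
net_restored.load_state_dict(torch.load('weights.pth'))

The full training and export script follows.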

import os
import torch
from torch import nn
from torchvision.datasets import CIFAR10
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.onnx


class MLP(nn.Module):
    '''
      Multilayer Perceptron.
    '''

    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Flatten(),
            nn.Linear(32 * 32 * 3, 64),
            nn.ReLU(),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        '''Forward pass'''
        return self.layers(x)


def test_model_load():
    # Load the untrained model that was saved as a whole object
    model_origin = torch.load('mlp_origin.pth')
    print(model_origin)
    # Load the trained model that was saved as a whole object
    model = torch.load('mlp_whole.pth')
    print(model)
    # Load only the trained weights (state_dict)
    weights = torch.load('mlp_weights.pth')
    print(weights)
    return None

def test_onnx_export():
    # model = torch.load('mlp_whole.pth')  # load the whole model directly; requires the saved .pth to contain the network definition
    model = MLP()  # create the network
    model.load_state_dict(torch.load('mlp_weights.pth'))  # load the weights for the corresponding layers
    model.eval()  # switch to inference mode
    # Input to the model
    x = torch.randn(1, 3, 32, 32, requires_grad=True)
    torch_out = model(x)

    # Export the model
    torch.onnx.export(model,  # model being run
                      x,  # model input (or a tuple for multiple inputs)
                      "mlp_test1.onnx",  # where to save the model (can be a file or file-like object)
                      export_params=True,  # store the trained parameter weights inside the model file
                      opset_version=12,  # the ONNX version to export the model to
                      do_constant_folding=True,  # whether to execute constant folding for optimization
                      input_names=['input'],  # the model's input names
                      output_names=['output'],  # the model's output names
                      # dynamic_axes={'input': {0: 'batch_size'},  # variable length axes
                      #               'output': {0: 'batch_size'}}
                    )


if __name__ == '__main__':

    # After training, uncomment these calls to test loading and ONNX export:
    # test_model_load()
    # test_onnx_export()

    # Set fixed random number seed
    torch.manual_seed(42)

    # Prepare CIFAR-10 dataset
    dataset = CIFAR10(os.getcwd(), download=True, transform=transforms.ToTensor())  # download if not already present
    trainloader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=1)

    # Initialize the MLP
    mlp = MLP()
    torch.save(mlp, 'mlp_origin.pth')
    # Define the loss function and optimizer
    loss_function = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(mlp.parameters(), lr=1e-4)

    # Run the training loop
    for epoch in range(0, 2):  # 2 epochs

        # Print epoch
        print(f'Starting epoch {epoch + 1}')

        # Set current loss value
        current_loss = 0.0

        # Iterate over the DataLoader for training data
        for i, data in enumerate(trainloader, 0):

            # Get inputs
            inputs, targets = data

            # Zero the gradients
            optimizer.zero_grad()

            # Perform forward pass
            outputs = mlp(inputs)

            # Compute loss
            loss = loss_function(outputs, targets)

            # Perform backward pass
            loss.backward()

            # Perform optimization
            optimizer.step()

            # Print statistics
            current_loss += loss.item()
            if i % 500 == 499:
                print('Loss after mini-batch %5d: %.3f' %
                      (i + 1, current_loss / 500))
                current_loss = 0.0

    # Process is complete.
    torch.save(mlp.state_dict(), 'mlp_weights.pth')
    torch.save(mlp, 'mlp_whole.pth')
    print('Training process has finished.')
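
After the export, it is worth verifying that the ONNX file is valid and produces the same outputs as the original PyTorch model. The snippet below is a minimal verification sketch, not part of the original script; it assumes the onnx and onnxruntime packages are installed and reuses the file names from above.

import numpy as np
import onnx
import onnxruntime


def test_onnx_verify():
    # Structural check of the exported graph
    onnx_model = onnx.load("mlp_test1.onnx")
    onnx.checker.check_model(onnx_model)

    # Re-create the PyTorch model so both runtimes use the same weights
    model = MLP()
    model.load_state_dict(torch.load('mlp_weights.pth'))
    model.eval()

    x = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        torch_out = model(x)

    # Run the same input through ONNX Runtime (CPU) and compare outputs
    sess = onnxruntime.InferenceSession("mlp_test1.onnx", providers=['CPUExecutionProvider'])
    input_name = sess.get_inputs()[0].name  # 'input', as set in torch.onnx.export
    ort_out = sess.run(None, {input_name: x.numpy()})[0]

    np.testing.assert_allclose(torch_out.numpy(), ort_out, rtol=1e-03, atol=1e-05)
    print("ONNX Runtime output matches the PyTorch output.")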

The MLP part is adapted from: https://github.com/christianversloot/machine-learning-articles/blob/main/creating-a-multilayer-perceptron-with-pytorch-and-lightning.md
