PyTorch to ONNX: Is there any way to convert a pretrained model from PyTorch to ONNX?

I trained a StarGAN model on my custom dataset.

I need to convert this model from .pth (PyTorch) to .pb so that I can use it in Android Studio.

I have searched a lot and found several ways to do the conversion.

However, none of those solutions work in my case.

I tried a small network with only a single nn.Linear layer.

On that network, the solutions worked perfectly!
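For reference, a minimal sketch of the kind of export that succeeded on that toy model (the layer sizes, dummy-input shape, and file name below are placeholders I am adding for illustration, not my exact code):

    import torch
    import torch.nn as nn

    # Toy model with a single Linear layer (sizes are placeholders).
    toy = nn.Linear(10, 2)
    toy.eval()

    # ONNX export traces the model with a dummy input of the expected shape.
    dummy = torch.randn(1, 10)

    torch.onnx.export(toy, dummy, "toy_linear.onnx",
                      input_names=["input"], output_names=["output"])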

I guess the conversion does not work because my network includes Conv2D and MaxPooling2D layers.

First of all, here is my network (StarGAN):

    import torch
    import torch.nn as nn
    import numpy as np


    class ResidualBlock(nn.Module):
        """Residual block with instance normalization."""
        def __init__(self, dim_in, dim_out):
            super(ResidualBlock, self).__init__()
            self.main = nn.Sequential(
                nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
                nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True),
                nn.ReLU(inplace=True),
                nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
                nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True))

        def forward(self, x):
            return x + self.main(x)


    class Generator(nn.Module):
        """Generator network."""
        def __init__(self, conv_dim=64, c_dim=5, repeat_num=6):
            super(Generator, self).__init__()

            layers = []
            layers.append(nn.Conv2d(3 + c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
            layers.append(nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True))
            layers.append(nn.ReLU(inplace=True))

            # Down-sampling layers.
            curr_dim = conv_dim
            for _ in range(2):
                layers.append(nn.Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1, bias=False))
                layers.append(nn.InstanceNorm2d(curr_dim * 2, affine=True, track_running_stats=True))
                layers.append(nn.ReLU(inplace=True))
                curr_dim = curr_dim * 2

            # Bottleneck (residual) layers.
            for _ in range(repeat_num):
                layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))

            # Up-sampling layers.
            for _ in range(2):
                layers.append(nn.ConvTranspose2d(curr_dim, curr_dim // 2, kernel_size=4, stride=2, padding=1, bias=False))
                layers.append(nn.InstanceNorm2d(curr_dim // 2, affine=True, track_running_stats=True))
                layers.append(nn.ReLU(inplace=True))
                curr_dim = curr_dim // 2

            layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
            layers.append(nn.Tanh())
            self.main = nn.Sequential(*layers)

        def forward(self, x, c):
            # Replicate the domain label spatially and concatenate it with the image.
            c = c.view(c.size(0), c.size(1), 1, 1)
            c = c.repeat(1, 1, x.size(2), x.size(3))
            x = torch.cat([x, c], dim=1)
            return self.main(x)


    class Discriminator(nn.Module):
        """Discriminator network."""
        def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6):
            super(Discriminator, self).__init__()
            layers = []
            layers.append(nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1))
            layers.append(nn.LeakyReLU(0.01))

            curr_dim = conv_dim
            for _ in range(1, repeat_num):
                layers.append(nn.Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1))
                layers.append(nn.LeakyReLU(0.01))
                curr_dim = curr_dim * 2

            kernel_size = int(image_size / np.power(2, repeat_num))
            self.main = nn.Sequential(*layers)
            self.conv1 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
            self.conv2 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)

        def forward(self, x):
            h = self.main(x)
            out_src = self.conv1(h)
            out_cls = self.conv2(h)
            return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))

Here is the error message:

[error message block not preserved in this copy of the post]

Is there any way to do this conversion? Please help me.
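To make the question concrete, this is the general shape of the export call I have in mind for the Generator (a sketch only: the checkpoint file name, the 128x128 input size, and the opset version are placeholders of my own, not details from the original training setup):

    import torch

    # Generator is the class defined above.
    # Placeholders: checkpoint path, 128x128 RGB input, c_dim=5 labels, opset 11.
    G = Generator(conv_dim=64, c_dim=5, repeat_num=6)
    G.load_state_dict(torch.load("generator.pth", map_location="cpu"))
    G.eval()

    # The Generator's forward() takes two inputs (image x and label c),
    # so the dummy arguments are passed to the exporter as a tuple.
    dummy_x = torch.randn(1, 3, 128, 128)
    dummy_c = torch.randn(1, 5)

    torch.onnx.export(G, (dummy_x, dummy_c), "generator.onnx",
                      input_names=["image", "label"],
                      output_names=["fake_image"],
                      opset_version=11)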
