PyTorch 转化为 Relay IR 的详细过程，以一维反卷积算子（ConvTranspose1d）为例

# If script_module is not a list or tuple, wrap it in a single-element list
# so the conversion loop below can treat a lone module and a collection of
# modules uniformly.
if not isinstance(script_module, (list, tuple)):
   script_module = [script_module]
(Pdb) p script_module
[TempOpModel(
  original_name=TempOpModel
  (convtrans): ConvTranspose1d(original_name=ConvTranspose1d)
)]
    # Convert each TorchScript module independently. NOTE(review): this is an
    # excerpt from TVM's PyTorch frontend; the enclosing function and the rest
    # of the loop body are not shown here.
    for module_ in script_module:
        # convert one script_module
        ret_name = None  # name(s) of the graph's return value; assigned later (not shown)
        # Work on a copy so the JIT passes below don't mutate the module's own graph.
        graph = module_.graph.copy()
        # Table mapping Torch op names (e.g. "aten::sub") to Relay converter functions.
        convert_map = _get_convert_map(prelude)
        # Run torch.jit graph passes on the copy — presumably inlining, which would
        # explain prim::CallMethod being expanded into aten::_convolution in the
        # later graph dump; TODO confirm against _run_jit_passes.
        _run_jit_passes(graph)

(Pdb) p module_
TempOpModel(
  original_name=TempOpModel
  (convtrans): ConvTranspose1d(original_name=ConvTranspose1d)
)

(Pdb) p graph
graph(%self.1 : __torch__.TempOpModel,
      %input : Float(1:48, 3:16, 16:1)):
  %2 : __torch__.torch.nn.modules.conv.ConvTranspose1d = prim::GetAttr[name="convtrans"](%self.1)
  %3 : Tensor = prim::CallMethod[name="forward"](%2, %input)
  return (%3)

def _get_convert_map(prelude):
    init_convert_map = {}
    init_convert_map.update({
        "aten::device"                          : _none(),
        "prim::device"                          : _none(),
        "aten::sub"                             : _elemwise("subtract", prelude),
        "aten::sub_"                            : _elemwise("subtract", prelude),

(Pdb) p convert_map
{'aten::device': <function _none.<locals>._impl at 0x7faa842a6510>, 
'prim::device': <function _none.<locals>._impl at 0x7faa8429f378>, 
'aten::sub': <function _elemwise.<locals>._impl at 0x7faa8429f8c8>, 
'aten::sub_': <function _elemwise.<locals>._impl at 0x7faa8424f730>, 
op_names = get_all_op_names(graph)

(Pdb) p op_names
{'prim::GetAttr', 'aten::_convolution', 'prim::Constant', 'prim::ListConstruct'}
(Pdb) p opLocation
{'prim::GetAttr': ['<boundmethodPyCapsule.pynameof%2:__torch__.torch.nn.modules.conv.ConvTranspose1d=prim::GetAttr[name="convtrans"](%self.1)', '<boundmethodPyCapsule.pynameof%4:Tensor=prim::GetAttr[name="weight"](%2)'], 

'prim::Constant': ['<boundmethodPyCapsule.pynameof%5:None=prim::Constant(),scope:__module.convtrans', '/home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0', '/home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0', '/home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0', '/home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0', '/home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0', '/home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0', '/home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0', '/home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0', '/home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0'], 

'prim::ListConstruct': ['<boundmethodPyCapsule.pynameof%7:int[]=prim::ListConstruct(%6),scope:__module.convtrans', '<boundmethodPyCapsule.pynameof%9:int[]=prim::ListConstruct(%8),scope:__module.convtrans', '<boundmethodPyCapsule.pynameof%11:int[]=prim::ListConstruct(%10),scope:__module.convtrans', '<boundmethodPyCapsule.pynameof%14:int[]=prim::ListConstruct(%13),scope:__module.convtrans'], 'aten::_convolution': ['/home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0']}

(Pdb) p pythonOpInfos
{}
(Pdb) p params
OrderedDict([('convtrans.weight', 
tensor([[[ 0.0520,  0.1074,  0.0509,  0.0178,  0.1456],
         [ 0.0237, -0.0109, -0.0673,  0.1000,  0.1637],
         [-0.0981,  0.1803,  0.0272, -0.1613, -0.0457],
         [ 0.0216,  0.1088, -0.0380, -0.0591,  0.0621],
         [ 0.1351, -0.1398,  0.1233, -0.1507,  0.0476],
         [-0.0418,  0.0477, -0.0323,  0.0225, -0.0247]],

        [[ 0.0821,  0.0348,  0.0869, -0.1178, -0.0358],
         [-0.1481, -0.1062,  0.0944,  0.1560, -0.0784],
         [-0.0303, -0.0039,  0.0741, -0.1273, -0.0394],
         [-0.0064, -0.0620, -0.0491,  0.1280, -0.0826],
         [-0.0433,  0.0356, -0.0259,  0.1456, -0.0096],
         [ 0.0193,  0.0408, -0.0739, -0.1786, -0.0497]],

        [[-0.0880, -0.1268,  0.1423, -0.1029,  0.0612],
         [-0.0743, -0.1109,  0.1244,  0.1312,  0.0921],
         [-0.0825,  0.1303, -0.0542,  0.1304, -0.1414],
         [ 0.0201,  0.0802, -0.1437,  0.0261,  0.0705],
         [-0.1359, -0.1803, -0.1464, -0.0557,  0.1685],
         [-0.0962,  0.1749, -0.0538,  0.0455,  0.0916]]]))])

(Pdb) p all_graph_input
{'input': Var(data0, ty=TensorType([1, 3, 16], float32))}
(Pdb) p outputs
{'input': Var(data0, ty=TensorType([1, 3, 16], float32))}
(Pdb)

(Pdb) p param_vars
{'4': Var(convtrans.weight, ty=TensorType([3, 6, 5], float32))}
(Pdb) p tensors
{'convtrans.weight': <tvm.nd.NDArray shape=(3, 6, 5), cpu(0)>
array([[[ 0.05204504,  0.10737879,  0.05085992,  0.01778644,
          0.14557894],
        [ 0.02373412, -0.01094745, -0.06727038,  0.10004427,
          0.16368182],
        [-0.09806217,  0.18027371,  0.0271742 , -0.16134538,
         -0.04567321],
        [ 0.02157847,  0.10881242, -0.03795313, -0.05906598,
          0.06207118],
        [ 0.13505211, -0.13979703,  0.1232866 , -0.15066983,
          0.04760224],
        [-0.04178976,  0.04774293, -0.03234811,  0.02252495,
         -0.02472406]],

       [[ 0.08207271,  0.03483064,  0.08690766, -0.11780085,
         -0.03578456],
        [-0.1481149 , -0.10615434,  0.09444034,  0.15595832,
         -0.07839983],
        [-0.03033054, -0.00392053,  0.07410107, -0.12734742,
         -0.03937349],
        [-0.00643608, -0.06196438, -0.04909575,  0.12800507,
         -0.08259162],
        [-0.04331389,  0.03562476, -0.02590224,  0.14555703,
         -0.00957916],
        [ 0.01929455,  0.04076916, -0.0738614 , -0.17856503,
         -0.04972638]],

       [[-0.08797599, -0.126846  ,  0.1423104 , -0.10286964,
          0.06122406],
        [-0.07428135, -0.11089861,  0.12439905,  0.13115016,
          0.09209503],
        [-0.08250588,  0.13028632, -0.05417855,  0.13035387,
         -0.14136499],
        [ 0.02007781,  0.08017329, -0.14367329,  0.02614663,
          0.07046822],
        [-0.13585632, -0.18025172, -0.14644973, -0.05570375,
          0.16851662],
        [-0.09621125,  0.17488135, -0.05382039,  0.04552593,
          0.09159414]]], dtype=float32)}
(Pdb) p packed_param_map
{}
(Pdb)

(Pdb) p tvm_params
{'convtrans.weight': <tvm.nd.NDArray shape=(3, 6, 5), cpu(0)>
array([[[ 0.05204504,  0.10737879,  0.05085992,  0.01778644,
          0.14557894],
        [ 0.02373412, -0.01094745, -0.06727038,  0.10004427,
          0.16368182],
        [-0.09806217,  0.18027371,  0.0271742 , -0.16134538,
         -0.04567321],
        [ 0.02157847,  0.10881242, -0.03795313, -0.05906598,
          0.06207118],
        [ 0.13505211, -0.13979703,  0.1232866 , -0.15066983,
          0.04760224],
        [-0.04178976,  0.04774293, -0.03234811,  0.02252495,
         -0.02472406]],

       [[ 0.08207271,  0.03483064,  0.08690766, -0.11780085,
         -0.03578456],
        [-0.1481149 , -0.10615434,  0.09444034,  0.15595832,
         -0.07839983],
        [-0.03033054, -0.00392053,  0.07410107, -0.12734742,
         -0.03937349],
        [-0.00643608, -0.06196438, -0.04909575,  0.12800507,
         -0.08259162],
        [-0.04331389,  0.03562476, -0.02590224,  0.14555703,
         -0.00957916],
        [ 0.01929455,  0.04076916, -0.0738614 , -0.17856503,
         -0.04972638]],

       [[-0.08797599, -0.126846  ,  0.1423104 , -0.10286964,
          0.06122406],
        [-0.07428135, -0.11089861,  0.12439905,  0.13115016,
          0.09209503],
        [-0.08250588,  0.13028632, -0.05417855,  0.13035387,
         -0.14136499],
        [ 0.02007781,  0.08017329, -0.14367329,  0.02614663,
          0.07046822],
        [-0.13585632, -0.18025172, -0.14644973, -0.05570375,
          0.16851662],
        [-0.09621125,  0.17488135, -0.05382039,  0.04552593,
          0.09159414]]], dtype=float32)}

(Pdb) p outputs
{
'input': Var(data0, ty=TensorType([1, 3, 16], float32)), 
 '4': Var(convtrans.weight, ty=TensorType([3, 6, 5], float32))
}

(Pdb) p ret_name
['19']
(Pdb) p graph
graph(%self.1 : __torch__.TempOpModel,
      %input : Float(1:48, 3:16, 16:1)):
  %2 : __torch__.torch.nn.modules.conv.ConvTranspose1d = prim::GetAttr[name="convtrans"](%self.1)
  %4 : Tensor = prim::GetAttr[name="weight"](%2)
  %5 : None = prim::Constant(), scope: __module.convtrans
  %6 : int = prim::Constant[value=2](), scope: __module.convtrans # /home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0
  %7 : int[] = prim::ListConstruct(%6), scope: __module.convtrans
  %8 : int = prim::Constant[value=1](), scope: __module.convtrans # /home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0
  %9 : int[] = prim::ListConstruct(%8), scope: __module.convtrans
  %10 : int = prim::Constant[value=1](), scope: __module.convtrans # /home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0
  %11 : int[] = prim::ListConstruct(%10), scope: __module.convtrans
  %12 : bool = prim::Constant[value=1](), scope: __module.convtrans # /home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0
  %13 : int = prim::Constant[value=0](), scope: __module.convtrans # /home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0
  %14 : int[] = prim::ListConstruct(%13), scope: __module.convtrans
  %15 : int = prim::Constant[value=1](), scope: __module.convtrans # /home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0
  %16 : bool = prim::Constant[value=0](), scope: __module.convtrans # /home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0
  %17 : bool = prim::Constant[value=0](), scope: __module.convtrans # /home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0
  %18 : bool = prim::Constant[value=1](), scope: __module.convtrans # /home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0
  %19 : Float(1:198, 6:33, 33:1) = aten::_convolution(%input, %4, %5, %7, %9, %11, %12, %14, %15, %16, %17, %18), scope: __module.convtrans # /home//.virtualenvs/pytest_main/lib/python3.6/site-packages/torch/nn/modules/conv.py:744:0
  return (%19)

(Pdb) p ret
[[CallNode(Op(squeeze), 
[CallNode(Op(nn.conv2d), 
[CallNode(Op(nn.dilate), 
[CallNode(Op(expand_dims), [Var(data0, ty=TensorType([1, 3, 16], float32))], 
relay.attrs.ExpandDimsAttrs(0x3ad01e0), [])], relay.attrs.DilateAttrs(0x3af3a70), 
[]), 
CallNode(Op(transpose), 
[CallNode(Op(reshape), 
[CallNode(Op(reverse), 
[CallNode(Op(reshape), 
[CallNode(Op(expand_dims), [Var(convtrans.weight, ty=TensorType([3, 6, 5], float32))], relay.attrs.ExpandDimsAttrs(0x3acf0c0), [])], 
relay.attrs.ReshapeAttrs(0x3a85790), [])], relay.attrs.ReverseAttrs(0x3acf590), [])], relay.attrs.ReshapeAttrs(0x3af3430), [])], relay.attrs.TransposeAttrs(0x3af3750), [])], relay.attrs.Conv2DAttrs(0x3aa8860), [])], relay.attrs.SqueezeAttrs(0x3ad0510), [])]]

(Pdb) p func
FunctionNode([Var(data0, ty=TensorType([1, 3, 16], float32)), 
Var(convtrans.weight, ty=TensorType([3, 6, 5], float32))], (nullptr), CallNode(Op(squeeze), [CallNode(Op(nn.conv2d), [CallNode(Op(nn.dilate), [CallNode(Op(expand_dims), [Var(data0, ty=TensorType([1, 3, 16], float32))], relay.attrs.ExpandDimsAttrs(0x3ad01e0), [])], relay.attrs.DilateAttrs(0x3af3a70), []), CallNode(Op(transpose), [CallNode(Op(reshape), [CallNode(Op(reverse), [CallNode(Op(reshape), [CallNode(Op(expand_dims), [Var(convtrans.weight, ty=TensorType([3, 6, 5], float32))], relay.attrs.ExpandDimsAttrs(0x3acf0c0), [])], relay.attrs.ReshapeAttrs(0x3a85790), [])], relay.attrs.ReverseAttrs(0x3acf590), [])], relay.attrs.ReshapeAttrs(0x3af3430), [])], relay.attrs.TransposeAttrs(0x3af3750), [])], relay.attrs.Conv2DAttrs(0x3aa8860), [])], relay.attrs.SqueezeAttrs(0x3ad0510), []), [], (nullptr))

  • 3
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值