mxnet转keras的记录 (Notes on converting an MXNet model to Keras)

import keras
from keras import layers
import numpy as np
import mxnet as mx
import cv2


def build_net():
    """Build the Keras counterpart of the MXNet landmark network.

    Layer names mirror the MXNet symbol names (conv1, prelu1, conv2_dw, ...)
    so that weights can later be copied over by name.

    Returns:
        keras.Model mapping a (48, 48, 3) image to the bn6_3 output.
    """
    def _dw_sep(t, idx, dw_kernel, sep_filters):
        # One depthwise-separable stage: depthwise conv + PReLU,
        # then 1x1 pointwise conv + PReLU.
        t = layers.DepthwiseConv2D(dw_kernel, padding="valid", use_bias=True,
                                   name="conv%d_dw" % idx)(t)
        t = layers.PReLU(name="prelu%d_dw" % idx, shared_axes=[1, 2])(t)
        t = layers.Conv2D(sep_filters, (1, 1), strides=(1, 1), use_bias=True,
                          name="conv%d_sep" % idx)(t)
        t = layers.PReLU(name="prelu%d_sep" % idx, shared_axes=[1, 2])(t)
        return t

    inp = layers.Input(shape=(48, 48, 3))

    x = layers.Conv2D(64, (3, 3), strides=(1, 1), padding="valid",
                      use_bias=True, name='conv1')(inp)
    x = layers.PReLU(name='prelu1', shared_axes=[1, 2])(x)
    x = layers.AveragePooling2D(pool_size=(3, 3), strides=(2, 2),
                                padding="same", name="pool1")(x)

    x = _dw_sep(x, 2, (3, 3), 128)
    x = layers.AveragePooling2D(pool_size=(3, 3), strides=(2, 2), name="pool2")(x)

    x = _dw_sep(x, 3, (3, 3), 256)
    x = layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2), name="pool3")(x)

    x = _dw_sep(x, 4, (2, 2), 512)
    x = _dw_sep(x, 5, (3, 3), 512)

    # The MXNet graph ends in FullyConnected + BatchNorm; Dense applied to a
    # 4-D tensor acts on the last axis, matching the layer names used below.
    x = layers.Dense(212, use_bias=True, name="conv6_3")(x)
    x = layers.BatchNormalization(name="bn6_3", momentum=0.9)(x)

    return keras.Model(inp, x)


def set_dwconv_weights(model, layer_name, arg_params, aux_params):
    """Copy a depthwise-conv kernel and bias from MXNet params into Keras.

    MXNet stores the depthwise kernel as (channels, multiplier, kH, kW);
    Keras DepthwiseConv2D expects (kH, kW, channels, multiplier) — TODO
    confirm against the checkpoint shapes. `aux_params` is unused but kept
    for a uniform setter signature.
    """
    layer = model.get_layer(layer_name)
    kernel_mx = arg_params[layer_name + "_weight"].asnumpy()
    bias_mx = arg_params[layer_name + "_bias"].asnumpy()
    # Axis permutation equivalent to the element-wise copy
    # keras[:, :, i, j] = mx[i, j, :, :].
    kernel_keras = np.transpose(kernel_mx, (2, 3, 0, 1))
    layer.set_weights([kernel_keras, bias_mx])

def set_conv_weights(model, layer_name, arg_params, aux_params):
    """Copy a standard conv kernel and bias from MXNet params into Keras.

    MXNet conv weight layout is (out, in, kH, kW); Keras Conv2D expects
    (kH, kW, in, out). `aux_params` is unused but kept for a uniform
    setter signature.
    """
    layer = model.get_layer(layer_name)
    kernel_keras, _ = layer.get_weights()
    kernel_mx = arg_params[layer_name + "_weight"].asnumpy()
    bias_mx = arg_params[layer_name + "_bias"].asnumpy()

    # Keep the original's diagnostic output.
    print(kernel_keras.shape)
    print(kernel_mx.shape)

    # Axis permutation equivalent to the element-wise copy
    # keras[l, k, j, i] = mx[i, j, l, k].
    layer.set_weights([np.transpose(kernel_mx, (2, 3, 1, 0)), bias_mx])

def set_leaky_weights(model, layer_name, arg_params, aux_params):
    """Copy an MXNet PReLU gamma into the matching Keras PReLU layer.

    MXNet stores gamma as a flat (C,) vector; a Keras PReLU built with
    shared_axes=[1, 2] stores alpha as (1, 1, C). `aux_params` is unused
    but kept for a uniform setter signature.
    """
    gamma = arg_params[layer_name + "_gamma"].asnumpy()
    layer = model.get_layer(layer_name)
    # Bug fix: the original passed a raw 4-D ndarray to set_weights, which
    # expects a *list* of arrays and only worked by accident of ndarray
    # iteration. Pass an explicit one-element list with the (1, 1, C) alpha.
    layer.set_weights([gamma.reshape(1, 1, -1)])

def set_bn_weights(model, layer_name, arg_params, aux_params):
    """Copy BatchNorm parameters from MXNet params into the Keras layer.

    gamma/beta come from arg_params; the moving statistics live in
    aux_params. Keras BatchNormalization stores its weights in the order
    [gamma, beta, moving_mean, moving_var].
    """
    gamma = arg_params[layer_name + "_gamma"].asnumpy()
    beta = arg_params[layer_name + "_beta"].asnumpy()
    moving_mean = aux_params[layer_name + "_moving_mean"].asnumpy()
    moving_var = aux_params[layer_name + "_moving_var"].asnumpy()

    model.get_layer(layer_name).set_weights([gamma, beta, moving_mean, moving_var])

def set_dense_weights(model, layer_name, arg_params, aux_params):
    """Copy a FullyConnected weight and bias from MXNet params into Keras.

    MXNet FullyConnected weight layout is (out, in); a Keras Dense kernel
    is (in, out), so a simple transpose suffices. `aux_params` is unused
    but kept for a uniform setter signature.
    """
    layer = model.get_layer(layer_name)
    weight_mx = arg_params[layer_name + "_weight"].asnumpy()
    bias_mx = arg_params[layer_name + "_bias"].asnumpy()
    # Equivalent to the original row-by-row copy keras[:, i] = mx[i, :].
    layer.set_weights([weight_mx.T, bias_mx])

if __name__ == "__main__":
    # Build the Keras graph, then load the pretrained MXNet checkpoint and
    # copy its parameters into the Keras model layer-by-layer (matched by name).
    model = build_net()
    model.summary()
    # Loads epoch 0 of checkpoint prefix 'models/lnet106-normal'.
    sym, arg_params, aux_params = mx.model.load_checkpoint('models/lnet106-normal', 0)
    # print(arg_params)
    # print([key for key, _ in arg_params.items()])
    # print([key for key,_ in aux_params.items()])

    # layer_name = "conv4_dw"
    # set_dwconv_weights(model,layer_name,arg_params)
    #
    # layer_name = "conv3_sep"
    # set_conv_weights(model,layer_name,arg_params,aux_params)

    # layer_name = "bn6_3"
    # set_bn_weights(model,layer_name,arg_params,aux_params)

    # layer_name = "conv6_3"
    # set_dense_weights(model,layer_name,arg_params,aux_params)

    # Layer groups; each group is handled by the setter that knows its
    # MXNet-vs-Keras weight layout.
    dw_layer_names = ["conv2_dw","conv3_dw","conv4_dw","conv5_dw"]
    conv_layer_names = ["conv1","conv2_sep","conv3_sep","conv4_sep","conv5_sep"]
    leaky_layer_names = ["prelu1","prelu2_dw","prelu2_sep","prelu3_dw","prelu3_sep","prelu4_dw","prelu4_sep","prelu5_dw","prelu5_sep"]
    dense_layer_names = ["conv6_3"]
    bn_layer_names = ["bn6_3"]

    for layer_name in dw_layer_names:
        print(layer_name)
        set_dwconv_weights(model,layer_name,arg_params,aux_params)
    print("set dw conv layers done!")

    for layer_name in conv_layer_names:
        print(layer_name)
        set_conv_weights(model,layer_name,arg_params,aux_params)
    print("set conv layers done!")

    for layer_name in leaky_layer_names:
        print(layer_name)
        set_leaky_weights(model,layer_name,arg_params,aux_params)
    print("set leaky layers done!")

    for layer_name in dense_layer_names:
        print(layer_name)
        set_dense_weights(model,layer_name,arg_params,aux_params)
    print("set dense layers done!")

    for layer_name in bn_layer_names:
        print(layer_name)
        set_bn_weights(model,layer_name,arg_params,aux_params)
    print("set bn layers done!")

    # Persist the converted weights, then reload to sanity-check the round trip.
    model.save_weights("frommx.h5")
    model.load_weights("frommx.h5")

    ###check out_put
    # print("+++++++++mx_out++++++++++++++++++++++++++")
    # internals = sym.get_internals()
    # print(internals)
    #
    # sym1 = internals[51]
    # # sym = internals['conv6_3']
    #
    # print(sym1)
    # mod = mx.mod.Module(symbol=mx.symbol.Group([sym, sym1]), context=mx.cpu(), label_names=None)
    # mod.bind(for_training=False, data_shapes=[('data', (1, 3, 48, 48))],
    #          label_shapes=mod._label_shapes)
    # mod.set_params(arg_params, aux_params, allow_missing=True)

    # Prepare a test image: keep full-size copies for drawing, and a
    # 48x48 BGR->RGB, [0, 1]-scaled copy as the Keras network input.
    img = cv2.imread("images/test_data/LFPW_image_test_0008_0.jpg")
    # img = cv2.imread("images/face.jpeg")
    print(img.shape)
    img_draw1 = np.copy(img)
    img_draw2 = np.copy(img)

    img = cv2.resize(img, (48, 48))
    img2 = np.copy(img)
    img2 =cv2.cvtColor(img2,cv2.COLOR_BGR2RGB)
    img2 = img2 / 255.0
    # img = img / 255.0

    # B = img[:, :, 0]
    # G = img[:, :, 0]
    # R = img[:, :, 0]
    #
    # img1 = np.array([R, G, B])
    # img1 = np.expand_dims(img1, 0)

    # prob1 = mod.predict(mx.nd.array(img1))
    #
    # prob1 = prob1.asnumpy()
    # print(prob1.shape)
    #
    # points = np.reshape(prob1[0], (-1, 2))
    # print(points)
    #
    # for point in points:
    #     cv2.circle(img_draw1, (int(img_draw1.shape[1] * point[0]), int(img_draw1.shape[0] * point[1])), 2, (0, 255, 0), 2)
    #
    # cv2.imwrite("images/out_mx.jpg", img_draw1)

    ##keras pro
    # Reference output — presumably activations dumped from the MXNet run
    # into pro.npy; TODO confirm how this file was produced.
    prob = np.load("pro.npy")
    print(prob.shape)

    print("+++++++++keras_out++++++++++++++++++++++++++")
    img2 = np.expand_dims(img2,0)
    prob2 = model.predict(img2)
    # print(prob2)
    print(prob2.shape)

    # for i in range(prob.shape[1]): #64
    #     for j in range(prob.shape[2]): #46
    #         for k in range(prob.shape[3]): #46
    #             print(np.sum(prob[0,i,j,k] - prob2[0,j,k,i]))
    # Mean difference against the reference dump; should be near zero if the
    # conversion is correct.
    print(np.mean(prob-prob2[0,0,:,:]))
    # 212 outputs interpreted as 106 (x, y) landmark pairs, normalized to [0, 1].
    points = np.reshape(prob2[0,0], (-1, 2))
    print(points)

    # Scale normalized landmarks back to the original image size and draw them.
    for point in points:
        cv2.circle(img_draw2, (int(img_draw2.shape[1] * point[0]), int(img_draw2.shape[0] * point[1])), 2, (0, 255, 0), 2)
        # cv2.circle(img2, (int(img2.shape[1] * point[0]), int(img2.shape[0] * point[1])), 1, (0, 255, 0),1)

    cv2.imwrite("images/out_keras.jpg", img_draw2)

    # print(np.sum(prob1-prob2))

'''
  [[ 0.1353147  -0.0579986  -0.01030864 ... -0.28304923 -0.02570831
     0.5062662 ]
   [-0.14282435 -0.0184907   0.02211118 ...  0.12231904 -0.36815152
     0.1277793 ]
   [-0.22176422 -0.01984607 -0.01824915 ... -0.03450887  0.00403773
    -0.19866396]
   ...
   [-0.03818798 -0.0659131  -0.08594778 ... -0.11230922 -0.11797775
    -0.14423251]
   [-0.03948684 -0.02449722  0.00924962 ... -0.14035185 -0.06742968
    -0.0276219 ]
   [-0.01135029 -0.00452198 -0.02984257 ... -0.04574583 -0.10807924
    -0.14593129]]]]
'''
```json
{
  "nodes": [
    {
      "op": "null",
      "name": "data",
      "inputs": []
    },
    {
      "op": "null",
      "name": "conv1_weight",
      "attrs": {
        "kernel": "(3, 3)",
        "num_filter": "64"
      },
      "inputs": []
    },
    {
      "op": "null",
      "name": "conv1_bias",
      "attrs": {
        "kernel": "(3, 3)",
        "num_filter": "64"
      },
      "inputs": []
    },
    {
      "op": "Convolution",
      "name": "conv1",
      "attrs": {
        "kernel": "(3, 3)",
        "num_filter": "64"
      },
      "inputs": [[0, 0, 0], [1, 0, 0], [2, 0, 0]]
    },
    {
      "op": "null",
      "name": "prelu1_gamma",
      "attrs": {
        "__init__": "[\"Constant\", {\"value\": 0.25}]",
        "act_type": "prelu"
      },
      "inputs": []
    },
    {
      "op": "LeakyReLU",
      "name": "prelu1",
      "attrs": {"act_type": "prelu"},
      "inputs": [[3, 0, 0], [4, 0, 0]]
    },
    {
      "op": "Pooling",
      "name": "pool1",
      "attrs": {
        "kernel": "(3, 3)",
        "pool_type": "avg",
        "pooling_convention": "full",
        "stride": "(2, 2)"
      },
      "inputs": [[5, 0, 0]]
    },
    {
      "op": "null",
      "name": "conv2_dw_weight",
      "attrs": {
        "kernel": "(3, 3)",
        "num_filter": "64",
        "num_group": "64"
      },
      "inputs": []
    },
    {
      "op": "null",
      "name": "conv2_dw_bias",
      "attrs": {
        "kernel": "(3, 3)",
        "num_filter": "64",
        "num_group": "64"
      },
      "inputs": []
    },
    {
      "op": "Convolution",
      "name": "conv2_dw",
      "attrs": {
        "kernel": "(3, 3)",
        "num_filter": "64",
        "num_group": "64"
      },
      "inputs": [[6, 0, 0], [7, 0, 0], [8, 0, 0]]
    },
    {
      "op": "null",
      "name": "prelu2_dw_gamma",
      "attrs": {
        "__init__": "[\"Constant\", {\"value\": 0.25}]",
        "act_type": "prelu"
      },
      "inputs": []
    },
    {
      "op": "LeakyReLU",
      "name": "prelu2_dw",
      "attrs": {"act_type": "prelu"},
      "inputs": [[9, 0, 0], [10, 0, 0]]
    },
    {
      "op": "null",
      "name": "conv2_sep_weight",
      "attrs": {
        "kernel": "(1, 1)",
        "num_filter": "128"
      },
      "inputs": []
    },
    {
      "op": "null",
      "name": "conv2_sep_bias",
      "attrs": {
        "kernel": "(1, 1)",
        "num_filter": "128"
      },
      "inputs": []
    },
    {
      "op": "Convolution",
      "name": "conv2_sep",
      "attrs": {
        "kernel": "(1, 1)",
        "num_filter": "128"
      },
      "inputs": [[11, 0, 0], [12, 0, 0], [13, 0, 0]]
    },
    {
      "op": "null",
      "name": "prelu2_sep_gamma",
      "attrs": {
        "__init__": "[\"Constant\", {\"value\": 0.25}]",
        "act_type": "prelu"
      },
      "inputs": []
    },
    {
      "op": "LeakyReLU",
      "name": "prelu2_sep",
      "attrs": {"act_type": "prelu"},
      "inputs": [[14, 0, 0], [15, 0, 0]]
    },
    {
      "op": "Pooling",
      "name": "pool2",
      "attrs": {
        "kernel": "(3, 3)",
        "pool_type": "avg",
        "pooling_convention": "full",
        "stride": "(2, 2)"
      },
      "inputs": [[16, 0, 0]]
    },
    {
      "op": "null",
      "name": "conv3_dw_weight",
      "attrs": {
        "kernel": "(3, 3)",
        "num_filter": "128",
        "num_group": "128"
      },
      "inputs": []
    },
    {
      "op": "null",
      "name": "conv3_dw_bias",
      "attrs": {
        "kernel": "(3, 3)",
        "num_filter": "128",
        "num_group": "128"
      },
      "inputs": []
    },
    {
      "op": "Convolution",
      "name": "conv3_dw",
      "attrs": {
        "kernel": "(3, 3)",
        "num_filter": "128",
        "num_group": "128"
      },
      "inputs": [[17, 0, 0], [18, 0, 0], [19, 0, 0]]
    },
    {
      "op": "null",
      "name": "prelu3_dw_gamma",
      "attrs": {
        "__init__": "[\"Constant\", {\"value\": 0.25}]",
        "act_type": "prelu"
      },
      "inputs": []
    },
    {
      "op": "LeakyReLU",
      "name": "prelu3_dw",
      "attrs": {"act_type": "prelu"},
      "inputs": [[20, 0, 0], [21, 0, 0]]
    },
    {
      "op": "null",
      "name": "conv3_sep_weight",
      "attrs": {
        "kernel": "(1, 1)",
        "num_filter": "256"
      },
      "inputs": []
    },
    {
      "op": "null",
      "name": "conv3_sep_bias",
      "attrs": {
        "kernel": "(1, 1)",
        "num_filter": "256"
      },
      "inputs": []
    },
    {
      "op": "Convolution",
      "name": "conv3_sep",
      "attrs": {
        "kernel": "(1, 1)",
        "num_filter": "256"
      },
      "inputs": [[22, 0, 0], [23, 0, 0], [24, 0, 0]]
    },
    {
      "op": "null",
      "name": "prelu3_sep_gamma",
      "attrs": {
        "__init__": "[\"Constant\", {\"value\": 0.25}]",
        "act_type": "prelu"
      },
      "inputs": []
    },
    {
      "op": "LeakyReLU",
      "name": "prelu3_sep",
      "attrs": {"act_type": "prelu"},
      "inputs": [[25, 0, 0], [26, 0, 0]]
    },
    {
      "op": "Pooling",
      "name": "pool3",
      "attrs": {
        "kernel": "(2, 2)",
        "pool_type": "avg",
        "pooling_convention": "full",
        "stride": "(2, 2)"
      },
      "inputs": [[27, 0, 0]]
    },
    {
      "op": "null",
      "name": "conv4_dw_weight",
      "attrs": {
        "kernel": "(2, 2)",
        "num_filter": "256",
        "num_group": "256"
      },
      "inputs": []
    },
    {
      "op": "null",
      "name": "conv4_dw_bias",
      "attrs": {
        "kernel": "(2, 2)",
        "num_filter": "256",
        "num_group": "256"
      },
      "inputs": []
    },
    {
      "op": "Convolution",
      "name": "conv4_dw",
      "attrs": {
        "kernel": "(2, 2)",
        "num_filter": "256",
        "num_group": "256"
      },
      "inputs": [[28, 0, 0], [29, 0, 0], [30, 0, 0]]
    },
    {
      "op": "null",
      "name": "prelu4_dw_gamma",
      "attrs": {
        "__init__": "[\"Constant\", {\"value\": 0.25}]",
        "act_type": "prelu"
      },
      "inputs": []
    },
    {
      "op": "LeakyReLU",
      "name": "prelu4_dw",
      "attrs": {"act_type": "prelu"},
      "inputs": [[31, 0, 0], [32, 0, 0]]
    },
    {
      "op": "null",
      "name": "conv4_sep_weight",
      "attrs": {
        "kernel": "(1, 1)",
        "num_filter": "512"
      },
      "inputs": []
    },
    {
      "op": "null",
      "name": "conv4_sep_bias",
      "attrs": {
        "kernel": "(1, 1)",
        "num_filter": "512"
      },
      "inputs": []
    },
    {
      "op": "Convolution",
      "name": "conv4_sep",
      "attrs": {
        "kernel": "(1, 1)",
        "num_filter": "512"
      },
      "inputs": [[33, 0, 0], [34, 0, 0], [35, 0, 0]]
    },
    {
      "op": "null",
      "name": "prelu4_sep_gamma",
      "attrs": {
        "__init__": "[\"Constant\", {\"value\": 0.25}]",
        "act_type": "prelu"
      },
      "inputs": []
    },
    {
      "op": "LeakyReLU",
      "name": "prelu4_sep",
      "attrs": {"act_type": "prelu"},
      "inputs": [[36, 0, 0], [37, 0, 0]]
    },
    {
      "op": "null",
      "name": "conv5_dw_weight",
      "attrs": {
        "kernel": "(3, 3)",
        "num_filter": "512",
        "num_group": "512"
      },
      "inputs": []
    },
    {
      "op": "null",
      "name": "conv5_dw_bias",
      "attrs": {
        "kernel": "(3, 3)",
        "num_filter": "512",
        "num_group": "512"
      },
      "inputs": []
    },
    {
      "op": "Convolution",
      "name": "conv5_dw",
      "attrs": {
        "kernel": "(3, 3)",
        "num_filter": "512",
        "num_group": "512"
      },
      "inputs": [[38, 0, 0], [39, 0, 0], [40, 0, 0]]
    },
    {
      "op": "null",
      "name": "prelu5_dw_gamma",
      "attrs": {
        "__init__": "[\"Constant\", {\"value\": 0.25}]",
        "act_type": "prelu"
      },
      "inputs": []
    },
    {
      "op": "LeakyReLU",
      "name": "prelu5_dw",
      "attrs": {"act_type": "prelu"},
      "inputs": [[41, 0, 0], [42, 0, 0]]
    },
    {
      "op": "null",
      "name": "conv5_sep_weight",
      "attrs": {
        "kernel": "(1, 1)",
        "num_filter": "512"
      },
      "inputs": []
    },
    {
      "op": "null",
      "name": "conv5_sep_bias",
      "attrs": {
        "kernel": "(1, 1)",
        "num_filter": "512"
      },
      "inputs": []
    },
    {
      "op": "Convolution",
      "name": "conv5_sep",
      "attrs": {
        "kernel": "(1, 1)",
        "num_filter": "512"
      },
      "inputs": [[43, 0, 0], [44, 0, 0], [45, 0, 0]]
    },
    {
      "op": "null",
      "name": "prelu5_sep_gamma",
      "attrs": {
        "__init__": "[\"Constant\", {\"value\": 0.25}]",
        "act_type": "prelu"
      },
      "inputs": []
    },
    {
      "op": "LeakyReLU",
      "name": "prelu5_sep",
      "attrs": {"act_type": "prelu"},
      "inputs": [[46, 0, 0], [47, 0, 0]]
    },
    {
      "op": "null",
      "name": "conv6_3_weight",
      "attrs": {"num_hidden": "212"},
      "inputs": []
    },
    {
      "op": "null",
      "name": "conv6_3_bias",
      "attrs": {"num_hidden": "212"},
      "inputs": []
    },
    {
      "op": "FullyConnected",
      "name": "conv6_3",
      "attrs": {"num_hidden": "212"},
      "inputs": [[48, 0, 0], [49, 0, 0], [50, 0, 0]]
    },
    {
      "op": "null",
      "name": "bn6_3_gamma",
      "attrs": {
        "fix_gamma": "False",
        "momentum": "0.9"
      },
      "inputs": []
    },
    {
      "op": "null",
      "name": "bn6_3_beta",
      "attrs": {
        "fix_gamma": "False",
        "momentum": "0.9"
      },
      "inputs": []
    },
    {
      "op": "null",
      "name": "bn6_3_moving_mean",
      "attrs": {
        "__init__": "[\"zero\", {}]",
        "fix_gamma": "False",
        "momentum": "0.9"
      },
      "inputs": []
    },
    {
      "op": "null",
      "name": "bn6_3_moving_var",
      "attrs": {
        "__init__": "[\"one\", {}]",
        "fix_gamma": "False",
        "momentum": "0.9"
      },
      "inputs": []
    },
    {
      "op": "BatchNorm",
      "name": "bn6_3",
      "attrs": {
        "fix_gamma": "False",
        "momentum": "0.9"
      },
      "inputs": [[51, 0, 0], [52, 0, 0], [53, 0, 0], [54, 0, 1], [55, 0, 1]]
    }
  ],
  "arg_nodes": [
    0,
    1,
    2,
    4,
    7,
    8,
    10,
    12,
    13,
    15,
    18,
    19,
    21,
    23,
    24,
    26,
    29,
    30,
    32,
    34,
    35,
    37,
    39,
    40,
    42,
    44,
    45,
    47,
    49,
    50,
    52,
    53,
    54,
    55
  ],
  "node_row_ptr": [
    0,
    1,
    2,
    3,
    4,
    5,
    6,
    7,
    8,
    9,
    10,
    11,
    12,
    13,
    14,
    15,
    16,
    17,
    18,
    19,
    20,
    21,
    22,
    23,
    24,
    25,
    26,
    27,
    28,
    29,
    30,
    31,
    32,
    33,
    34,
    35,
    36,
    37,
    38,
    39,
    40,
    41,
    42,
    43,
    44,
    45,
    46,
    47,
    48,
    49,
    50,
    51,
    52,
    53,
    54,
    55,
    56
  ],
  "heads": [[56, 0]],
  "attrs": {"mxnet_version": ["int", 10200]}
}

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值