从计算图复制variable到另一个计算图

定义两个计算图,内部操作全部一样,一个keras一个tensorflow,将keras的参数复制给tensorflow。

from keras.layers import Conv2D,MaxPool2D,Permute,TimeDistributed,Flatten,Input,Dense,Lambda
import keras.backend as K
from keras.engine.topology import Layer
import tensorflow as tf
import numpy as np

#定义tensorflow模型
def build_net(the_input):
    """Pure-TensorFlow (tf.layers) twin of the Keras model built by ctc_net().

    Layer-for-layer mirror of cnn() + ctc_net() so both graphs create
    variables with identical shapes in identical creation order — the
    copy loop in __main__ relies on that ordering when it zips the two
    variable lists together.

    Args:
        the_input: float32 placeholder/tensor, NHWC.
            Assumes shape (None, 32, 280, 1) as fed by __main__ — the
            static reshape below requires H and W to be known.

    Returns:
        Softmax tensor of shape (batch, time, 5990).
    """

    x = tf.layers.conv2d(the_input,filters=64,kernel_size=(3,3),strides=[1,1],padding="SAME",activation="relu")
    x = tf.layers.max_pooling2d(x,pool_size=(2,2),strides=(2,2))
    # print(x)
    x = tf.layers.conv2d(x, filters=128, kernel_size=(3, 3), strides=[1, 1], padding="SAME",activation="relu")
    x = tf.layers.max_pooling2d(x,pool_size=(2,2),strides=(2,2))
    x = tf.layers.conv2d(x, filters=256, kernel_size=(3, 3), strides=[1, 1], padding="SAME",activation="relu")
    x = tf.layers.max_pooling2d(x,pool_size=(2,2),strides=(2,2))
    # Swap H and W (NHWC -> NWHC) so width becomes the leading "time" axis;
    # same as the Keras Permute([2, 1, 3]) in cnn().
    x = tf.transpose(x,perm=[0,2,1,3])
    # print(x)
    # Collapse H*C per time step — the tf.layers equivalent of
    # TimeDistributed(Flatten()) in the Keras model.
    shape = x.shape.as_list()
    x = tf.reshape(x,shape=[-1,shape[1],shape[2]*shape[3]])
    # print(x)

    # No bias here — matches the Keras Dense(92) head, which also creates
    # no bias variable, keeping the two variable lists aligned one-to-one.
    x = tf.layers.dense(x,92,activation=None,use_bias=False)
    # print(x)
    y_pred = tf.layers.dense(x,5990,activation="softmax",name="out",use_bias=True)
    # print(y_pred)

    return y_pred

#定义keras模型
def cnn(input):
    """Keras convolutional feature extractor.

    Three conv(3x3, relu, SAME) + max-pool(2x2) stages with 64/128/256
    filters, then Permute + TimeDistributed(Flatten) to turn the feature
    map into a (time, features) sequence.
    """
    x = input
    for n_filters in (64, 128, 256):
        x = Conv2D(filters=n_filters, kernel_size=(3, 3), strides=(1, 1),
                   activation="relu", padding="SAME")(x)
        x = MaxPool2D(pool_size=(2, 2))(x)

    # Make the width axis the leading (time) axis, then flatten each step.
    x = Permute([2, 1, 3])(x)
    x = TimeDistributed(Flatten())(x)
    return x


def ctc_net(input):
    """Keras model head: CNN features -> Dense(92) -> Dense(5990, softmax).

    Layer-for-layer mirror of build_net() so both graphs create variables
    with identical shapes in the same order.
    """
    x = cnn(input)

    # use_bias=False (was use_bias=None — Keras only tested its truthiness,
    # so no bias was created either way). The explicit boolean matches
    # build_net's tf.layers.dense(..., use_bias=False) and serializes cleanly.
    x = Dense(92, activation=None, use_bias=False)(x)
    x = Dense(5990, activation="softmax", use_bias=True)(x)

    return x

开始复制

if __name__ == "__main__":
    from keras.models import Model

    # --- Step 1: build the Keras model in graph1 and snapshot its weights ---
    graph1 = tf.Graph()
    values1 = []

    with graph1.as_default():
        with tf.Session(graph=graph1) as sess:
            input = Input(shape=(32, 280, 1), dtype="float32")
            y = ctc_net(input)  # Keras model

            model = Model(inputs=input, outputs=y)
            model.summary()
            model.save("new_dense.h5")

            # Initialize AFTER the graph is built; the original ran this
            # before any variable existed, which was a no-op (Keras happened
            # to initialize its own variables lazily).
            sess.run(tf.global_variables_initializer())

            for var in tf.global_variables():
                print(var)
                values1.append(sess.run(var))

    print("=============================================")

    # --- Step 2: build the tf.layers twin in graph2 and assign the weights ---
    graph2 = tf.Graph()
    with graph2.as_default():
        the_input = tf.placeholder(dtype=tf.float32, shape=(None, 32, 280, 1), name="the_input") # 4*35*1
        y_pred = build_net(the_input)

        assign_ops = []
        feed_dict = {}

        # Same caching trick as keras.backend.batch_set_value: stash one
        # assign op + placeholder per variable so repeated copies reuse the
        # same graph nodes instead of growing the graph.
        for value1, var2 in zip(values1, tf.global_variables()):
            if hasattr(var2, '_assign_placeholder'):
                # Bug fix: originally read `x._assign_placeholder` /
                # `x._assign_op` — `x` is undefined here; the cached ops
                # live on var2.
                assign_placeholder = var2._assign_placeholder
                assign_op = var2._assign_op
            else:
                assign_placeholder = tf.placeholder(tf.float32, shape=var2.shape)
                assign_op = var2.assign(assign_placeholder)
                var2._assign_placeholder = assign_placeholder
                var2._assign_op = assign_op
            assign_ops.append(assign_op)
            feed_dict[assign_placeholder] = value1

            print(var2)

        with tf.Session(graph=graph2) as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            # Run every assign in a single call; returns the values written.
            values2 = sess.run(assign_ops, feed_dict=feed_dict)
            saver.save(sess, "model/assigned")  # checkpoint the copied weights
            print("=================================================")

    # --- Step 3: restore the checkpoint and verify the copy ---
    with graph2.as_default():
        with tf.Session(graph=graph2) as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, "model/assigned")  # restore the checkpoint

            for value1, value2, var2 in zip(values1, values2, tf.global_variables()):
                value22 = sess.run(var2)
                # All three sums should match; the pairwise differences ~0.
                print(np.sum(value1), np.sum(value2), np.sum(value22),
                      np.sum(value1 - value2), np.sum(value1 - value22),
                      np.sum(value2 - value22))

复制的思路是仿照 Keras 的 set_weights(其底层实现为 keras.backend.batch_set_value)的方式:

  1. 用 as_default() 切换默认计算图,用 tf.global_variables() 遍历所有 variable
  2. 给被赋值的模型的variable全部添加assign操作和assign_placeholder。
  3. 将所有的assign操作和placeholder的传入值放到对应的list中
  4. 开session,run所有的节点
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值