tf.keras: building, training, and saving a model with dict-style inputs

# coding:utf-8

import sys, os

sys.path.append("..")
sys.path.append("../../")

import tensorflow as tf
import numpy as np



def build_input_dict_model() -> tf.keras.Model:

    inputs = {"dense1": tf.keras.Input((5,), dtype=tf.float32, name="input_dense_feature"),
              "sparse_name": tf.keras.Input((1,), dtype=tf.int32, name="sparse_name"),
              "sparse_add": tf.keras.Input((1, ), dtype=tf.int32, name="sparse_add")}

    embedding_dict = {"sparse_name": tf.keras.layers.Embedding(100, 8, name="sparse_name_embedding"),
                      "sparse_add": tf.keras.layers.Embedding(50, 16, name="sparse_add_embedding")}

    sparse_name_list = ["sparse_name", "sparse_add"]

    embedding_list = []
    for feature_name in sparse_name_list:  # keep the order of feature_name fixed so the concat order is deterministic
        if feature_name in embedding_dict:
            tmp = embedding_dict[feature_name](inputs[feature_name])
            embedding_list.append(tmp)



    # shape=[batch, 1, embed_dim]
    # embedding_list = []
    # sparse_name_embed = tf.keras.layers.Embedding(100, 8, name="sparse_name_embedding")(inputs["sparse_name"])
    # embedding_list.append(sparse_name_embed)
    # sparse_add_embed = tf.keras.layers.Embedding(50, 16, name="sparse_add_embedding")(inputs["sparse_add"])
    # embedding_list.append(sparse_add_embed)

    # embedding = tf.concat([sparse_name_embed, sparse_add_embed], axis=-1)
    embedding = tf.concat(embedding_list, axis=-1)  # shape=[batch, 1, sum of embed_dims]
    embedding = tf.squeeze(embedding, axis=1)  # shape=[batch, sum of embed_dims]
    # embedding_concat = tf.keras.layers.Flatten()(embedding)  # shape=[batch, num_sparse_feature * embed_dim]
    input_features = tf.concat([inputs["dense1"], embedding], axis=-1)  # dense features + sparse embeddings

    hidden = tf.keras.layers.Dense(32, activation=tf.nn.relu)(input_features)
    probs = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid, use_bias=False)(hidden)

    model = tf.keras.Model(inputs, probs)

    return model
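
Because the model is built from a dict of Inputs, it can be fed a plain Python dict whose keys match the input keys; the insertion order of those keys does not matter. A minimal sanity-check sketch (reusing the tf/np imports above; the batch size of 2 is just an assumption for illustration):

# sanity check (sketch): only the key names of the feed dict matter, not their order
m = build_input_dict_model()
m.summary()
probs = m({"sparse_add": np.zeros((2, 1), dtype=np.int32),
           "dense1": np.zeros((2, 5), dtype=np.float32),
           "sparse_name": np.zeros((2, 1), dtype=np.int32)})
print(probs.shape)  # (2, 1)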



def train():

    input_dense_feature = np.random.randn(8, 5).astype(np.float32)  # cast to match the float32 Input
    input_sparse_name = np.random.randint(0, 50, size=(8, 1))
    input_sparse_add = np.random.randint(2, 15, size=(8, 1))

    labels = np.random.randint(0, 2, size=[8, 1])  # randint's upper bound is exclusive, so use 2 to get both 0 and 1 labels

    model = build_input_dict_model()

    optim = tf.keras.optimizers.Adam()

    def train_step():
        with tf.GradientTape() as tape:
            probs = model(
                {"dense1": input_dense_feature, "sparse_name": input_sparse_name, "sparse_add": input_sparse_add})
            losses = tf.keras.losses.binary_crossentropy(labels, probs, from_logits=False)
            loss = tf.reduce_mean(losses)

        grads = tape.gradient(loss, model.trainable_weights)

        optim.apply_gradients(zip(grads, model.trainable_weights))
        return loss


    for i in range(100):
        loss = train_step()
        print(f"step: {i}, loss: {loss}")

    model.save("saved_model")
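
The manual GradientTape loop above can also be written with compile()/fit(), since fit() accepts dict-structured features, either as a plain dict of arrays or wrapped in a tf.data.Dataset. A minimal sketch assuming the same toy data and the same Adam / binary cross-entropy setup as train():

def train_with_fit():
    model = build_input_dict_model()
    model.compile(optimizer="adam", loss="binary_crossentropy")

    x = {"dense1": np.random.randn(8, 5).astype(np.float32),
         "sparse_name": np.random.randint(0, 50, size=(8, 1)),
         "sparse_add": np.random.randint(2, 15, size=(8, 1))}
    y = np.random.randint(0, 2, size=(8, 1))

    # a Dataset built from a (feature_dict, label) pair keeps the dict structure per batch
    dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(4)
    model.fit(dataset, epochs=10)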


def predict():

    np.random.seed(1234)
    model = tf.keras.models.load_model("saved_model")

    input_dense_feature = np.random.randn(8, 5).astype(np.float32)
    input_sparse_name = np.random.randint(0, 50, size=(8, 1))
    input_sparse_add = np.random.randint(2, 15, size=(8, 1))

    inputs = {"dense1": input_dense_feature, "sparse_name": input_sparse_name, "sparse_add": input_sparse_add}

    probs = model(inputs)  # calling the model directly returns a tf.Tensor
    print(probs.numpy())

    print(model.predict(inputs))  # model.predict returns a numpy array



# train()  # run once first to create the ./saved_model directory
predict()
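
The directory written by model.save("saved_model") is a standard SavedModel, so besides tf.keras.models.load_model it can also be loaded with the lower-level tf.saved_model.load and inspected through its serving signature. A short sketch; "serving_default" is the default signature key Keras exports, and the exact input tensor names should be read from the printed structure rather than assumed:

loaded = tf.saved_model.load("saved_model")
infer = loaded.signatures["serving_default"]
print(infer.structured_input_signature)  # expected input names, shapes and dtypes
print(infer.structured_outputs)  # output name(s) and shapes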

Tested with tensorflow==2.8.
