Deep Recommendation Models: xDeepFM [KDD 18][Microsoft]

Video walkthrough: https://www.yuque.com/chudi/tzqav9/ny150b#aalY8
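
For reference, the CIN (Compressed Interaction Network) part of the code below computes, for the h-th feature map of layer k (following the notation of the xDeepFM paper, where $\circ$ is the Hadamard product, $m$ is the number of fields, and $H_k$ is the number of feature maps of layer $k$):

$$X^k_{h,*} = \sum_{i=1}^{H_{k-1}} \sum_{j=1}^{m} W^{k,h}_{ij}\,\bigl(X^{k-1}_{i,*} \circ X^{0}_{j,*}\bigr)$$

In the implementation, the pairwise Hadamard products are obtained via an outer product (tf.matmul) per embedding dimension, and the weights $W^{k,h}$ are realized by the filters of a kernel-size-1 Conv1D.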

import tensorflow as tf
from tensorflow import keras
from utils import *

EPOCH = 10
BATCH_SIZE = 32
VEC_DIM = 10  # embedding dimension
DNN_LAYERS = [64, 128, 64]  # hidden units of the plain DNN part
CIN_LAYER_NUMS = [16, 16, 16, 16]  # number of feature maps produced by each CIN layer
DROPOUT_RATE = 0.5

base, test = loadData()
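# NOTE: loadData() and getAllData() come from the author's utils module, which is
# not included in this post; they are assumed to return the raw train/test tables
# and (id-encoded features, one-hot features, labels) arrays, respectively.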
# Total number of category values summed over all features (label column excluded);
# used as the vocabulary size of the shared embedding
FEAT_CATE_NUM = base.shape[1] - 1
K = tf.keras.backend


def run():
    # getAllData returns id-encoded features, one-hot features and labels
    val_x_id, val_x_hot, val_y = getAllData(test)
    train_x_id, train_x_hot, train_y = getAllData(base)
    cate_num = val_x_id[0].shape[0]  # number of categorical fields per sample
    hot_num = val_x_hot[0].shape[0]  # width of the one-hot feature vector

    inputs_id = keras.Input((cate_num,))
    emb_0 = keras.layers.Embedding(FEAT_CATE_NUM, VEC_DIM, input_length=cate_num)(
        inputs_id)  # (batch , cate_num, VEC_DIM)
    emb_1 = keras.layers.Flatten()(emb_0)  # (batch , cate_num * VEC_DIM)
    deep = keras.layers.Dropout(DROPOUT_RATE)(emb_1)
    # Linear (LR) part: the raw one-hot features are fed straight into the output layer
    lr = keras.Input((hot_num,))
    # CIN (Compressed Interaction Network) part
    cin_outputs = []
    x0 = tf.split(emb_0, VEC_DIM, 2)  # list of VEC_DIM tensors, each (batch , cate_num , 1)
    x0 = tf.transpose(x0, perm=[1, 0, 2, 3])  # (batch , VEC_DIM , cate_num , 1)
    xl = x0
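    # Each CIN layer: for every embedding dimension, take the outer product of the
    # original field embeddings x0 with the previous layer's feature maps xl, compress
    # the resulting cate_num * hk_pre interaction maps into hk new feature maps with a
    # 1x1 convolution, and sum-pool each feature map over the embedding dimension.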
    for hk in CIN_LAYER_NUMS:
        hk_pre = xl.shape[2]  # number of feature maps of the previous layer (cate_num for the first layer)
        z_tensor = tf.matmul(x0, xl, transpose_b=True)  # (batch , VEC_DIM , cate_num , hk_pre)
        z_tensor = tf.reshape(z_tensor, shape=[-1, VEC_DIM, cate_num * hk_pre])  # (batch , VEC_DIM , cate_num * hk_pre)
        x_tensor = tf.keras.layers.Conv1D(filters=hk, kernel_size=1, strides=1, padding='valid',
                                          activation='relu')(z_tensor)  # (batch , VEC_DIM , hk)
        x_tensor = tf.transpose(x_tensor, perm=[0, 2, 1])  # (batch , hk , VEC_DIM)
        cin_outputs.append(tf.reduce_sum(x_tensor, axis=2))  # sum pooling over the embedding dim -> (batch , hk)
        xl = tf.split(x_tensor, VEC_DIM, 2)  # list of VEC_DIM tensors, each (batch , hk , 1)
        xl = tf.transpose(xl, perm=[1, 0, 2, 3])  # (batch , VEC_DIM , hk , 1)

    # DNN part
    for units in DNN_LAYERS:
        deep = keras.layers.Dense(units, activation='relu')(deep)
        deep = keras.layers.Dropout(DROPOUT_RATE)(deep)

    x_deep_fm = keras.layers.concatenate([lr, deep] + cin_outputs)  # linear + DNN + all CIN outputs

    x_deep_fm = keras.layers.Dropout(DROPOUT_RATE)(x_deep_fm)
    outputs = keras.layers.Dense(1, activation='sigmoid', kernel_regularizer=keras.regularizers.l2(0.001))(x_deep_fm)

    model = keras.Model(inputs=[inputs_id, lr], outputs=outputs)
    model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(0.001), metrics=[keras.metrics.AUC()])
    tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs',
                                             histogram_freq=0,
                                             write_graph=True,
                                             write_images=True)

    model.fit([train_x_id, train_x_hot], train_y, batch_size=BATCH_SIZE, epochs=EPOCH, verbose=2,
              validation_data=([val_x_id, val_x_hot], val_y),
              callbacks=[tbCallBack])


run()
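
The CIN block is the trickiest part of the model. As a quick sanity check (not part of the original post; all sizes are illustrative), the standalone snippet below reproduces a single CIN interaction step on random tensors and prints the intermediate shapes:

import tensorflow as tf

batch, cate_num, vec_dim, hk = 4, 5, 10, 16
emb = tf.random.normal([batch, cate_num, vec_dim])  # stands in for the Embedding output

# Rearrange to (batch, vec_dim, cate_num, 1), exactly as the model above does
x0 = tf.transpose(tf.split(emb, vec_dim, 2), perm=[1, 0, 2, 3])
xl = x0
hk_pre = xl.shape[2]  # = cate_num for the first CIN layer

# Pairwise (outer) products along the field dimension, one per embedding dimension
z = tf.matmul(x0, xl, transpose_b=True)              # (batch, vec_dim, cate_num, hk_pre)
z = tf.reshape(z, [-1, vec_dim, cate_num * hk_pre])  # (batch, vec_dim, cate_num * hk_pre)

# A kernel-size-1 Conv1D compresses the interaction maps into hk new feature maps
x = tf.keras.layers.Conv1D(filters=hk, kernel_size=1, activation='relu')(z)  # (batch, vec_dim, hk)
pooled = tf.reduce_sum(tf.transpose(x, perm=[0, 2, 1]), axis=2)              # (batch, hk)

print(z.shape, x.shape, pooled.shape)  # (4, 10, 25) (4, 10, 16) (4, 16)

The pooled (batch, hk) vector is what each loop iteration appends to cin_outputs before everything is concatenated with the linear and DNN parts.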