Deep Recommendation Models: DIEN [AAAI 19][Alibaba]



import tensorflow as tf
from tensorflow import keras
from utils import *
import numpy as np

EPOCH = 10
BATCH_SIZE = 32
VEC_DIM = 10            # embedding dimension
DROPOUT_RATE = 0.5
HEAD_NUM = 4            # not used in this script
HIDE_SIZE = 128         # not used in this script
LAYER_NUM = 3           # not used in this script
DNN_LAYERS = [200, 80]  # hidden sizes of the final MLP
data, max_user_id, max_item_id = load_data()
# Length of the user behavior sequence (number of historical behaviors per sample)
BEHAVIOR_FEAT_NUM = 32
K = tf.keras.backend


def Dice(tensor):
    # Dice activation from the DIN/DIEN papers: p(x) = sigmoid(BN(x)),
    # Dice(x) = p(x) * x + (1 - p(x)) * alpha * x.
    # Note: alphas is a plain backend variable, so it is not tracked as a
    # trainable weight of the Keras model.
    batch_nor_tensor = keras.layers.BatchNormalization()(tensor)
    batch_nor_sig_tensor = keras.activations.sigmoid(batch_nor_tensor)
    alphas = K.variable(np.random.random(int(tensor.shape[-1])))
    return batch_nor_sig_tensor * tensor + (1.0 - batch_nor_sig_tensor) * alphas * tensor


def run():
    # Map every categorical feature value to a unified id; each row of the inputs
    # holds the ids of that sample's feature values.
    train_user_id_data, train_now_item_id_data, train_item_ids_data, train_rating_ids_data, train_y_data, \
    test_user_id_data, test_now_item_id_data, test_item_ids_data, test_rating_ids_data, test_y_data = get_all_data(data)

    user_id = keras.Input((1,))                      # target user id
    now_item_id = keras.Input((1,))                  # candidate (target) item id
    items_ids = keras.Input((BEHAVIOR_FEAT_NUM,))    # historical item id sequence
    ratings_ids = keras.Input((BEHAVIOR_FEAT_NUM,))  # historical rating sequence

    usr_emb = keras.layers.Embedding(max_user_id + 1, VEC_DIM, input_length=1)(user_id)  # [-1,1,vec_dim]
    usr_emb = keras.layers.Flatten()(usr_emb)  # [-1,vec_dim]
    now_item_emb = keras.layers.Embedding(max_item_id + 1, VEC_DIM, input_length=1)(now_item_id)  # [-1,1,vec_dim]
    now_item_emb_tile = tf.tile(now_item_emb, [1, BEHAVIOR_FEAT_NUM, 1])  # [-1,BEA_FEAT_NUM,vec_dim]
    now_item_emb = keras.layers.Flatten()(now_item_emb)  # [-1,vec_dim]

    # Behavior Layer
    items_emb = keras.layers.Embedding(max_item_id + 1, VEC_DIM, input_length=BEHAVIOR_FEAT_NUM)(
        items_ids)  # [-1,BEA_FEAT_NUM,vec_dim]
    ratings_emb = keras.layers.Embedding(6, VEC_DIM, input_length=BEHAVIOR_FEAT_NUM)(
        ratings_ids)  # rating ids 0-5, [-1,BEA_FEAT_NUM,vec_dim]
    behavior_layer_output = items_emb * ratings_emb  # element-wise product, [-1,BEA_FEAT_NUM,vec_dim]

    # Interest Extractor Layer: a GRU over the behavior sequence
    interest_extractor_layer_output = keras.layers.GRU(VEC_DIM, return_sequences=True, dropout=DROPOUT_RATE)(
        behavior_layer_output)  # [-1,BEA_FEAT_NUM,vec_dim]

    # Interest Evolution Layer: the paper uses an attention-gated GRU (AUGRU); here the
    # tiled target-item embedding is simply multiplied into the hidden states before a
    # second GRU (see the AUGRU sketch after the script).
    interest_evolution_layer_output = now_item_emb_tile * interest_extractor_layer_output  # [-1,BEA_FEAT_NUM,vec_dim]
    interest_evolution_layer_output = keras.layers.GRU(VEC_DIM, dropout=DROPOUT_RATE)(
        interest_evolution_layer_output)  # [-1,vec_dim]

    deep = keras.layers.concatenate([usr_emb, now_item_emb, interest_evolution_layer_output])

    for units in DNN_LAYERS:
        deep = keras.layers.Dense(units)(deep)
        deep = Dice(deep)
        deep = keras.layers.Dropout(DROPOUT_RATE)(deep)

    # "Auxiliary Loss" section: the paper supervises each hidden state of the extractor
    # GRU with the next behavior via a separate sampled sigmoid log loss. This script
    # approximates that by feeding the element-wise products of each hidden state and
    # the next behavior embedding into the final MLP instead (see the auxiliary-loss
    # sketch after the script).
    auxiliary_dnn = []
    for i in range(BEHAVIOR_FEAT_NUM - 1):
        auxiliary_dnn.append(tf.reshape(behavior_layer_output[:, i + 1, :], shape=(-1, VEC_DIM)) * tf.reshape(
            interest_extractor_layer_output[:, i, :], shape=(-1, VEC_DIM)))  # list of BEA_FEAT_NUM-1 tensors, each [-1,vec_dim]

    behavior_layer_output_t = tf.transpose(auxiliary_dnn, perm=[1, 0, 2])  # [-1,BEA_FEAT_NUM-1,vec_dim]
    behavior_layer_output_t = tf.reshape(
        behavior_layer_output_t, shape=(-1, (BEHAVIOR_FEAT_NUM - 1) * VEC_DIM))  # [-1,(BEA_FEAT_NUM-1)*vec_dim]
    behavior_layer_output_t = keras.layers.Dropout(DROPOUT_RATE)(behavior_layer_output_t)

    deep = keras.layers.concatenate([behavior_layer_output_t, deep])
    outputs = keras.layers.Dense(1, activation='sigmoid')(deep)

    model = keras.Model(inputs=[user_id, now_item_id, items_ids, ratings_ids], outputs=outputs)
    model.compile(loss='binary_crossentropy', optimizer=tf.train.AdamOptimizer(0.001), metrics=[keras.metrics.AUC()])
    tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs',
                                             histogram_freq=0,
                                             write_graph=True,
                                             write_grads=True,
                                             write_images=True,
                                             embeddings_freq=0,
                                             embeddings_layer_names=None,
                                             embeddings_metadata=None)

    model.fit([train_user_id_data, train_now_item_id_data, train_item_ids_data, train_rating_ids_data], train_y_data,
              batch_size=BATCH_SIZE, epochs=EPOCH, verbose=2,
              validation_data=(
                  [test_user_id_data, test_now_item_id_data, test_item_ids_data, test_rating_ids_data], test_y_data),
              callbacks=[tbCallBack], workers=4)


if __name__ == '__main__':
    run()
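
In the DIEN paper, the Interest Evolution Layer is driven by AUGRU, a GRU whose update gate is rescaled by the attention score between each interest state and the target item, rather than by the simple element-wise product used above. The cell below is a minimal sketch of that idea, not code from this post: the class name AUGRUCell and the weight layout are my own, and it assumes each timestep's input is the interest state with its scalar attention score appended as the last feature.

import tensorflow as tf
from tensorflow import keras


class AUGRUCell(keras.layers.Layer):
    """Sketch of a GRU cell with an attentional update gate (AUGRU)."""

    def __init__(self, units, **kwargs):
        super(AUGRUCell, self).__init__(**kwargs)
        self.units = units
        self.state_size = units

    def build(self, input_shape):
        feat_dim = int(input_shape[-1]) - 1  # last column holds the attention score
        self.kernel = self.add_weight(name='kernel', shape=(feat_dim, 3 * self.units))
        self.recurrent_kernel = self.add_weight(name='recurrent_kernel',
                                                shape=(self.units, 3 * self.units))
        self.bias = self.add_weight(name='bias', shape=(3 * self.units,), initializer='zeros')
        super(AUGRUCell, self).build(input_shape)

    def call(self, inputs, states):
        h_prev = states[0]
        x, att = inputs[:, :-1], inputs[:, -1:]  # split features / attention score
        xz = tf.matmul(x, self.kernel) + self.bias
        hz = tf.matmul(h_prev, self.recurrent_kernel)
        xu, xr, xh = tf.split(xz, 3, axis=-1)
        hu, hr, hh = tf.split(hz, 3, axis=-1)
        u = tf.sigmoid(xu + hu)         # update gate
        r = tf.sigmoid(xr + hr)         # reset gate
        h_tilde = tf.tanh(xh + r * hh)  # candidate state
        u = att * u                     # AUGRU: attention score rescales the update gate
        h = (1.0 - u) * h_prev + u * h_tilde
        return h, [h]

Such a cell could stand in for the second GRU above via keras.layers.RNN(AUGRUCell(VEC_DIM)), fed with interest_extractor_layer_output concatenated with a per-step attention score (for example, a softmax over the dot products with now_item_emb).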
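
For reference, the auxiliary loss in the paper pushes each hidden state h_t of the extractor GRU to score the actually clicked next item higher than a negatively sampled one, via an inner product and a sigmoid log loss. The function below is only a sketch of that formula; it is not wired into the model above, which would additionally require negative samples in the data pipeline and something like model.add_loss.

def dien_auxiliary_loss(hidden_states, click_seq, noclick_seq, mask):
    """hidden_states: [B, T-1, D] extractor-GRU outputs h_1 .. h_{T-1}
    click_seq:        [B, T-1, D] embeddings of the actually clicked items e_2 .. e_T
    noclick_seq:      [B, T-1, D] embeddings of negatively sampled items
    mask:             [B, T-1]    1.0 for real behaviors, 0.0 for padding"""
    click_logit = tf.reduce_sum(hidden_states * click_seq, axis=-1)      # [B, T-1]
    noclick_logit = tf.reduce_sum(hidden_states * noclick_seq, axis=-1)  # [B, T-1]
    click_loss = tf.math.log(tf.sigmoid(click_logit) + 1e-8) * mask
    noclick_loss = tf.math.log(1.0 - tf.sigmoid(noclick_logit) + 1e-8) * mask
    return -tf.reduce_mean(click_loss + noclick_loss)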