2022 MangoTV Algorithm Competition_Predicting the User's Next Watched Video_baseline CF Recall & YoutubeDNN

Predicting the User's Next Watched Video_baseline_CF

The 3rd "Malanshan Cup" International Audio and Video Algorithm Competition

1. Competition Introduction

Improving the viewing experience is one of the core technical challenges of the MangoTV platform, and one we work on continuously. Detecting user interests in time and adjusting the displayed content accordingly is essential to this goal. Predicting the next item in a sequence, given a user's viewing history and behavioral context, is an important and difficult recommendation task. Against this background, the competition asks contestants to build the best possible sequence prediction model on a real-world dataset.

2. Competition Task

Using the data provided by the organizers (user behavior on MangoTV, video features, tag features, etc.), build a model that predicts the video a user will watch next on MangoTV.

3. baseline_CF

import os
import pandas as pd
import time
from datetime import datetime
from tqdm import tqdm

from collections import defaultdict
import math,pickle
import numpy as np

data_dir = '/Desktop/比赛/2022用户下一个观看视频预测'
seq = pd.read_csv(os.path.join(data_dir, 'dataset/main_vv_seq_train.csv'))

candidates_items = pd.read_csv(os.path.join(data_dir, 'dataset/candidate_items_A.csv'))
candidates_set = set(candidates_items.vid)
def get_sim_item(df, user_col, item_col, use_iif=False):
    # user -> set of items watched
    user_item_ = df.groupby(user_col)[item_col].agg(set).reset_index()
    user_item_dict = dict(zip(user_item_[user_col], user_item_[item_col]))

    # item -> set of users who watched it
    item_user_ = df.groupby(item_col)[user_col].agg(set).reset_index()
    item_user_dict = dict(zip(item_user_[item_col], item_user_[user_col]))

    sim_item_corr = {}

    for item, users in tqdm(item_user_dict.items()):
        sim_item_corr.setdefault(item, {})
        for u in users:
            tmp_len = len(user_item_dict[u])
            for relate_item in user_item_dict[u]:
                sim_item_corr[item].setdefault(relate_item, 0)
                if use_iif:
                    # IIF weighting: penalize popular items and long user histories
                    sim_item_corr[item][relate_item] += 1 / (math.log(len(users) + 1) * math.log(tmp_len + 1))
                else:
                    sim_item_corr[item][relate_item] += 1

    return sim_item_corr
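
A quick sanity check on toy data (hypothetical ids, not from the competition) shows the structure of the output:

# Toy sanity check (hypothetical ids): users u1/u2, items a/b/c
toy = pd.DataFrame({'did': ['u1', 'u1', 'u2', 'u2'],
                    'vid': ['a', 'b', 'b', 'c']})
toy_sim = get_sim_item(toy, 'did', 'vid', use_iif=True)
print(toy_sim['b'])  # weights of items co-watched with 'b', roughly {'a': 0.83, 'b': 1.66, 'c': 0.83}
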

sim_item_corr = get_sim_item(seq, 'did', 'vid', use_iif=True)
def recommend(sim_item_corr, top_k, session_item_list, item_num=100):
    rank = {}
    for i in session_item_list:
        if i not in sim_item_corr:
            continue
        # accumulate similarity scores from the item_num most similar items
        for j, wij in sorted(sim_item_corr[i].items(), key=lambda d: d[1], reverse=True)[:item_num]:
            if j not in candidates_set:  # only recommend items from the candidate pool
                continue
            if j not in session_item_list:  # skip items the user already watched
                rank.setdefault(j, 0)
                rank[j] += wij
    if len(rank) > 0:
        rank = sorted(rank.items(), key=lambda d: d[1], reverse=True)[:top_k]
        item_list = [x[0] for x in rank]
        score_list = [x[1] for x in rank]
    else:
        item_list, score_list = [], []

    return item_list, score_list
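
For a single user the recall can be checked directly; a minimal sketch using the first did in seq:

# Sanity check: recall for one user's watch history
example_did = seq['did'].iloc[0]
example_history = seq.loc[seq['did'] == example_did, 'vid'].tolist()
items, scores = recommend(sim_item_corr, 6, example_history)
print(items, scores)
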
top_k = 6
train_session_dict = seq.groupby('did')['vid'].agg(list).to_dict()
session_id_list = []
item_id_list = []
rank_list = []
for session_id,session_item_list in tqdm(train_session_dict.items()):
    item_list, score_list = recommend(sim_item_corr, top_k, session_item_list)
    
    session_id_list += [session_id for _ in range(len(item_list))]
    item_id_list += list(item_list)
    rank_list += [x for x in range(1,len(item_list)+1)]
res_df = pd.DataFrame()
res_df['did'] = session_id_list
res_df['vid'] = item_id_list
res_df['rank'] = rank_list

res_df.to_csv(data_dir + '/results/baseline_CF_0627.csv',index=False)

Online score: 0.1672. Compared with the RecSys 2022 challenge, this competition's dataset leaves more room to maneuver.

4. YoutubeDNN Recall

4.1 Data Processing

# additional dependencies for the YoutubeDNN recall (deepctr / deepmatch / faiss)
import random
import collections

import faiss
from sklearn.preprocessing import LabelEncoder
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from deepctr.feature_column import SparseFeat, VarLenSparseFeat
from deepmatch.models import YoutubeDNN
from deepmatch.utils import sampledsoftmaxloss, NegativeSampler

def gen_data_set(data, negsample=5):
    item_ids = data['vid'].unique()

    train_set = []
    test_set = []
    for reviewerID, hist in tqdm(data.groupby('did')):
        pos_list = hist['vid'].tolist()

        if negsample > 0:
            candidate_set = list(set(item_ids) - set(pos_list))
            neg_list = np.random.choice(candidate_set, size=len(pos_list) * negsample, replace=True)  # for each positive, draw `negsample` negatives

        if len(pos_list) == 1:
            # a single-interaction user goes into both train and test
            train_set.append((reviewerID, [pos_list[0]], pos_list[0], 1, len(pos_list)))
            test_set.append((reviewerID, [pos_list[0]], pos_list[0], 1, len(pos_list)))

        # build positive/negative samples with a sliding window
        for i in range(1, len(pos_list)):
            hist_items = pos_list[:i]

            if i != len(pos_list) - 1:
                train_set.append((reviewerID, hist_items[::-1], pos_list[i], 1, len(hist_items)))  # positive: [user_id, his_item, pos_item, label, len(his_item)]
                for negi in range(negsample):
                    train_set.append((reviewerID, hist_items[::-1], neg_list[i * negsample + negi], 0, len(hist_items)))  # negative: [user_id, his_item, neg_item, label, len(his_item)]
            else:
                # hold out the last interaction of the longest window as test data
                test_set.append((reviewerID, hist_items[::-1], pos_list[i], 1, len(hist_items)))

    random.shuffle(train_set)
    random.shuffle(test_set)

    return train_set, test_set
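
To make the sample construction concrete, here is a minimal sketch on toy data (hypothetical did/vid values):

# Toy example of the tuples gen_data_set emits
toy = pd.DataFrame({'did': [1, 1, 1, 2], 'vid': [10, 20, 30, 40]})
tr, te = gen_data_set(toy, negsample=1)
# tr contains e.g. (1, [10], 20, 1, 1) as a positive and (1, [10], 40, 0, 1) as a negative;
# te contains (1, [20, 10], 30, 1, 2) -- the last interaction held out, history reversed (most recent first)
print(tr, te, sep='\n')
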

# Pad the input sequences so that all sequence features have the same length
def gen_model_input(train_set, user_profile, seq_max_len):

    train_uid = np.array([line[0] for line in train_set])
    train_seq = [line[1] for line in train_set]
    train_iid = np.array([line[2] for line in train_set])
    train_label = np.array([line[3] for line in train_set])
    train_hist_len = np.array([line[4] for line in train_set])

    train_seq_pad = pad_sequences(train_seq, maxlen=seq_max_len, padding='post', truncating='post', value=0)
    # the key 'hist_article_id' must match the VarLenSparseFeat name defined below
    train_model_input = {"did": train_uid, "vid": train_iid, "hist_article_id": train_seq_pad, "hist_len": train_hist_len}

    return train_model_input, train_label
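
Continuing the toy example above, a sketch of what the padded model input looks like (exact row order depends on the shuffle):

toy_input, toy_label = gen_model_input(tr, None, 4)  # user_profile is unused here, so None is fine
print(toy_input['hist_article_id'].shape)  # (len(tr), 4), zero-padded at the tail
print(toy_input['did'], toy_input['vid'], toy_label)
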
def youtubednn_u2i_dict(data, topk=7):
    SEQ_LEN = 30  # length of the user's watch sequence: shorter ones padded, longer ones truncated

    # keep the raw ids before encoding
    user_profile_ = data[["did"]].drop_duplicates('did')
    item_profile_ = data[["vid"]].drop_duplicates('vid')

    # label-encode the ids; +1 reserves index 0 for sequence padding
    features = ["vid", "did"]
    feature_max_idx = {}

    for feature in features:
        lbe = LabelEncoder()
        data[feature] = lbe.fit_transform(data[feature]) + 1
        feature_max_idx[feature] = data[feature].max() + 1

    # extract did and vid profiles; which features to use needs further analysis
    user_profile = data[["did"]].drop_duplicates('did')
    item_profile = data[["vid"]].drop_duplicates('vid')

    # row-aligned mapping from encoded id back to raw id (drop_duplicates preserves row order)
    user_index_2_rawid = dict(zip(user_profile['did'], user_profile_['did']))
    item_index_2_rawid = dict(zip(item_profile['vid'], item_profile_['vid']))

    # Split train/test. Deep models usually need a lot of data, so the training
    # samples are expanded with a sliding window; negsample=0 here because
    # negatives come from the sampled-softmax sampler configured below.
    train_set, test_set = gen_data_set(data, 0)
    # assemble the model inputs (see gen_model_input above)
    train_model_input, train_label = gen_model_input(train_set, user_profile, SEQ_LEN)
    test_model_input, test_label = gen_model_input(test_set, user_profile, SEQ_LEN)

    # embedding dimension
    embedding_dim = 32

    # feature columns the model consumes
    user_feature_columns = [SparseFeat('did', feature_max_idx['did'], embedding_dim),
                            VarLenSparseFeat(SparseFeat('hist_article_id', feature_max_idx['vid'], embedding_dim,
                                                        embedding_name="vid"), SEQ_LEN, 'mean', 'hist_len'),]
    item_feature_columns = [SparseFeat('vid', feature_max_idx['vid'], embedding_dim)]

    # frequency-based negative sampler for the sampled softmax
    from collections import Counter
    train_counter = Counter(train_model_input['vid'])
    item_count = [train_counter.get(i, 0) for i in range(item_feature_columns[0].vocabulary_size)]
    sampler_config = NegativeSampler('frequency', num_sampled=5, item_name='vid', item_count=item_count)

    import tensorflow as tf
    if tf.__version__ >= '2.0.0':
        tf.compat.v1.disable_eager_execution()
    else:
        K.set_learning_phase(True)

    # define the model
    print("starting!")
    model = YoutubeDNN(user_feature_columns, item_feature_columns, user_dnn_hidden_units=(256, 64, embedding_dim), sampler_config=sampler_config)
    # compile the model
    model.compile(optimizer="adam", loss=sampledsoftmaxloss)

    history = model.fit(train_model_input, train_label, batch_size=256, epochs=10, verbose=1, validation_split=0.0)

    test_user_model_input = test_model_input
    all_item_model_input = {"vid": item_profile['vid'].values}

    # sub-models that output the user / item embeddings
    user_embedding_model = Model(inputs=model.user_input, outputs=model.user_embedding)
    item_embedding_model = Model(inputs=model.item_input, outputs=model.item_embedding)

    user_embs = user_embedding_model.predict(test_user_model_input, batch_size=2 ** 12)
    item_embs = item_embedding_model.predict(all_item_model_input, batch_size=2 ** 12)

    # L2-normalize the embeddings before saving
    user_embs = user_embs / np.linalg.norm(user_embs, axis=1, keepdims=True)
    item_embs = item_embs / np.linalg.norm(item_embs, axis=1, keepdims=True)

    # Turn the embeddings into dicts for easy lookup. Note that user_embs is
    # aligned with test_user_model_input (one row per user), not with user_profile.
    raw_user_id_emb_dict = {user_index_2_rawid[k]: \
                                v for k, v in zip(test_user_model_input['did'], user_embs)}
    raw_item_id_emb_dict = {item_index_2_rawid[k]: \
                                v for k, v in zip(item_profile['vid'], item_embs)}
    # save the embeddings locally
    pickle.dump(raw_user_id_emb_dict, open('./temp_data/user_youtube_emb.pkl', 'wb'))
    pickle.dump(raw_item_id_emb_dict, open('./temp_data/item_youtube_emb.pkl', 'wb'))

    # Faiss nearest-neighbor search: for each user embedding, retrieve the topk most similar vids
    index = faiss.IndexFlatIP(embedding_dim)
    index.add(item_embs)  # index the vid vectors
    sim, idx = index.search(np.ascontiguousarray(user_embs), topk)  # query the topk most similar vids per did

    # Faiss returns row positions in item_embs; map row position -> encoded vid -> raw vid
    item_encoded_ids = item_profile['vid'].values

    user_recall_items_dict = collections.defaultdict(dict)
    for target_idx, sim_value_list, rele_idx_list in tqdm(zip(test_user_model_input['did'], sim, idx)):
        target_raw_id = user_index_2_rawid[target_idx]
        # in u2i recall there is no "self" item to skip, so keep all topk results
        for rele_idx, sim_value in zip(rele_idx_list, sim_value_list):
            rele_raw_id = item_index_2_rawid[item_encoded_ids[rele_idx]]
            user_recall_items_dict[target_raw_id][rele_raw_id] = user_recall_items_dict[target_raw_id]\
                                                                     .get(rele_raw_id, 0) + sim_value

    user_recall_items_dict = {k: sorted(v.items(), key=lambda x: x[1], reverse=True) for k, v in user_recall_items_dict.items()}
    return user_recall_items_dict
# `data` is the interaction DataFrame with ['did', 'vid'] columns (e.g. the seq loaded above)
user_recall_items_dict = youtubednn_u2i_dict(data, topk=20)

Afterwards, you only need to read the dictionary back and convert it into a DataFrame (see the sketch below). But when I submitted the standalone YoutubeDNN recall results, why did I get a score of 0?! Can anyone explain?
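For reference, a minimal sketch of flattening the recall dict into the same did/vid/rank submission format as the CF baseline (the output file name is hypothetical):

# Flatten user_recall_items_dict into the did/vid/rank submission format
rows = []
for did, item_score_list in user_recall_items_dict.items():
    for rank, (vid, score) in enumerate(item_score_list[:top_k], start=1):
        rows.append((did, vid, rank))
submit_df = pd.DataFrame(rows, columns=['did', 'vid', 'rank'])
submit_df.to_csv(data_dir + '/results/baseline_youtubednn.csv', index=False)

One thing worth checking against the 0 score: unlike the CF baseline, this recall does not restrict recommendations to candidates_set, and any misalignment between encoded and raw ids would also silently produce invalid vids.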
