Bayesian Personalized Ranking (BPR)

A few points to note

1. Although both the training set and the test set keep only the 4- and 5-star records, the final recommendation step does not need to drop items whose predicted score is below 4; it is enough to rank all candidate items by predicted score from high to low and recommend from the top (a small sketch of this step follows the code listing).

2. The \hat{r}_{ui} and \hat{r}_{uj} in the loss function are predicted scores, not the actual ratings from the training set, because this is a one-class problem with only two states, liked and not liked (the objective is sketched below).
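
As a sketch of the objective the code below appears to minimize for each sampled triple (u, i, j), assuming the coefficients au, av, bv in the code play the role of \alpha_u, \alpha_v, \beta_v:

\hat{r}_{ui} = U_u \cdot V_i + b_i, \qquad \hat{r}_{uij} = \hat{r}_{ui} - \hat{r}_{uj}

f_{uij} = -\ln\sigma(\hat{r}_{uij}) + \frac{\alpha_u}{2}\|U_u\|^2 + \frac{\alpha_v}{2}\left(\|V_i\|^2 + \|V_j\|^2\right) + \frac{\beta_v}{2}\left(b_i^2 + b_j^2\right)

Each parameter is then updated by stochastic gradient descent, \theta \leftarrow \theta - \gamma\,\nabla_\theta f_{uij}; for example \nabla_{U_u} f_{uij} = -\sigma(-\hat{r}_{uij})(V_i - V_j) + \alpha_u U_u, which matches the delUu term computed in the code.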

import math
import sys
from random import randint

import numpy as np
import pandas as pd

# log_print = open('Bayesian.py运行结果.log', 'w')
# sys.stdout = log_print
# sys.stderr = log_print


def getSigma(a):
    # logistic sigmoid: sigma(a) = 1 / (1 + exp(-a))
    return 1 / (1 + math.exp(-a))

d = 20                    # latent factor dimension
au = av = bu = bv = 0.01  # L2 regularization coefficients
gama = 0.01               # SGD learning rate
T = 500                   # number of passes over the training data

# MovieLens-100k: keep only the 4- and 5-star records as positive feedback
col = ['user', 'movie', 'rating', 'timestamp']
Origin_data = pd.read_csv('./ml-100k/u1.base', sep='\t', names=col, encoding='latin-1')
test_data = pd.read_csv('./ml-100k/u1.test', sep='\t', names=col, encoding='latin-1')

Origin_data.drop(['timestamp'], axis=1, inplace=True)
Origin_data = Origin_data[(Origin_data['rating'] == 4) | (Origin_data['rating'] == 5)]

test_data.drop(['timestamp'], axis=1, inplace=True)
test_data = test_data[(test_data['rating'] == 4) | (test_data['rating'] == 5)]

ori_user = Origin_data['user'].drop_duplicates().tolist()
ori_movie = Origin_data['movie'].drop_duplicates().tolist()
test_user = test_data['user'].drop_duplicates().tolist()

# like_user[u]: movies user u rated 4 or 5 in the training set (positive items)
# dislike_user[u]: every other training movie, treated as unobserved/negative
like_user = {}
dislike_user = {}

for i in range(len(Origin_data)):
    u = Origin_data.iloc[i]['user']
    m = Origin_data.iloc[i]['movie']
    r = Origin_data.iloc[i]['rating']
    if u not in like_user:
        like_user[u] = []
    if u not in dislike_user:
        dislike_user[u] = []
    if r >= 4:
        like_user[u].append(m)

for i in ori_user:
    dislike_user[i] = list(set(ori_movie) - set(like_user[i]))

# test_like_user[u]: movies user u rated 4 or 5 in the test set (ground truth)
test_like_user = {}

for i in range(len(test_data)):
    u = test_data.iloc[i]['user']
    m = test_data.iloc[i]['movie']
    r = test_data.iloc[i]['rating']
    if u not in test_like_user:
        test_like_user[u] = []
    if r >= 4:
        test_like_user[u].append(m)


n = len(ori_user)   # number of training users
m = len(ori_movie)  # number of training movies

aver = len(Origin_data) / n / m  # global density of positive feedback

U = {}     # user latent vectors
V = {}     # item latent vectors
bias = {}  # item biases

# initialize latent vectors with small random values in (-0.005, 0.005)
for i in ori_user:
    U[i] = (np.random.random(d) - 0.5) * 0.01

for i in ori_movie:
    V[i] = (np.random.random(d) - 0.5) * 0.01

# initialize each item bias as its popularity relative to the average density
for i in ori_movie:
    user = Origin_data.loc[Origin_data['movie'] == i]['user'].drop_duplicates()
    bias[i] = len(user) / n - aver

lenth = len(Origin_data)

# BPR-OPT via stochastic gradient descent: in each step sample a user u, a
# positive item i and a negative item j, then step along the gradient of the
# pairwise objective plus L2 regularization.
for t in range(T):
    for t2 in range(lenth):
        # sample a (u, i) pair uniformly from the positive training records
        ran = randint(0, lenth - 1)
        u = Origin_data.iloc[ran]['user']
        i = Origin_data.iloc[ran]['movie']

        # sample a negative item j that u has not rated 4 or 5
        l = len(dislike_user[u])
        ran1 = randint(0, l - 1)
        j = dislike_user[u][ran1]

        # predicted scores and their difference
        rui = np.sum(U[u] * V[i]) + bias[i]
        ruj = np.sum(U[u] * V[j]) + bias[j]
        ruij = rui - ruj
        sig = getSigma(-ruij)  # sigma(-r_uij), common factor in all gradients

        # gradients of the negative log-likelihood plus regularization
        delUu = -sig * (V[i] - V[j]) + au * U[u]
        delVi = -sig * U[u] + av * V[i]
        delVj = sig * U[u] + av * V[j]
        delbi = -sig + bv * bias[i]
        delbj = sig + bv * bias[j]

        # gradient-descent updates
        U[u] = U[u] - gama * delUu
        V[i] = V[i] - gama * delVi
        V[j] = V[j] - gama * delVj
        bias[i] = bias[i] - gama * delbi
        bias[j] = bias[j] - gama * delbj

# Evaluate Precision@5 and Recall@5 on the test users: score every movie the
# user has not already liked in training, rank by predicted score, and count
# hits among the top 5.
p = 0
r = 0
for u in test_user:
    if u not in U:
        # user has no 4/5-star record in the training set, nothing to recommend
        continue
    testSet = set(test_like_user[u])
    lenth = len(testSet)
    hav = set(like_user[u])
    ratingHash = {}
    for m in ori_movie:
        if m not in hav:
            ratingHash[m] = np.sum(U[u] * V[m]) + bias[m]
    ratingHash = sorted(ratingHash.items(), key=lambda x: x[1], reverse=True)
    num = 1
    for i, val in ratingHash:
        if num == 6:
            break
        if i in testSet:
            p += 1 / 5        # each hit contributes 1/5 to Precision@5
            r += 1 / lenth    # each hit contributes 1/|test likes| to Recall@5
        num += 1

p /= len(test_user)
r /= len(test_user)

print(p)
print(r)

# log_print.close();
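
Below is a minimal sketch of the recommendation step described in note 1: rank every movie the user has not liked in the training set by predicted score and take the top N, with no filtering on any score threshold. recommend_top_n is a hypothetical helper, not part of the original script; it reuses the trained U, V, bias and the like_user / ori_movie structures built above.

# Hypothetical helper (not in the original script): top-N recommendation from
# the trained BPR model, ranking all unseen training movies by predicted score.
def recommend_top_n(u, N=5):
    seen = set(like_user.get(u, []))
    scores = {mv: np.sum(U[u] * V[mv]) + bias[mv] for mv in ori_movie if mv not in seen}
    return sorted(scores, key=scores.get, reverse=True)[:N]

# example usage: print(recommend_top_n(test_user[0]))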
