一、LFM (also known as Funk SVD)
1、How LFM works
The core idea of LFM (latent factor model) is to connect users and items through latent features (LF), which can be pictured as three matrices:
- The P matrix is the user-LF matrix, i.e. the matrix of users against latent features. In the example there are three LFs, i.e. three latent features in total.
- The Q matrix is the LF-item matrix, i.e. the matrix of latent features against items.
- The R matrix is the user-item matrix, obtained as P*Q.
- The model can handle a sparse rating matrix.

Using matrix factorization, the original user-item rating matrix (dense or sparse) is decomposed into the two matrices P and Q, and P*Q is then used to reconstruct the user-item rating matrix R. The whole process amounts to a dimensionality reduction, where:
- the entry $P_{11}$ is user 1's weight on latent feature 1
- the entry $Q_{11}$ is latent feature 1's weight on item 1
- the entry $R_{11}$ is the predicted rating of user 1 on item 1
LFM predicts a user's rating of an item as follows, where K is the number of latent features:

$$\hat{r}_{ui} = \vec{p}_{u} \cdot \vec{q}_{i} = \sum_{k=1}^{K} p_{uk} q_{ki}$$

So our final goal is to find the matrices P and Q, i.e. every value in them, and then use them to predict the user-item ratings.
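To make the three matrices concrete, here is a tiny made-up example (the numbers are invented for illustration and are not from the course material): P holds the users' weights on the latent features, Q holds the latent features' weights on the items, and their product reconstructs the predicted rating matrix R.

```python
import numpy as np

# Toy example: 2 users, 3 items, K = 3 latent features (made-up numbers).
P = np.array([[0.9, 0.1, 0.4],    # user 1's weights on the 3 latent features
              [0.2, 0.8, 0.5]])   # user 2's weights
Q = np.array([[0.7, 0.3, 0.6],    # latent feature 1's weights on the 3 items
              [0.1, 0.9, 0.2],    # latent feature 2
              [0.5, 0.4, 0.3]])   # latent feature 3

R = P @ Q                # predicted user-item rating matrix (2 x 3)
print(R[0, 0])           # predicted rating of user 1 on item 1
print(P[0] @ Q[:, 0])    # the same value: dot product of p_u and q_i
```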
2、Loss function
For rating prediction we build the loss function from the squared error:

$$Cost = \sum_{(u,i)\in R} \left(r_{ui} - \hat{r}_{ui}\right)^2 = \sum_{(u,i)\in R} \left(r_{ui} - \sum_{k=1}^{K} p_{uk} q_{ki}\right)^2$$

Adding L2 regularization:

$$Cost = \sum_{(u,i)\in R} \left(r_{ui} - \sum_{k=1}^{K} p_{uk} q_{ki}\right)^2 + \lambda\left(\sum_{U} \sum_{k=1}^{K} p_{uk}^2 + \sum_{I} \sum_{k=1}^{K} q_{ki}^2\right)$$

Taking partial derivatives of the loss function:

$$\frac{\partial Cost}{\partial p_{uk}} = -2 \sum_{(u,i)\in R} \left(r_{ui} - \sum_{k=1}^{K} p_{uk} q_{ki}\right) q_{ki} + 2\lambda p_{uk}$$

$$\frac{\partial Cost}{\partial q_{ki}} = -2 \sum_{(u,i)\in R} \left(r_{ui} - \sum_{k=1}^{K} p_{uk} q_{ki}\right) p_{uk} + 2\lambda q_{ki}$$
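As a minimal sketch of how this regularized loss could be computed, assuming P and Q are stored as dicts mapping ids to latent vectors (the same layout the implementation below uses) and using a single regularization weight per matrix; the helper name is illustrative:

```python
import numpy as np

def regularized_cost(ratings, P, Q, reg_p, reg_q):
    """Squared error over observed (uid, iid, rating) triples plus L2 penalties (illustrative)."""
    sse = sum((r - np.dot(P[uid], Q[iid])) ** 2 for uid, iid, r in ratings)
    penalty = (reg_p * sum(np.sum(p ** 2) for p in P.values())
               + reg_q * sum(np.sum(q ** 2) for q in Q.values()))
    return sse + penalty
```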
3、Optimization with stochastic gradient descent
Gradient descent parameter updates, folding the constant factor 2 into the learning rate $\alpha$:

$$p_{uk} := p_{uk} - \alpha \frac{\partial Cost}{\partial p_{uk}} = p_{uk} + \alpha\left[\sum_{(u,i)\in R}\left(r_{ui} - \sum_{k=1}^{K} p_{uk} q_{ki}\right) q_{ki} - \lambda p_{uk}\right]$$

and likewise:

$$q_{ki} := q_{ki} + \alpha\left[\sum_{(u,i)\in R}\left(r_{ui} - \sum_{k=1}^{K} p_{uk} q_{ki}\right) p_{uk} - \lambda q_{ki}\right]$$

Stochastic gradient descent applies the update after each single observed rating instead of summing over the whole training set; the inner sum $\sum_{k=1}^{K} p_{uk} q_{ki}$ is just the dot product of the two vectors (multiply the components pairwise and sum), so the per-sample updates are:

$$p_{uk} := p_{uk} + \alpha\left[\left(r_{ui} - \sum_{k=1}^{K} p_{uk} q_{ki}\right) q_{ki} - \lambda_1 p_{uk}\right]$$

$$q_{ki} := q_{ki} + \alpha\left[\left(r_{ui} - \sum_{k=1}^{K} p_{uk} q_{ki}\right) p_{uk} - \lambda_2 q_{ki}\right]$$

Because P and Q are two different matrices, they usually get different regularization parameters, e.g. $\lambda_1$ and $\lambda_2$.
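In vector form, one stochastic gradient step for a single observed rating then looks roughly like this (a sketch with example values; v_pu and v_qi stand for the latent vectors $\vec{p}_u$ and $\vec{q}_i$):

```python
import numpy as np

alpha, reg_p, reg_q = 0.02, 0.01, 0.01         # learning rate and regularization (example values)
v_pu = np.random.rand(10).astype(np.float32)   # user latent vector, K = 10
v_qi = np.random.rand(10).astype(np.float32)   # item latent vector
r_ui = 4.0                                     # one observed rating

err = r_ui - np.dot(v_pu, v_qi)                # prediction error e_ui
v_pu, v_qi = (v_pu + alpha * (err * v_qi - reg_p * v_pu),   # update p_u
              v_qi + alpha * (err * v_pu - reg_q * v_qi))   # update q_i with the old p_u
```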
4、Implementation
```python
import pandas as pd
import numpy as np


class LFM(object):

    def __init__(self, alpha, reg_p, reg_q, number_LatentFactors=10, number_epochs=10,
                 columns=["uid", "iid", "rating"]):
        self.alpha = alpha                                 # learning rate
        self.reg_p = reg_p                                 # regularization parameter for P
        self.reg_q = reg_q                                 # regularization parameter for Q
        self.number_LatentFactors = number_LatentFactors   # number of latent factors
        self.number_epochs = number_epochs                 # maximum number of iterations
        self.columns = columns

    def fit(self, dataset):
        '''
        fit dataset
        :param dataset: uid, iid, rating
        :return:
        '''
        self.dataset = pd.DataFrame(dataset)
        self.users_ratings = self.dataset.groupby(self.columns[0]).agg([list])[[self.columns[1], self.columns[2]]]
        self.items_ratings = self.dataset.groupby(self.columns[1]).agg([list])[[self.columns[0], self.columns[2]]]
        self.global_mean = self.dataset[self.columns[2]].mean()
        self.P, self.Q = self.lfm()

    def _init_matrix(self):
        '''
        Initialize the P and Q matrices with random values in [0, 1)
        :return: P, Q
        '''
        # user-LF
        P = dict(zip(self.users_ratings.index,
                     np.random.rand(len(self.users_ratings), self.number_LatentFactors).astype(np.float32)))
        # item-LF
        Q = dict(zip(self.items_ratings.index,
                     np.random.rand(len(self.items_ratings), self.number_LatentFactors).astype(np.float32)))
        return P, Q

    def lfm(self):
        '''
        Optimize P and Q with stochastic gradient descent
        :return: P, Q
        '''
        P, Q = self._init_matrix()
        for i in range(self.number_epochs):
            print("iter_____%d" % i)
            error_list = []
            for uid, iid, real_rating in self.dataset.itertuples(index=False):
                v_pu = P[uid]  # user vector
                v_qi = Q[iid]  # item vector
                err = np.float32(real_rating - np.dot(v_pu, v_qi))
                # copy the user vector so the item update uses the pre-update value,
                # matching the derived gradients
                v_pu_old = v_pu.copy()
                v_pu += self.alpha * (err * v_qi - self.reg_p * v_pu)
                v_qi += self.alpha * (err * v_pu_old - self.reg_q * v_qi)
                P[uid] = v_pu
                Q[iid] = v_qi
                error_list.append(err ** 2)
            print(np.sqrt(np.mean(error_list)))  # training RMSE for this epoch
        return P, Q

    def predict(self, uid, iid):
        # if uid or iid is unseen, return the global mean rating as the prediction
        if uid not in self.users_ratings.index or iid not in self.items_ratings.index:
            return self.global_mean
        p_u = self.P[uid]
        q_i = self.Q[iid]
        return np.dot(p_u, q_i)

    def test(self, testset):
        '''predict ratings for the test set'''
        for uid, iid, real_rating in testset.itertuples(index=False):
            try:
                pred_rating = self.predict(uid, iid)
            except Exception as e:
                print(e)
            else:
                yield uid, iid, real_rating, pred_rating


if __name__ == '__main__':
    dtype = [("userId", np.int32), ("movieId", np.int32), ("rating", np.float32)]
    dataset = pd.read_csv("./ratings.csv", usecols=range(3), dtype=dict(dtype))
    lfm = LFM(0.02, 0.01, 0.01, 10, 100, ["userId", "movieId", "rating"])
    lfm.fit(dataset)
    while True:
        uid = input("uid:")
        iid = input("iid:")
        print(lfm.predict(int(uid), int(iid)))
        break
```
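For completeness, here is one possible way to evaluate the fitted model with the test() generator on a held-out split; the random 80/20 split below is an assumption for illustration, not part of the course code:

```python
import numpy as np
import pandas as pd

# assumes the LFM class defined above is available in this module
dtype = [("userId", np.int32), ("movieId", np.int32), ("rating", np.float32)]
ratings = pd.read_csv("./ratings.csv", usecols=range(3), dtype=dict(dtype))

testset = ratings.sample(frac=0.2, random_state=42)   # hold out 20% of the ratings
trainset = ratings.drop(testset.index)

lfm = LFM(0.02, 0.01, 0.01, 10, 10, ["userId", "movieId", "rating"])
lfm.fit(trainset)

results = list(lfm.test(testset))                     # (uid, iid, real, pred) tuples
rmse = np.sqrt(np.mean([(real - pred) ** 2 for _, _, real, pred in results]))
print("test RMSE:", rmse)
```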
二、BiasSvd
BiasSvd is simply the Funk SVD matrix factorization described above with bias terms added.
1、BiasSvd
BiasSvd predicts a user's rating of an item as follows, where K is the number of latent features:

$$\hat{r}_{ui} = \mu + b_u + b_i + \sum_{k=1}^{K} p_{uk} q_{ki}$$

Here $\mu$ is the global mean rating, $b_u$ is the user bias and $b_i$ is the item bias.
2、Loss function
Again we build the loss function for rating prediction from the squared error:

$$Cost = \sum_{(u,i)\in R} \left(r_{ui} - \mu - b_u - b_i - \sum_{k=1}^{K} p_{uk} q_{ki}\right)^2$$

Adding L2 regularization:

$$Cost = \sum_{(u,i)\in R} \left(r_{ui} - \mu - b_u - b_i - \sum_{k=1}^{K} p_{uk} q_{ki}\right)^2 + \lambda\left(\sum_{U} b_u^2 + \sum_{I} b_i^2 + \sum_{U}\sum_{k=1}^{K} p_{uk}^2 + \sum_{I}\sum_{k=1}^{K} q_{ki}^2\right)$$

Taking partial derivatives of the loss function, with $e_{ui} = r_{ui} - \mu - b_u - b_i - \sum_{k=1}^{K} p_{uk} q_{ki}$ denoting the prediction error:

$$\frac{\partial Cost}{\partial p_{uk}} = -2\sum_{(u,i)\in R} e_{ui}\, q_{ki} + 2\lambda p_{uk}, \qquad \frac{\partial Cost}{\partial q_{ki}} = -2\sum_{(u,i)\in R} e_{ui}\, p_{uk} + 2\lambda q_{ki}$$

$$\frac{\partial Cost}{\partial b_u} = -2\sum_{(u,i)\in R} e_{ui} + 2\lambda b_u, \qquad \frac{\partial Cost}{\partial b_i} = -2\sum_{(u,i)\in R} e_{ui} + 2\lambda b_i$$
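A minimal sketch of this loss, extending the earlier regularized_cost idea with the bias terms (bu and bi are dicts of user/item biases, mu is the global mean; the helper name and single regularization weight are illustrative assumptions):

```python
import numpy as np

def biassvd_cost(ratings, P, Q, bu, bi, mu, reg):
    """Squared error with biases over observed (uid, iid, rating) triples plus L2 penalties (illustrative)."""
    sse = sum((r - mu - bu[uid] - bi[iid] - np.dot(P[uid], Q[iid])) ** 2
              for uid, iid, r in ratings)
    penalty = reg * (sum(b ** 2 for b in bu.values()) + sum(b ** 2 for b in bi.values())
                     + sum(np.sum(p ** 2) for p in P.values())
                     + sum(np.sum(q ** 2) for q in Q.values()))
    return sse + penalty
```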
3、Optimization with stochastic gradient descent
Gradient descent parameter updates, again folding the factor 2 into the learning rate $\alpha$ and writing $e_{ui}$ for the prediction error defined above:

$$p_{uk} := p_{uk} + \alpha\left(\sum_{(u,i)\in R} e_{ui}\, q_{ki} - \lambda p_{uk}\right), \qquad q_{ki} := q_{ki} + \alpha\left(\sum_{(u,i)\in R} e_{ui}\, p_{uk} - \lambda q_{ki}\right)$$

$$b_u := b_u + \alpha\left(\sum_{(u,i)\in R} e_{ui} - \lambda b_u\right), \qquad b_i := b_i + \alpha\left(\sum_{(u,i)\in R} e_{ui} - \lambda b_i\right)$$

Stochastic gradient descent applies the update after each single observed rating:

$$p_{uk} := p_{uk} + \alpha\left(e_{ui}\, q_{ki} - \lambda_1 p_{uk}\right), \qquad q_{ki} := q_{ki} + \alpha\left(e_{ui}\, p_{uk} - \lambda_2 q_{ki}\right)$$

$$b_u := b_u + \alpha\left(e_{ui} - \lambda_3 b_u\right), \qquad b_i := b_i + \alpha\left(e_{ui} - \lambda_4 b_i\right)$$

Because P and Q are two different matrices, they usually get different regularization parameters, e.g. $\lambda_1$ and $\lambda_2$; the bias terms likewise get their own parameters $\lambda_3$ and $\lambda_4$.
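In vector form, one stochastic gradient step for a single rating then adds the two bias updates (a sketch with example values, mirroring the LFM snippet above):

```python
import numpy as np

alpha = 0.02
reg_p, reg_q, reg_bu, reg_bi = 0.01, 0.01, 0.01, 0.01   # example regularization values
mu = 3.5                                       # global mean rating (example value)
bu, bi = 0.0, 0.0                              # user and item bias, initialized to 0
v_pu = np.random.rand(10).astype(np.float32)   # user latent vector, K = 10
v_qi = np.random.rand(10).astype(np.float32)   # item latent vector
r_ui = 4.0                                     # one observed rating

err = r_ui - mu - bu - bi - np.dot(v_pu, v_qi)             # prediction error e_ui
v_pu, v_qi = (v_pu + alpha * (err * v_qi - reg_p * v_pu),
              v_qi + alpha * (err * v_pu - reg_q * v_qi))  # RHS uses the old p_u
bu += alpha * (err - reg_bu * bu)
bi += alpha * (err - reg_bi * bi)
```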
4、Implementation
```python
import pandas as pd
import numpy as np


class BiasSvd(object):

    def __init__(self, alpha, reg_p, reg_q, reg_bu, reg_bi, number_LatentFactors=10,
                 number_epochs=10, columns=["uid", "iid", "rating"]):
        self.alpha = alpha                                 # learning rate
        self.reg_p = reg_p                                 # regularization parameter for P
        self.reg_q = reg_q                                 # regularization parameter for Q
        self.reg_bu = reg_bu                               # regularization parameter for the user bias
        self.reg_bi = reg_bi                               # regularization parameter for the item bias
        self.number_LatentFactors = number_LatentFactors   # number of latent factors
        self.number_epochs = number_epochs                 # maximum number of iterations
        self.columns = columns

    def fit(self, dataset):
        '''
        fit dataset
        :param dataset: uid, iid, rating
        :return:
        '''
        self.dataset = pd.DataFrame(dataset)
        self.users_ratings = self.dataset.groupby(self.columns[0]).agg([list])[[self.columns[1], self.columns[2]]]
        self.items_ratings = self.dataset.groupby(self.columns[1]).agg([list])[[self.columns[0], self.columns[2]]]
        self.global_mean = self.dataset[self.columns[2]].mean()
        self.P, self.Q, self.bu, self.bi = self.biasSvd()

    def _init_matrix(self):
        '''
        Initialize the P and Q matrices with random values in [0, 1)
        :return: P, Q
        '''
        # user-LF
        P = dict(zip(self.users_ratings.index,
                     np.random.rand(len(self.users_ratings), self.number_LatentFactors).astype(np.float32)))
        # item-LF
        Q = dict(zip(self.items_ratings.index,
                     np.random.rand(len(self.items_ratings), self.number_LatentFactors).astype(np.float32)))
        return P, Q

    def biasSvd(self):
        '''
        Optimize P, Q and the bias terms with stochastic gradient descent
        :return: P, Q, bu, bi
        '''
        P, Q = self._init_matrix()
        # initialize the user and item biases bu and bi to zero
        bu = dict(zip(self.users_ratings.index, np.zeros(len(self.users_ratings))))
        bi = dict(zip(self.items_ratings.index, np.zeros(len(self.items_ratings))))
        for i in range(self.number_epochs):
            print("iter____%d" % i)
            error_list = []
            for uid, iid, real_rating in self.dataset.itertuples(index=False):
                v_pu = P[uid]  # user vector
                v_qi = Q[iid]  # item vector
                err = np.float32(real_rating - self.global_mean - bu[uid] - bi[iid] - np.dot(v_pu, v_qi))
                # copy the user vector so the item update uses the pre-update value
                v_pu_old = v_pu.copy()
                v_pu += self.alpha * (err * v_qi - self.reg_p * v_pu)
                v_qi += self.alpha * (err * v_pu_old - self.reg_q * v_qi)
                P[uid] = v_pu
                Q[iid] = v_qi
                bu[uid] += self.alpha * (err - self.reg_bu * bu[uid])
                bi[iid] += self.alpha * (err - self.reg_bi * bi[iid])
                error_list.append(err ** 2)
            print(np.sqrt(np.mean(error_list)))  # training RMSE for this epoch
        return P, Q, bu, bi

    def predict(self, uid, iid):
        # if uid or iid is unseen, return the global mean rating as the prediction
        if uid not in self.users_ratings.index or iid not in self.items_ratings.index:
            return self.global_mean
        p_u = self.P[uid]
        q_i = self.Q[iid]
        return self.global_mean + self.bu[uid] + self.bi[iid] + np.dot(p_u, q_i)


if __name__ == '__main__':
    dtype = [("userId", np.int32), ("movieId", np.int32), ("rating", np.float32)]
    dataset = pd.read_csv("./ratings.csv", usecols=range(3), dtype=dict(dtype))
    # pass the actual column names of the ratings CSV
    bsvd = BiasSvd(0.02, 0.01, 0.01, 0.01, 0.01, 10, 20, ["userId", "movieId", "rating"])
    bsvd.fit(dataset)
    while True:
        uid = input("uid:")
        iid = input("iid:")
        print(bsvd.predict(int(uid), int(iid)))
```
-------------------------------------------------------------------
Reference: a course on recommender systems and algorithms; the material will be taken down on request.