Java SVD / SVD++ factorization for recommendation, implemented in Java

import java.util.Map;
import java.util.Map.Entry;
import java.util.Scanner;

import javolution.util.FastList;
import javolution.util.FastMap;

/**
 * SVD++: fuses BiasLFM with the user's historical rating behavior,
 * i.e. it combines the neighborhood idea with the latent factor model (LFM).
 */
public class SVDPP extends AbsMF {

    private FastMap<String, Double> BU = null;   // user bias terms
    private FastMap<String, Double> BI = null;   // item bias terms
    private FastMap<String, Double[]> Y = null;  // implicit-feedback factor vector per item
    private double sumMean = 0.0;                // mean of all ratings

    public SVDPP() {
    }

    public static void main(String[] args) {
        String dataPath = "resultData.txt";
        SVDPP svdpp = new SVDPP();
        svdpp.loadData(dataPath);
        svdpp.initParam(30, 0.02, 0.01, 50);
        svdpp.train();
        System.out.println("Input userID...");
        Scanner in = new Scanner(System.in);
        while (true) {
            String userID = in.nextLine();
            FastList<String> recommendedItems = svdpp.calRecSingleUser(userID, 50);
            svdpp.displayRecoItem(userID, recommendedItems);
            System.out.println("Input userID...");
        }
    }

    /**
     * Initializes F, α, λ, max_iter, U, I, BU, BI and Y.
     *
     * @param F        number of latent factors
     * @param α        learning rate
     * @param λ        regularization parameter (guards against overfitting)
     * @param max_iter number of iterations
     */
    @Override
    public void initParam(int F, double α, double λ, int max_iter) {
        System.out.println("init U,I,BU,BI...");
        this.F = F;
        this.α = α;
        this.λ = λ;
        this.max_iter = max_iter;
        this.U = new FastMap<String, Double[]>();
        this.I = new FastMap<String, Double[]>();
        this.BU = new FastMap<String, Double>();
        this.BI = new FastMap<String, Double>();
        this.Y = new FastMap<String, Double[]>();
        int itemCount = 0;  // total number of ratings across all users
        Double[] randomUValue = null;
        Double[] randomIValue = null;
        Double[] randomYValue = null;
        // randomly initialize the U, I and Y matrices
        for (Entry<String, Map<String, Double>> entry : ratingData.entrySet()) {
            String userID = entry.getKey();
            this.BU.put(userID, 0.0);
            itemCount += entry.getValue().size();
            randomUValue = new Double[F];
            for (int i = 0; i < F; i++) {
                // fill with random numbers scaled by 1/sqrt(F)
                double rand = Math.random() / Math.sqrt(F);
                randomUValue[i] = rand;
            }
            U.put(userID, randomUValue);
            for (Entry<String, Double> entryItem : entry.getValue().entrySet()) {
                this.sumMean += entryItem.getValue();
                String itemID = entryItem.getKey();
                this.BI.put(itemID, 0.0);
                if (I.containsKey(itemID))
                    continue;
                randomIValue = new Double[F];
                randomYValue = new Double[F];
                for (int i = 0; i < F; i++) {
                    double randI = Math.random() / Math.sqrt(F);
                    randomIValue[i] = randI;
                    double randY = Math.random() / Math.sqrt(F);
                    randomYValue[i] = randY;
                }
                I.put(itemID, randomIValue);
                Y.put(itemID, randomYValue);
            }
        }
        this.sumMean /= itemCount;
    }

    /**
     * Trains U, I, BU, BI and Y with stochastic gradient descent.
     */
    @Override
    public void train() {
        System.out.println("training U,I,BU,BI,Y...");
        for (int step = 0; step < this.max_iter; step++) {
            System.out.println("iteration " + (step + 1) + "...");
            for (Entry<String, Map<String, Double>> entry : this.ratingData.entrySet()) {
                // sum of the implicit preference vectors of the items in this user's history
                double[] z_Item = new double[this.F];
                for (String item : entry.getValue().keySet()) {
                    Double[] Y_Item = this.Y.get(item);
                    for (int f = 0; f < this.F; f++) {
                        z_Item[f] += Y_Item[f];
                    }
                }
                double itemLength_Sqrt = 1.0 / Math.sqrt(1.0 * entry.getValue().size());
                double[] s = new double[this.F];
                String userID = entry.getKey();
                for (Entry<String, Double> itemRatingEntry : entry.getValue().entrySet()) {
                    String itemID = itemRatingEntry.getKey();
                    double pui = this.predictRating(userID, itemID);
                    // error under the current parameters (actual rating - predicted rating)
                    double err = itemRatingEntry.getValue() - pui;
                    double bu = this.BU.get(userID);
                    bu += this.α * (err - this.λ * bu);
                    this.BU.put(userID, bu);
                    double bi = this.BI.get(itemID);
                    bi += this.α * (err - this.λ * bi);
                    this.BI.put(itemID, bi);
                    Double[] userValue = this.U.get(userID);
                    Double[] itemValue = this.I.get(itemID);
                    for (int i = 0; i < this.F; i++) {
                        s[i] += itemValue[i] * err;
                        double us = userValue[i];
                        double it = itemValue[i];
                        // the λ term is the regularization that prevents overfitting;
                        // λ has to be tuned experimentally for the concrete application.
                        // The loss function is optimized with stochastic gradient descent.
                        us += this.α * (err * it - this.λ * us);
                        it += this.α * (err * (us + z_Item[i] * itemLength_Sqrt) - this.λ * it);
                        userValue[i] = us;
                        itemValue[i] = it;
                    }
                }
                for (String item : entry.getValue().keySet()) {
                    Double[] Y_Item = this.Y.get(item);
                    for (int f = 0; f < this.F; f++) {
                        double y = Y_Item[f];
                        y += this.α * (s[f] * itemLength_Sqrt - this.λ * y);
                        Y_Item[f] = y;
                    }
                }
            }
            this.α *= 0.9;  // gradually shrink the step size after every iteration
        }
    }

    /**
     * Predicted rating of userID for itemID.
     * Each row of U gives the user's preference for each latent factor;
     * each column of I gives the item's distribution over the latent factors.
     * rating = (U + Y / sqrt(|N(u)|)) * I + sumMean + BU + BI
     *
     * @param userID
     * @param itemID
     * @return predicted rating
     */
    @Override
    public double predictRating(String userID, String itemID) {
        double[] z_Item = new double[this.F];
        Map<String, Double> ratingItem = this.ratingData.get(userID);
        for (String item : ratingItem.keySet()) {
            Double[] Y_Item = this.Y.get(item);
            for (int f = 0; f < this.F; f++)
                z_Item[f] += Y_Item[f];
        }
        double p = 0.0;
        Double[] userValue = this.U.get(userID);
        Double[] itemValue = this.I.get(itemID);
        for (int i = 0; i < this.F; i++) {
            double rating = userValue[i] + z_Item[i] / Math.sqrt(1.0 * ratingItem.size());
            p += rating * itemValue[i];
        }
        p += this.BU.get(userID) + this.BI.get(itemID) + this.sumMean;
        return p;
    }
}
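
Written out, the rating rule in the Javadoc above is the usual SVD++ prediction, where N(u) is the set of items user u has rated, μ is sumMean, b_u and b_i are the biases stored in BU and BI, p_u is the user vector from U, q_i is the item vector from I, and y_j are the implicit item vectors in Y:

$$\hat r_{ui} = \mu + b_u + b_i + q_i^{\top}\Big(p_u + |N(u)|^{-1/2}\sum_{j \in N(u)} y_j\Big)$$

The SGD updates performed in train() minimize the squared error with L2 regularization; with e_{ui} = r_{ui} - \hat r_{ui} they are:

$$b_u \leftarrow b_u + \alpha\,(e_{ui} - \lambda b_u), \qquad b_i \leftarrow b_i + \alpha\,(e_{ui} - \lambda b_i)$$
$$p_u \leftarrow p_u + \alpha\,(e_{ui}\,q_i - \lambda p_u), \qquad q_i \leftarrow q_i + \alpha\Big(e_{ui}\big(p_u + |N(u)|^{-1/2}\textstyle\sum_{j \in N(u)} y_j\big) - \lambda q_i\Big)$$
$$y_j \leftarrow y_j + \alpha\Big(|N(u)|^{-1/2}\textstyle\sum_{i \in N(u)} e_{ui}\,q_i - \lambda y_j\Big)$$

The last update is applied once per user after all of that user's ratings have been processed; the accumulator s in the code collects the sum over e_{ui} q_i.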

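The listing extends an AbsMF base class that the post does not include. Purely as an assumption so the code above compiles, a minimal sketch of such a base class might look like the following. The field names and method signatures are inferred from how SVDPP uses them, and the input format assumed by loadData (one whitespace-separated "userID itemID rating" triple per line) is a guess, not something the post specifies.

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;

import javolution.util.FastList;
import javolution.util.FastMap;

/** Hypothetical base class; the real AbsMF is not shown in the original post. */
public abstract class AbsMF {

    protected int F;          // number of latent factors
    protected double α;       // learning rate
    protected double λ;       // regularization parameter
    protected int max_iter;   // number of SGD iterations

    protected FastMap<String, Double[]> U;  // user latent factor vectors
    protected FastMap<String, Double[]> I;  // item latent factor vectors

    // userID -> (itemID -> rating)
    protected FastMap<String, Map<String, Double>> ratingData =
            new FastMap<String, Map<String, Double>>();

    public abstract void initParam(int F, double α, double λ, int max_iter);

    public abstract void train();

    public abstract double predictRating(String userID, String itemID);

    /** Assumed input format: one "userID itemID rating" triple per line, whitespace-separated. */
    public void loadData(String dataPath) {
        try (BufferedReader reader = new BufferedReader(new FileReader(dataPath))) {
            String line;
            while ((line = reader.readLine()) != null) {
                String[] parts = line.trim().split("\\s+");
                if (parts.length < 3)
                    continue;
                Map<String, Double> itemRatings = ratingData.get(parts[0]);
                if (itemRatings == null) {
                    itemRatings = new FastMap<String, Double>();
                    ratingData.put(parts[0], itemRatings);
                }
                itemRatings.put(parts[1], Double.parseDouble(parts[2]));
            }
        } catch (IOException e) {
            throw new RuntimeException("failed to load " + dataPath, e);
        }
    }

    /** Ranks the items the user has not rated yet by predicted rating and returns the top N. */
    public FastList<String> calRecSingleUser(String userID, int topN) {
        FastList<String> result = new FastList<String>();
        Map<String, Double> rated = ratingData.get(userID);
        if (rated == null)
            return result;  // unknown user: nothing to recommend
        List<Map.Entry<String, Double>> scored = new ArrayList<Map.Entry<String, Double>>();
        for (String itemID : I.keySet()) {
            if (rated.containsKey(itemID))
                continue;  // skip items the user has already rated
            scored.add(new AbstractMap.SimpleEntry<String, Double>(itemID, predictRating(userID, itemID)));
        }
        Collections.sort(scored, new Comparator<Map.Entry<String, Double>>() {
            public int compare(Map.Entry<String, Double> a, Map.Entry<String, Double> b) {
                return b.getValue().compareTo(a.getValue());  // descending by predicted rating
            }
        });
        for (int i = 0; i < Math.min(topN, scored.size()); i++) {
            result.add(scored.get(i).getKey());
        }
        return result;
    }

    /** Prints the recommended item IDs for the user. */
    public void displayRecoItem(String userID, FastList<String> items) {
        System.out.println("Recommendations for " + userID + ": " + items);
    }
}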