推荐算法学习 --- SVD++(Java 实现)

1引言

上一篇MF写了PSVD和RSVD算法

https://www.jianshu.com/p/ec024854f824

评分的行为从侧面反映了用户的喜好,可以将这样的反映通过隐式参数的形式体现在模型中,从而得到一个更为精细的模型,便是 SVD++

预测规则改为:

$\tilde{r}_{ui}=\mu+b_u+b_i+U_{u\cdot}V_{i\cdot}^T+\tilde{\overline{U}}_{u\cdot}V_{i\cdot}^T$,其中

$\tilde{\overline{U}}_{u\cdot}=\frac{1}{\sqrt{|\tilde{I}_u|}}\sum_{i'\in\tilde{I}_u}W_{i'\cdot}$,而

$\tilde{I}_u$ 是用户 u 的隐式反馈集,

$W_{i'\cdot}$ 是隐式反馈对应的潜在特征向量(类似 V,W 是一个 m×d 的矩阵)

2 SVD++

目标方程:

$\min_{\theta}\sum_{u=1}^{n}\sum_{i=1}^{m}y_{ui}\left[\frac{1}{2}(r_{ui}-\tilde{r}_{ui})^2+reg(U_{u\cdot},V_{i\cdot},b_u,b_i,W)\right]$,其中

$reg(U_{u\cdot},V_{i\cdot},b_u,b_i,W)=\frac{\alpha_u}{2}\|U_{u\cdot}\|^2+\frac{\alpha_v}{2}\|V_{i\cdot}\|^2+\frac{\beta_u}{2}\|b_u\|^2+\frac{\beta_v}{2}\|b_i\|^2+\frac{\alpha_w}{2}\sum_{i'\in\tilde{I}_u}\|W_{i'\cdot}\|^2$

仍然用梯度下降的方法,推导不写啦

0414b5ff6c84

所以更新规则还是:

好像和SVD也没啥区别啦,多了一个需要更新的变量而已。

3伪代码

0414b5ff6c84

4代码

我们仍是使用 ML-100k 的数据集,不过选用了 ua.base 作为训练集,有 90570 条记录,我们将其随机分成两半,一半做显式反馈集,一半做隐式反馈集,即 ua.base.explicit 和 ua.base.implicit,选用 ua.test 作为测试集。

其他参数取

$\alpha_u=\alpha_v=\alpha_w=\beta_u=\beta_v=0.01,\ \gamma=0.01,\ d=20,\ T=100$

$p=80000$

初始化如下:

0414b5ff6c84

java代码:(其实和上一个差不多啦)

package one;

import java.io.BufferedReader;

import java.io.FileReader;

import java.io.IOException;

import java.util.ArrayList;

import java.util.Random;

public class train {

    // MovieLens-100k dimensions: n = number of users, m = number of items.
    static int n = 943;
    static int m = 1682;

    /**
     * Reads a text file and returns its lines in order.
     *
     * @param FileName path of the file to read
     * @return the lines read; possibly empty/partial if an I/O error occurs
     *         (the error is printed and swallowed, matching the original
     *         best-effort behavior)
     */
    static ArrayList<String> LoadData(String FileName) {
        ArrayList<String> arrayList = new ArrayList<>();
        // try-with-resources closes the reader even when readLine throws
        // (the original leaked the streams on the exception path)
        try (BufferedReader bf = new BufferedReader(new FileReader(FileName))) {
            String str;
            while ((str = bf.readLine()) != null) {
                arrayList.add(str);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        return arrayList;
    }

    /**
     * Mean absolute error over the non-zero entries of {@code test}.
     *
     * @param test  ground-truth ratings; 0 means "not rated"
     * @param train predicted ratings
     * @return MAE, or 0.0 when the test set has no rated entries
     *         (the original returned NaN in that case)
     */
    public static double Mae(double[][] test, double[][] train) {
        int cnt = 0;
        double mae = 0.0;
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < m; j++) {
                if (test[i][j] != 0) {
                    mae += Math.abs(test[i][j] - train[i][j]);
                    cnt++;
                }
            }
        }
        return cnt == 0 ? 0.0 : mae / cnt;
    }

    /**
     * Root mean squared error over the non-zero entries of {@code test}.
     *
     * @param test  ground-truth ratings; 0 means "not rated"
     * @param train predicted ratings
     * @return RMSE, or 0.0 when the test set has no rated entries
     *         (the original returned NaN in that case)
     */
    public static double Rmse(double[][] test, double[][] train) {
        int cnt = 0;
        double rmse = 0.0;
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < m; j++) {
                if (test[i][j] != 0) {
                    rmse += (test[i][j] - train[i][j]) * (test[i][j] - train[i][j]);
                    cnt++;
                }
            }
        }
        return cnt == 0 ? 0.0 : Math.sqrt(rmse / cnt);
    }

    /**
     * Trains an SVD++ model on ua.base (alternate lines used as implicit
     * feedback) and reports MAE/RMSE on ua.test.
     */
    public static void main(String[] args) {
        double ave_all = 0;                  // global mean rating (mu); also learned during SGD
        int cnt_all = 0;
        double[] ave_use = new double[n];    // per-user mean rating (computed, not used by the model)
        int[] cnt_use = new int[n];
        double[] ave_item = new double[m];   // per-item mean rating (computed, not used by the model)
        int[] cnt_item = new int[m];
        double[] bias_use = new double[n];   // user bias b_u
        int[] bias_use_cnt = new int[n];
        double[] bias_item = new double[m];  // item bias b_i
        int[] bias_item_cnt = new int[m];
        double[][] train = new double[n][m]; // explicit ratings; unrated cells later filled with predictions

        // Per-user implicit feedback set: the items this user interacted with.
        class Implicit {
            int[] item = new int[m];
            double[] rate = new double[m];
            int cnt = 0;
        }
        Implicit[] imp = new Implicit[n];
        for (int i = 0; i < n; i++)
            imp[i] = new Implicit();

        boolean[][] yui = new boolean[n][m]; // yui[u][i]: (u,i) is an explicit training rating
        double[][] test = new double[n][m];
        // (Java zero-initializes arrays, so no explicit clearing loop is needed.)

        // Flat list of explicit records so SGD can sample uniformly in O(1).
        int cnt = 0;
        class Record {
            int u = 0;
            int i = 0;
            double rate = 0;
        }
        Record[] record = new Record[80000];
        for (int i = 0; i < 80000; i++)
            record[i] = new Record();

        // Read the training set (ua.base: "user \t item \t rating \t timestamp").
        ArrayList<String> arrayList = LoadData("ua.base");
        int length = arrayList.size();
        String[] tmp;

        // Split the data: alternating lines go to the implicit / explicit halves.
        boolean flag = true;
        for (int i = 0; i < length; i++) {
            tmp = arrayList.get(i).split("\t");
            int user = Integer.valueOf(tmp[0]) - 1; // ids in the file are 1-based
            int item = Integer.valueOf(tmp[1]) - 1;
            double rating = Double.valueOf(tmp[2]);
            if (flag == true) {
                // odd lines form the implicit-feedback half
                int cnt1 = imp[user].cnt;
                imp[user].item[cnt1] = item;
                imp[user].rate[cnt1] = rating;
                imp[user].cnt++;
                flag = false;
                continue;
            }
            // even lines form the explicit-rating half
            train[user][item] = rating;
            yui[user][item] = true;
            // global sum
            ave_all += rating;
            cnt_all++;
            // per-user sum
            ave_use[user] += rating;
            cnt_use[user]++;
            // per-item sum
            ave_item[item] += rating;
            cnt_item[item]++;
            // keep a flat copy for sampling
            record[cnt].u = user;
            record[cnt].i = item;
            record[cnt++].rate = rating;
            flag = true;
        }

        // Turn sums into means; cold users/items fall back to the global mean.
        ave_all /= cnt_all;
        for (int i = 0; i < n; i++) {
            if (cnt_use[i] != 0) {
                ave_use[i] /= cnt_use[i];
            } else
                ave_use[i] = ave_all;
        }
        for (int j = 0; j < m; j++) {
            if (cnt_item[j] != 0) {
                ave_item[j] /= cnt_item[j];
            } else
                ave_item[j] = ave_all;
        }

        // Initialize biases as mean deviations from the global mean.
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < m; j++) {
                if (yui[i][j] == true) {
                    bias_use[i] += train[i][j] - ave_all;
                    bias_use_cnt[i]++;
                    bias_item[j] += train[i][j] - ave_all;
                    bias_item_cnt[j]++;
                }
            }
        }
        for (int i = 0; i < n; i++) {
            if (bias_use_cnt[i] != 0) {
                bias_use[i] /= bias_use_cnt[i];
            } else
                bias_use[i] = 0;
        }
        for (int i = 0; i < m; i++) {
            if (bias_item_cnt[i] != 0) {
                bias_item[i] /= bias_item_cnt[i];
            } else
                bias_item[i] = 0;
        }

        // Read the test set (ua.test, same tab-separated format).
        ArrayList<String> arrayList_test = LoadData("ua.test");
        int len = arrayList_test.size();
        for (int i = 0; i < len; i++) {
            tmp = arrayList_test.get(i).split("\t");
            int user = Integer.valueOf(tmp[0]) - 1;
            int item = Integer.valueOf(tmp[1]) - 1;
            double rating = Double.valueOf(tmp[2]);
            test[user][item] = rating;
        }

        // Hyper-parameters and model initialization.
        Random ra = new Random();
        double alpha_u = 0.01;  // L2 weight on U
        double alpha_v = 0.01;  // L2 weight on V
        double alpha_w = 0.01;  // L2 weight on W
        double beta_u = 0.01;   // L2 weight on b_u
        double beta_v = 0.01;   // L2 weight on b_i
        double lamma = 0.01;    // learning rate (gamma in the article), decayed each epoch
        int d = 20;             // latent dimension
        int T = 100;            // number of epochs
        int p = 90570 / 2;      // number of explicit records (half of ua.base)

        double[][] U_uk = new double[n][d]; // explicit user factors
        double[][] V_ik = new double[m][d]; // item factors
        double[][] W_ik = new double[m][d]; // implicit item factors
        // small symmetric random init in (-0.005, 0.005)
        for (int u = 0; u < n; u++)
            for (int k = 0; k < d; k++) {
                U_uk[u][k] = (ra.nextDouble() - 0.5) * 0.01;
            }
        for (int i = 0; i < m; i++)
            for (int k = 0; k < d; k++) {
                V_ik[i][k] = (ra.nextDouble() - 0.5) * 0.01;
                W_ik[i][k] = (ra.nextDouble() - 0.5) * 0.01;
            }

        double e_ui = 0.0;
        double r_ui = 0.0;
        long startTime = System.currentTimeMillis();

        // SGD training loop.
        for (int t = 0; t < T; t++) {
            for (int t2 = 0; t2 < p; t2++) {
                // sample one explicit record uniformly
                int sample = ra.nextInt(p);
                int user = record[sample].u;
                int item = record[sample].i;
                double rate = record[sample].rate;

                // Implicit profile U_ui: mean of the W rows of the user's implicit items.
                // (The article's formula uses 1/sqrt(|I_u|); this implementation keeps
                // the plain mean 1/|I_u| used throughout.)
                r_ui = 0.0;
                double[] U_ui = new double[d];
                int len1 = imp[user].cnt;
                for (int k = 0; k < d; k++) {
                    for (int i = 0; i < len1; i++) {
                        U_ui[k] += W_ik[imp[user].item[i]][k];
                    }
                    if (len1 != 0)
                        U_ui[k] /= len1;
                    else
                        U_ui[k] = 0;
                }

                // prediction: mu + b_u + b_i + (U_u + U_ui) . V_i
                for (int k = 0; k < d; k++) {
                    r_ui += U_uk[user][k] * V_ik[item][k] + U_ui[k] * V_ik[item][k];
                }
                r_ui += ave_all + bias_use[user] + bias_item[item];
                // NOTE(review): clamping before computing the error also clamps the
                // gradient for saturated predictions; kept from the original scheme.
                if (r_ui > 5)
                    r_ui = 5.0;
                else if (r_ui < 1)
                    r_ui = 1.0;
                e_ui = rate - r_ui;

                // Gradient step.
                for (int k = 0; k < d; k++) {
                    double temp_u = U_uk[user][k];
                    double temp_v = V_ik[item][k];
                    U_uk[user][k] = U_uk[user][k]
                            + lamma * (e_ui * V_ik[item][k] - alpha_u * U_uk[user][k]);
                    // BUGFIX: dr/dV_i = U_u + U_ui (the prediction contains both terms);
                    // the original dropped the implicit part U_ui.
                    V_ik[item][k] = V_ik[item][k]
                            + lamma * (e_ui * (temp_u + U_ui[k]) - alpha_v * V_ik[item][k]);
                    // BUGFIX: W must be updated for EVERY item in the user's implicit
                    // set (the prediction averages over all of them), not only for the
                    // sampled item as in the original.
                    for (int j = 0; j < len1; j++) {
                        int impItem = imp[user].item[j];
                        W_ik[impItem][k] = W_ik[impItem][k]
                                + lamma * (e_ui * temp_v / len1 - alpha_w * W_ik[impItem][k]);
                    }
                }
                ave_all = ave_all + lamma * e_ui; // mu is learned too (no regularization)
                bias_use[user] = bias_use[user] + lamma * (e_ui - beta_u * bias_use[user]);
                bias_item[item] = bias_item[item] + lamma * (e_ui - beta_v * bias_item[item]);
            }
            lamma *= 0.9; // learning-rate decay per epoch
        }

        long endTime = System.currentTimeMillis();
        System.out.println("程序运行时间: " + (endTime - startTime) + "ms");

        // Fill every unrated (u,i) cell of `train` with the model prediction.
        for (int u = 0; u < n; u++) {
            // The implicit profile depends only on the user, so compute it once per
            // user instead of once per (user, item) pair as the original did.
            double[] U_ui = new double[d];
            int len1 = imp[u].cnt;
            for (int k = 0; k < d; k++) {
                for (int ii = 0; ii < len1; ii++) {
                    U_ui[k] += W_ik[imp[u].item[ii]][k];
                }
                if (len1 != 0)
                    U_ui[k] /= len1;
                else
                    U_ui[k] = 0;
            }
            for (int i = 0; i < m; i++) {
                if (yui[u][i] == false) {
                    double r = 0;
                    for (int k = 0; k < d; k++) {
                        r += U_uk[u][k] * V_ik[i][k] + U_ui[k] * V_ik[i][k];
                    }
                    train[u][i] = ave_all + bias_use[u] + bias_item[i] + r;
                    // clamp predictions into the valid rating range [1, 5]
                    if (train[u][i] > 5)
                        train[u][i] = 5;
                    else if (train[u][i] < 1)
                        train[u][i] = 1;
                }
            }
        }

        double mae = Mae(test, train);
        double rmse = Rmse(test, train);
        System.out.println("Rmse: " + rmse);
        System.out.println("MAE: " + mae);
    }
}

5结果和分析

RMSE

MAE

RSVD

0.9912

0.7752

SVD++

0.9888

0.7741

因为数据集比较小,所以最终结果优化并没有很明显,但是可以看出确实更准确了。

此代码用时16837ms,课件说是30s,不知道是环境问题还是代码优化问题(思考.jpg)

参考:

https://blog.csdn.net/wjmishuai/article/details/71191945

  • 0
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值