隐语义模型LFM基础代码

'''
LFM Model
'''
import pandas as pd
import numpy as np

class LFM:
    """Latent Factor Model (LFM) for rating prediction.

    Factorizes the user-item rating matrix into user latent vectors P and
    item latent vectors Q, trained by stochastic gradient descent on the
    squared prediction error with L2 regularization.
    """

    def __init__(self, alpha, reg_p, reg_q, number_latentFactors=10,
                 number_epochs=10, columns=("uid", "iid", "rating")):
        self.alpha = alpha  # SGD learning rate
        self.reg_p = reg_p  # L2 regularization weight for the user matrix P
        self.reg_q = reg_q  # L2 regularization weight for the item matrix Q
        self.number_latentFactors = number_latentFactors  # number of latent factors
        self.number_epochs = number_epochs  # maximum number of SGD epochs
        # (user-id, item-id, rating) column names; tuple default avoids the
        # shared-mutable-default pitfall while remaining index-compatible.
        self.columns = columns

    def fit(self, dataset):
        """Fit P and Q on a rating table.

        :param dataset: DataFrame-like with columns (uid, iid, rating)
        :return: None; trained factors are stored on self.p / self.q
        """
        self.dataset = pd.DataFrame(dataset)
        # Per-user / per-item rating lists; only their indexes (the sets of
        # known user ids and item ids) are used downstream.
        self.users_ratings = self.dataset.groupby(self.columns[0]).agg([list])
        # BUGFIX: items must be grouped by the item-id column (columns[1]);
        # the original grouped by columns[0] again, so predict() tested item
        # ids against the set of user ids.
        self.items_ratings = self.dataset.groupby(self.columns[1]).agg([list])
        # Global mean rating: fallback prediction for unseen users/items.
        self.globalMean = self.dataset[self.columns[2]].mean()

        self.p, self.q = self.sgd()

    def _init_matrix(self):
        """Initialize P and Q with uniform random values in [0, 1).

        :return: (p, q) dicts mapping user id -> latent vector and
                 item id -> latent vector
        """
        # User latent factors
        p = dict(zip(
            self.users_ratings.index,
            np.random.rand(len(self.users_ratings), self.number_latentFactors).astype(np.float32)
        ))
        # Item latent factors
        q = dict(zip(
            self.items_ratings.index,
            np.random.rand(len(self.items_ratings), self.number_latentFactors).astype(np.float32)
        ))
        return p, q

    def sgd(self):
        """Optimize P and Q with stochastic gradient descent.

        :return: (p, q) trained latent-factor dicts
        """
        p, q = self._init_matrix()

        for epoch in range(self.number_epochs):
            print("iter%d" % epoch)
            error_list = []
            for uid, iid, r_ui in self.dataset.itertuples(index=False):
                v_pu = p[uid]  # user latent vector
                v_qi = q[iid]  # item latent vector
                err = np.float32(r_ui - np.dot(v_pu, v_qi))

                # BUGFIX: the original *replaced* each vector with the scaled
                # gradient (missing "+=") and used the wrong vector in the
                # gradient: d(err^2)/dP is -2*err*Q, so the P step must use
                # v_qi (and the Q step v_pu), not the vector being updated.
                v_pu += self.alpha * (err * v_qi - self.reg_p * v_pu)
                v_qi += self.alpha * (err * v_pu - self.reg_q * v_qi)

                p[uid] = v_pu
                q[iid] = v_qi

                error_list.append(err ** 2)

            # RMSE over this epoch's (pre-update) prediction errors.
            print(np.sqrt(np.mean(error_list)))
        return p, q

    def predict(self, uid, iid):
        """Predict the rating of user `uid` for item `iid`.

        Falls back to the global mean rating when either id was not seen
        during fit().
        """
        if uid not in self.users_ratings.index or iid not in self.items_ratings.index:
            return self.globalMean

        return np.dot(self.p[uid], self.q[iid])


if __name__ == "__main__":
    # MovieLens-1M ratings file; each line is "uid::iid::rating::timestamp".
    path = "data/ml-1m/ratings.dat"
    header = ["uid", "iid", "rating"]
    dtype = {"uid": np.int32, "iid": np.int32, "rating": np.float32}
    # A multi-character separator is only supported by the python parser
    # engine; passing it explicitly avoids the pandas ParserWarning fallback.
    data = pd.read_csv(path, sep="::", engine="python",
                       usecols=range(3), names=header, dtype=dtype)

    alpha = 0.8  # SGD learning rate. NOTE(review): very aggressive for
                 # squared-error SGD — expect divergence; tune down (~0.02).
    reg_p = 0.2  # L2 penalty on the user matrix P
    reg_q = 0.2  # L2 penalty on the item matrix Q

    model = LFM(alpha, reg_p, reg_q)
    model.fit(data)


  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
以下是基于LFM+MLP模型的GRU的代码实现,使用Python和PyTorch框架:

```python
import torch
import torch.nn as nn

class LFM_MLP_GRU(nn.Module):
    def __init__(self, num_users, num_items, embedding_size, hidden_size, mlp_layers):
        super(LFM_MLP_GRU, self).__init__()
        # LFM embedding layers
        self.user_embedding = nn.Embedding(num_users, embedding_size)
        self.item_embedding = nn.Embedding(num_items, embedding_size)
        # MLP layers
        layers = []
        input_size = 2 * embedding_size
        for layer in mlp_layers:
            layers.append(nn.Linear(input_size, layer))
            layers.append(nn.ReLU())
            input_size = layer
        self.mlp_layers = nn.Sequential(*layers)
        # GRU layer
        self.gru = nn.GRU(input_size, hidden_size)
        # Output layer
        self.output_layer = nn.Linear(hidden_size, 1)

    def forward(self, user_ids, item_ids):
        # LFM embedding
        user_embedding = self.user_embedding(user_ids)
        item_embedding = self.item_embedding(item_ids)
        # Concatenate user and item embeddings
        lfm_input = torch.cat([user_embedding, item_embedding], dim=1)
        # Pass through MLP layers
        mlp_output = self.mlp_layers(lfm_input)
        # Reshape MLP output for GRU input
        gru_input = mlp_output.unsqueeze(0)
        # Pass through GRU layer
        gru_output, _ = self.gru(gru_input)
        # Pass through output layer and return
        output = self.output_layer(gru_output)
        return output.squeeze()
```

此代码实现中,LFM部分使用了embedding层,MLP部分使用了多个全连接层,GRU部分使用了一个GRU层,输出层使用了一个全连接层。在前向传播中,用户和物品的embedding首先被拼接起来,然后通过MLP层得到一个向量,然后通过GRU层得到状态,最后通过输出层得到评分预测值。

需要注意的是,此代码实现并没有包含训练过程和数据预处理过程,需要根据具体需求进行补充。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值