For the underlying theory, see the book《推荐系统实践》(Recommender System Practice), the chapter on item-based collaborative filtering. Following that theory, this example implements the algorithm in Python for a movie-recommendation scenario:
# -*- coding: utf-8 -*-
'''
Created on 2015-06-22
@author: Lockvictor
'''
import sys
import random
import math
import os
from operator import itemgetter
from collections import defaultdict

# Fixed seed so the random train/test split in generate_dataset()
# is reproducible across runs.
random.seed(0)
class ItemBasedCF(object):
    '''TopN recommendation - Item Based Collaborative Filtering.'''

    def __init__(self):
        # user -> {movie: rating} mapping for the training split
        self.trainset = {}
        # user -> {movie: rating} mapping for the test split
        self.testset = {}
        # K: number of most-similar movies considered per watched movie
        self.n_sim_movie = 20
        # N: number of movies recommended to each user
        self.n_rec_movie = 10
        # movie -> {movie: similarity} matrix, filled by calc_movie_sim()
        self.movie_sim_mat = {}
        # movie -> number of training-set users who rated it
        # (popularity starts at 0 and grows by 1 per viewer)
        self.movie_popular = {}
        # total number of distinct movies in the training set
        self.movie_count = 0
        # Diagnostics go to stderr so stdout stays clean for real output.
        print('Similar movie number = %d' % self.n_sim_movie, file=sys.stderr)
        print('Recommended movie number = %d' %
              self.n_rec_movie, file=sys.stderr)

    @staticmethod
    def loadfile(filename):
        '''Load a file, yielding each line stripped of its trailing newline.

        Returns a generator over the lines of *filename*.
        '''
        # 'with' guarantees the handle is closed even if the consumer
        # abandons the generator early (the original open()/close() pair
        # leaked the handle in that case).
        with open(filename, 'r') as fp:
            for i, line in enumerate(fp):
                yield line.strip('\r\n')
                # Periodic progress report on stderr.
                if i % 100000 == 0:
                    print('loading %s(%s)' % (filename, i), file=sys.stderr)
        print('load %s succ' % filename, file=sys.stderr)

    def generate_dataset(self, filename, pivot=0.7):
        '''Load rating data and split it into training set and test set.

        *pivot* is the expected fraction of ratings that land in the
        training set; the split is random per rating, not per user.
        '''
        trainset_len = 0
        testset_len = 0
        for line in self.loadfile(filename):
            # MovieLens 1M format: UserID::MovieID::Rating::Timestamp
            user, movie, rating, _ = line.split('::')
            # Split the data by pivot.
            if random.random() < pivot:
                self.trainset.setdefault(user, {})
                self.trainset[user][movie] = int(rating)
                trainset_len += 1
            else:
                self.testset.setdefault(user, {})
                self.testset[user][movie] = int(rating)
                testset_len += 1
        print('split training set and test set succ', file=sys.stderr)
        print('train set = %s' % trainset_len, file=sys.stderr)
        print('test set = %s' % testset_len, file=sys.stderr)

    def calc_movie_sim(self):
        '''Calculate the movie-movie similarity matrix from the training set.'''
        print('counting movies number and popularity...', file=sys.stderr)
        # Popularity = number of users who rated the movie.
        for user, movies in self.trainset.items():
            for movie in movies:
                if movie not in self.movie_popular:
                    self.movie_popular[movie] = 0
                self.movie_popular[movie] += 1
        print('count movies number and popularity succ', file=sys.stderr)

        # Total number of distinct movies seen in training.
        self.movie_count = len(self.movie_popular)
        print('total movie number = %d' % self.movie_count, file=sys.stderr)

        # Count co-rated users between items: the item-item co-occurrence
        # matrix, where cell (m1, m2) is the number of users who rated both.
        itemsim_mat = self.movie_sim_mat
        print('building co-rated users matrix...', file=sys.stderr)
        for user, movies in self.trainset.items():
            for m1 in movies:
                itemsim_mat.setdefault(m1, defaultdict(int))
                for m2 in movies:
                    if m1 == m2:
                        continue
                    itemsim_mat[m1][m2] += 1
        print('build co-rated users matrix succ', file=sys.stderr)

        # Turn co-occurrence counts into cosine similarity:
        # sim(m1, m2) = |U(m1) ∩ U(m2)| / sqrt(|U(m1)| * |U(m2)|).
        print('calculating movie similarity matrix...', file=sys.stderr)
        simfactor_count = 0
        PRINT_STEP = 2000000  # progress report interval
        for m1, related_movies in itemsim_mat.items():
            for m2, count in related_movies.items():
                itemsim_mat[m1][m2] = count / math.sqrt(
                    self.movie_popular[m1] * self.movie_popular[m2])
                simfactor_count += 1
                if simfactor_count % PRINT_STEP == 0:
                    print('calculating movie similarity factor(%d)' %
                          simfactor_count, file=sys.stderr)
        print('calculate movie similarity matrix(similarity factor) succ',
              file=sys.stderr)
        print('Total similarity factor number = %d' %
              simfactor_count, file=sys.stderr)

    def recommend(self, user):
        '''Find K similar movies and recommend N movies to *user*.

        Returns a list of (movie, score) pairs sorted by descending score,
        where score accumulates similarity * the user's own rating.
        Raises KeyError if *user* is not in the training set.
        '''
        K = self.n_sim_movie
        N = self.n_rec_movie
        # movie -> predicted interest (similarity * rating), accumulated
        # over all of the user's watched movies.
        rank = {}
        watched_movies = self.trainset[user]
        for movie, rating in watched_movies.items():
            # Only the K most similar neighbours of each watched movie vote.
            for related_movie, similarity_factor in sorted(
                    self.movie_sim_mat[movie].items(),
                    key=itemgetter(1), reverse=True)[:K]:
                # Never recommend something the user already watched.
                if related_movie in watched_movies:
                    continue
                rank.setdefault(related_movie, 0)
                rank[related_movie] += similarity_factor * rating
        # Return only the N best movies.
        return sorted(rank.items(), key=itemgetter(1), reverse=True)[:N]

    def evaluate(self):
        '''Print evaluation result: precision, recall, coverage and popularity.'''
        print('Evaluation start...', file=sys.stderr)
        N = self.n_rec_movie
        # Variables for precision and recall.
        hit = 0         # recommendations that appear in the user's test set
        rec_count = 0   # total number of recommendations made
        test_count = 0  # total number of test-set ratings
        # Variable for coverage: set of all distinct recommended movies.
        all_rec_movies = set()
        # Variable for popularity: sum of log-popularity of recommendations.
        popular_sum = 0
        for i, user in enumerate(self.trainset):
            if i % 500 == 0:
                print('recommended for %d users' % i, file=sys.stderr)
            test_movies = self.testset.get(user, {})
            rec_movies = self.recommend(user)
            for movie, _ in rec_movies:
                if movie in test_movies:
                    hit += 1
                all_rec_movies.add(movie)
                # log(1 + popularity) dampens the head of the distribution.
                popular_sum += math.log(1 + self.movie_popular[movie])
            rec_count += N
            test_count += len(test_movies)
        precision = hit / (1.0 * rec_count)
        recall = hit / (1.0 * test_count)
        coverage = len(all_rec_movies) / (1.0 * self.movie_count)
        popularity = popular_sum / (1.0 * rec_count)
        print('precision=%.4f\trecall=%.4f\tcoverage=%.4f\tpopularity=%.4f' %
              (precision, recall, coverage, popularity), file=sys.stderr)
if __name__ == '__main__':
    # Path to the MovieLens 1M ratings file.
    rating_path = os.path.join('ml-1m', 'ratings.dat')
    # Build the recommender: split the data, compute item-item
    # similarities, then report precision/recall/coverage/popularity.
    recommender = ItemBasedCF()
    recommender.generate_dataset(rating_path)
    recommender.calc_movie_sim()
    recommender.evaluate()