- Computation must be fast
- Results do not have to be perfectly precise
- A recommendation model is designed ahead of time
- Based on a custom model
- Basic idea behind the recommendation-priority computation: a user's taste is consistent over a recent window of time (behavior from long ago carries little weight for what to recommend now)
- Recommendation priority of the candidate products (a worked sketch follows this list):
  - The candidates $[X, Y, Z]$ are the top $N$ products in the similarity list of the current product $D$, i.e. the products most similar to $D$
  - Suppose the user recently rated products $A$, $B$, $C$ with scores 5, 4 and 1; the recommendation-priority score of candidate $X$ is then:
    $$\cfrac{sim(A,X)\times 5+sim(B,X)\times 4+sim(C,X)\times 1}{3}+\lg 2-\lg 1$$
  - The $2$ in $\lg 2$ is the number of products the user rated higher than 3 points
  - The $1$ in $\lg 1$ is the number of products the user rated lower than 3 points
  - The 3-point cutoff is set manually
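To make the formula concrete, here is a minimal, self-contained Scala sketch under assumed similarity values (0.8, 0.6, 0.3 are made up for illustration); it reproduces the same computation that computeProductScore performs later in this section:

object PriorityScoreExample extends App {
  // Assumed similarities between candidate X and the recently rated products (made up)
  val sim = Map("A" -> 0.8, "B" -> 0.6, "C" -> 0.3)
  // The user's recent ratings from the example: A=5, B=4, C=1
  val recentRatings = Seq(("A", 5.0), ("B", 4.0), ("C", 1.0))

  // Base score: average of similarity * rating over the recent ratings
  val base = recentRatings.map { case (p, r) => sim(p) * r }.sum / recentRatings.size
  // Counts of recent ratings above / below the 3-point cutoff
  val incount = recentRatings.count(_._2 > 3) // here 2 (A and B)
  val recount = recentRatings.count(_._2 < 3) // here 1 (C)

  // lg is log base 10; the streaming code below defaults empty counts to 1 to avoid lg(0)
  val priority = base + math.log10(incount) - math.log10(recount)
  println(f"priority score of X = $priority%.4f") // (0.8*5+0.6*4+0.3*1)/3 + lg2 - lg1 ≈ 2.5344
}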
1. Business System
The tracking log can be emitted either from frontend JS or from the Java backend; here it is instrumented in the Java backend.
ProductRestApi.java
@RequestMapping("/rest/product")
@Controller
public class ProductRestApi {

    private static Logger logger = Logger.getLogger(ProductRestApi.class.getName());

    @Autowired
    private RecommenderService recommenderService;
    @Autowired
    private ProductService productService;
    @Autowired
    private UserService userService;
    @Autowired
    private RatingService ratingService;

    @RequestMapping(value = "/rate/{id}", produces = "application/json", method = RequestMethod.GET)
    @ResponseBody
    public Model rateToProduct(@PathVariable("id") int id, @RequestParam("score") Double score,
                               @RequestParam("username") String username, Model model) {
        User user = userService.findByUsername(username);
        ProductRatingRequest request = new ProductRatingRequest(user.getUserId(), id, score);
        boolean complete = ratingService.productRating(request);
        // Tracking log: userId|productId|score|timestamp
        if (complete) {
            System.out.print("=========tracking=========");
            logger.info(Constant.PRODUCT_RATING_PREFIX + ":" + user.getUserId() + "|" + id + "|"
                    + request.getScore() + "|" + System.currentTimeMillis() / 1000);
        }
        model.addAttribute("success", true);
        model.addAttribute("message", "Rating submitted!");
        return model;
    }
}
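For reference, a rating submitted through this endpoint, e.g. GET /rest/product/rate/8195?score=5&username=abc (illustrative IDs), appends one log line in the pipe-separated format userId|productId|score|timestamp. Judging from the filter in the Kafka stage below, Constant.PRODUCT_RATING_PREFIX is the string "PRODUCT_RATING_PREFIX", so a line looks like:

PRODUCT_RATING_PREFIX:4867|8195|5.0|1556424047

This exact prefix is what both the Flume interceptor and the Kafka Streams processor match on.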
2. Flume Stage
Write the agent configuration: create logexecsource-kafkasink.conf under Flume's conf directory to wire Flume up to Kafka:
- Use an exec-type source to monitor a single log file
- Use a sink of type org.apache.flume.sink.kafka.KafkaSink to forward the log entries containing the keyword "PRODUCT_RATING_PREFIX" to Kafka
# 1. Name the components of the agent: agentRS is the agent name; it defines one
#    source called r1 (multiple names would be separated by spaces)
# r1: the name of agentRS's source
agentRS.sources = r1
# c1: the name of agentRS's channel
agentRS.channels = c1
# k1: the name of agentRS's sink
agentRS.sinks = k1

# 2. Configure the source: group.property = value
# exec: the input is the live output of a local command
agentRS.sources.r1.type = exec
# Absolute path of the log file to collect
agentRS.sources.r1.command = tail -f /mnt/d/Projects/BigData/ECommerceRecommenderSystem/businessServer/src/main/log/agentRS.log

# 3. Configure the interceptor
agentRS.sources.r1.interceptors = i1
agentRS.sources.r1.interceptors.i1.type = regex_filter
# Regex matching the log prefix to keep
agentRS.sources.r1.interceptors.i1.regex = .+PRODUCT_RATING_PREFIX.+

# 4. Configure the channel
# memory: an in-memory channel
agentRS.channels.c1.type = memory
# Total channel capacity: 10000 events
agentRS.channels.c1.capacity = 10000
# Commit a transaction once 1000 events have been collected (left commented out here)
#agentRS.channels.c1.transactionCapacity = 1000

# 5. Configure the sink
# The output destination is Kafka
agentRS.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
# Target Kafka topic
agentRS.sinks.k1.kafka.topic = logRS
agentRS.sinks.k1.kafka.bootstrap.servers = localhost:9092
agentRS.sinks.k1.kafka.producer.acks = 1
agentRS.sinks.k1.kafka.flumeBatchSize = 20

# 6. Wire the components together: one source may feed several channels,
#    but a sink reads from exactly one channel!
agentRS.sources.r1.channels = c1
agentRS.sinks.k1.channel = c1
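With the config in place, the agent can be started with Flume's standard launcher (the conf paths here assume you run it from the Flume home directory):

bin/flume-ng agent --conf conf --conf-file conf/logexecsource-kafkasink.conf --name agentRS -Dflume.root.logger=INFO,console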
3. Kafka Stage
Application.java
/**
* Project: ECommerceRecommendSystem
* Created on 2019/4/28 14:59
*/
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.TopologyBuilder;
import java.util.Properties;
public class Application {
    public static void main(String[] args) {
        String brokers = "localhost:9092";
        String zookeepers = "localhost:2181";

        // Input and output topics
        String from = "logRS";
        String to = "recommender";

        // Kafka Streams configuration parameters
        Properties settings = new Properties();
        settings.put(StreamsConfig.APPLICATION_ID_CONFIG, "logFilter");
        settings.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
        settings.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeepers);

        // Build the Kafka Streams configuration object
        StreamsConfig config = new StreamsConfig(settings);

        // Define the topology: source topic -> LogProcessor -> sink topic
        TopologyBuilder builder = new TopologyBuilder();
        builder.addSource("SOURCE", from)
                .addProcessor("PROCESSOR", () -> new LogProcessor(), "SOURCE")
                .addSink("SINK", to, "PROCESSOR");

        // Create and start the stream
        KafkaStreams streams = new KafkaStreams(builder, config);
        streams.start();
        System.out.println("kafka stream started!");
    }
}
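Note that TopologyBuilder and StreamsConfig.ZOOKEEPER_CONNECT_CONFIG belong to the old 0.10.x-era Kafka Streams API and have since been removed (newer clients use StreamsBuilder/Topology and no longer talk to ZooKeeper), so this class assumes a Kafka dependency from the same era as the original project.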
LogProcessor.java
/**
* Project: ECommerceRecommendSystem
* Created on 2019/4/28 15:08
*/
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorContext;
public class LogProcessor implements Processor<byte[], byte[]> {
    private ProcessorContext context;

    @Override
    public void init(ProcessorContext processorContext) {
        this.context = processorContext;
    }

    @Override
    public void process(byte[] dummy, byte[] line) {
        // Core processing: keep only rating logs, identified by their fixed prefix
        String input = new String(line);
        if (input.contains("PRODUCT_RATING_PREFIX:")) {
            System.out.println("product rating data coming! " + input);
            // Strip everything up to and including the prefix; forward the rest downstream
            input = input.split("PRODUCT_RATING_PREFIX:")[1].trim();
            context.forward("logProcessor".getBytes(), input.getBytes());
        }
    }

    @Override
    public void punctuate(long l) {
    }

    @Override
    public void close() {
    }
}
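Both topics have to exist before the chain runs, and this stage is easy to sanity-check with the stock Kafka CLI (old-style --zookeeper flag, matching the API version used above); the console consumer should print bare userId|productId|score|timestamp lines once ratings flow:

bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic logRS
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic recommender
bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic recommender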
4. Spark Streaming Stage
OnlineRecommender.scala
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.{MongoClient, MongoClientURI}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis
/**
* Project: ECommerceRecommendSystem
* Created on 2019/4/28 9:18
*/
// Connection helper object that holds the Redis and MongoDB connections
object ConnHelper extends Serializable {
  // Lazy vals: only initialized on first use
  lazy val jedis = new Jedis("localhost")
  lazy val mongoClient = MongoClient(MongoClientURI("mongodb://localhost:27017/recommender"))
}

case class MongoConfig(uri: String, db: String)

// Standard recommendation object
case class Recommendation(productId: Int, score: Double)

// A user's recommendation list
case class UserRecs(userId: Int, recs: Seq[Recommendation])

// A product's similarity list
case class ProductRecs(productId: Int, recs: Seq[Recommendation])
object OnlineRecommender {
  // Constants and collection names
  val MONGODB_RATING_COLLECTION = "Rating"
  val STREAM_RECS = "StreamRecs"
  val PRODUCT_RECS = "ProductRecs"
  val MAX_USER_RATING_NUM = 20
  val MAX_SIM_PRODUCTS_NUM = 20
  def main(args: Array[String]): Unit = {
    val config = Map(
      "spark.cores" -> "local[*]",
      "mongo.uri" -> "mongodb://localhost:27017/recommender",
      "mongo.db" -> "recommender",
      "kafka.topic" -> "recommender"
    )

    // Create the Spark conf, session and a streaming context with a 2-second batch interval
    val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("OnlineRecommender")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    val sc = spark.sparkContext
    val ssc = new StreamingContext(sc, Seconds(2))

    import spark.implicits._
    implicit val mongoConfig = MongoConfig(config("mongo.uri"), config("mongo.db"))

    // Load the product-similarity matrix and broadcast it
    val simProductsMatrix = spark.read
      .option("uri", mongoConfig.uri)
      .option("collection", PRODUCT_RECS)
      .format("com.mongodb.spark.sql")
      .load()
      .as[ProductRecs]
      .rdd
      // Convert to nested maps so later similarity lookups are cheap
      .map { item =>
        (item.productId, item.recs.map(x => (x.productId, x.score)).toMap)
      }
      .collectAsMap()

    // Broadcast variable
    val simProductsMatrixBC = sc.broadcast(simProductsMatrix)

    // Kafka consumer parameters
    val kafkaParam = Map(
      "bootstrap.servers" -> "localhost:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "recommender",
      "auto.offset.reset" -> "latest"
    )

    // Create a DStream over the input topic
    val kafkaStream = KafkaUtils.createDirectStream[String, String](ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(config("kafka.topic")), kafkaParam)
    )

    // Turn the raw stream into a rating stream: userId|productId|score|timestamp
    val ratingStream = kafkaStream.map { msg =>
      val attr = msg.value().split("\\|")
      (attr(0).toInt, attr(1).toInt, attr(2).toDouble, attr(3).toInt)
    }

    // Core algorithm: how each incoming rating is processed
    ratingStream.foreachRDD {
      rdds => rdds.foreach {
        case (userId, productId, score, timestamp) =>
          println("rating data coming!>>>>>>>>>>>>>>>>>>")

          // 1. Fetch the user's most recent ratings from Redis as Array[(productId, score)]
          val userRecentlyRatings = getUserRecentlyRatings(MAX_USER_RATING_NUM, userId, ConnHelper.jedis)

          // 2. From the similarity matrix, get the products most similar to the current
          //    one as the candidate list, Array[productId]
          val candidateProducts = getTopSimProducts(MAX_SIM_PRODUCTS_NUM, productId, userId, simProductsMatrixBC.value)

          // 3. Compute the recommendation priority of every candidate, giving the user's
          //    real-time recommendation list, Array[(productId, score)]
          val streamRecs = computeProductScore(candidateProducts, userRecentlyRatings, simProductsMatrixBC.value)

          // 4. Save the recommendation list to MongoDB
          saveDataToMongoDB(userId, streamRecs)
      }
    }

    // Start streaming
    ssc.start()
    println("streaming started!")
    ssc.awaitTermination()
  }
  /**
   * Fetch the user's most recent num ratings from Redis
   */
  import scala.collection.JavaConversions._
  def getUserRecentlyRatings(num: Int, userId: Int, jedis: Jedis): Array[(Int, Double)] = {
    // Read from the user's rating queue; the list key is userId:USERID and each
    // element has the form PRODUCTID:SCORE
    // lrange is inclusive on both ends, so the last index is num - 1
    jedis.lrange("userId:" + userId.toString, 0, num - 1)
      .map { item =>
        val attr = item.split(":")
        (attr(0).trim.toInt, attr(1).trim.toDouble)
      }
      .toArray
  }
  // Get the similarity list of the current product, filter out the products the
  // user has already rated, and keep the top num as the candidate list
  def getTopSimProducts(num: Int,
                        productId: Int,
                        userId: Int,
                        simProducts: scala.collection.Map[Int, scala.collection.immutable.Map[Int, Double]])
                       (implicit mongoConfig: MongoConfig): Array[Int] = {
    // Similarity list of the current product, taken from the broadcast matrix
    val allSimProducts = simProducts(productId).toArray

    // Look up the products the user has already rated so they can be excluded
    val ratingCollection = ConnHelper.mongoClient(mongoConfig.db)(MONGODB_RATING_COLLECTION)
    val ratingExist = ratingCollection.find(MongoDBObject("userId" -> userId))
      .toArray
      .map { item => // only the productId is needed
        item.get("productId").toString.toInt
      }

    // Filter the rated products out, sort by similarity, and keep the top num
    allSimProducts.filter(x => !ratingExist.contains(x._1))
      .sortWith(_._2 > _._2)
      .take(num)
      .map(x => x._1)
  }
  // Compute the recommendation score of every candidate product
  def computeProductScore(candidateProducts: Array[Int],
                          userRecentlyRatings: Array[(Int, Double)],
                          simProducts: scala.collection.Map[Int, scala.collection.immutable.Map[Int, Double]])
  : Array[(Int, Double)] = {
    // A growable ArrayBuffer holding the base score of each candidate, (productId, score)
    val scores = scala.collection.mutable.ArrayBuffer[(Int, Double)]()

    // Two maps counting, per product, the recent ratings above and below the cutoff
    val increMap = scala.collection.mutable.HashMap[Int, Int]()
    val decreMap = scala.collection.mutable.HashMap[Int, Int]()

    // For every candidate, compute its similarity to every recently rated product
    for (candidateProduct <- candidateProducts; userRecentlyRating <- userRecentlyRatings) {
      // Similarity between the candidate and the already-rated product
      val simScore = getProductsSimScore(candidateProduct, userRecentlyRating._1, simProducts)

      if (simScore > 0.4) {
        // Weight by the rating, as in the formula, to get the base score
        scores += ((candidateProduct, simScore * userRecentlyRating._2))
        if (userRecentlyRating._2 > 3) {
          increMap(candidateProduct) = increMap.getOrElse(candidateProduct, 0) + 1
        } else {
          decreMap(candidateProduct) = decreMap.getOrElse(candidateProduct, 0) + 1
        }
      }
    }

    // Apply the formula to get the final priorities, grouping by productId first
    scores.groupBy(_._1).map {
      case (productId, scoreList) =>
        (productId, scoreList.map(_._2).sum / scoreList.length
          + log(increMap.getOrElse(productId, 1)) - log(decreMap.getOrElse(productId, 1)))
    }
      // Return the recommendation list sorted by score
      .toArray
      .sortWith(_._2 > _._2)
  }
  // Look up the similarity between two products; 0.0 if either side is missing
  def getProductsSimScore(product1: Int, product2: Int,
                          simProducts: scala.collection.Map[Int, scala.collection.immutable.Map[Int, Double]]): Double = {
    simProducts.get(product1) match {
      case Some(sims) => sims.get(product2) match {
        case Some(score) => score
        case None => 0.0
      }
      case None => 0.0
    }
  }

  // Custom log function with base N (here N = 10, i.e. lg)
  def log(m: Int): Double = {
    val N = 10
    math.log(m) / math.log(N)
  }
  // Write the results to MongoDB
  def saveDataToMongoDB(userId: Int, streamRecs: Array[(Int, Double)])(implicit mongoConfig: MongoConfig): Unit = {
    val streamRecsCollection = ConnHelper.mongoClient(mongoConfig.db)(STREAM_RECS)
    // Remove any existing document for this userId, then insert the new list
    streamRecsCollection.findAndRemove(MongoDBObject("userId" -> userId))
    streamRecsCollection.insert(MongoDBObject("userId" -> userId,
      "recs" -> streamRecs.map(x => MongoDBObject("productId" -> x._1, "score" -> x._2))))
  }
}
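One piece the listing relies on but does not show: getUserRecentlyRatings only reads Redis, so the business service must push every accepted rating into the user's queue. A minimal sketch of that write, with key and value formats taken from the reader above (the helper name and the trim length are assumptions):

import redis.clients.jedis.Jedis

object RedisRatingWriter {
  // Hypothetical helper: record a new rating in the user's Redis queue
  def pushRating(userId: Int, productId: Int, score: Double): Unit = {
    val jedis = new Jedis("localhost")
    // Newest rating goes to the head of the list userId:USERID, value PRODUCTID:SCORE
    jedis.lpush(s"userId:$userId", s"$productId:$score")
    // Assumption: keep only the most recent 20 entries, matching MAX_USER_RATING_NUM
    jedis.ltrim(s"userId:$userId", 0, 19)
    jedis.close()
  }
}

After a rating is processed, the StreamRecs collection holds one document per user of the form { "userId": ..., "recs": [ { "productId": ..., "score": ... }, ... ] }, which the business system can serve directly as the real-time recommendation list.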