Spark RDD案例(一)分组TopN
1. 背景
- 作为分布式数据处理引擎,Spark抽象出了很多算子,使得编程对比mapreduce更加便利,实现需求时,也可以更加灵活,但也更容易出错。
- 本文是大数据常见场景分组TopN的简化案例,实际企业生产中也会相对频繁遇到类似需求
2. 案例
- 需求
以下数据是类似网站日志的记录,需要求出每个科目中访问次数最多的那2位老师。
- 数据
http://bigdata.doit.cn/laozhang
http://bigdata.doit.cn/laozhang
http://bigdata.doit.cn/laozhao
http://bigdata.doit.cn/laozhao
http://bigdata.doit.cn/laozhao
http://bigdata.doit.cn/laozhao
http://bigdata.doit.cn/laozhao
http://bigdata.doit.cn/laoduan
http://bigdata.doit.cn/laoduan
http://javaee.doit.cn/xiaozhang
http://javaee.doit.cn/xiaozhang
http://javaee.doit.cn/laowang
http://javaee.doit.cn/laowang
http://javaee.doit.cn/laowang
2.1 代码一
package com.doit.practice
import com.doit.foreach.ForeachTest
import org.apache.spark.rdd.RDD
import org.apache.spark.{
Partitioner, SparkConf, SparkContext}
/*
* 数据形式如下,http://bigdata.doit.cn/laozhang
* 需求是得出每个科目最受欢迎老师
* 以下是所有数据
* */
object TopNTest1 {

  /**
   * Grouped top-N example: from a log of URLs shaped like
   * {{{http://bigdata.doit.cn/laozhang}}}, compute the N most-visited
   * teachers per subject (subject = first label of the host name,
   * teacher = the path segment after the host).
   *
   * Usage: TopNTest1 [inputPath] [topN]
   *   - inputPath defaults to the original hard-coded sample file
   *   - topN defaults to 2
   */
  def main(args: Array[String]): Unit = {
    import scala.util.Try

    val conf: SparkConf = new SparkConf()
      .setAppName(TopNTest1.getClass.getSimpleName)
      .setMaster("local[*]")
    val sc = new SparkContext(conf)

    // Input path and top-N are now configurable via args, falling back to
    // the original behavior (hard-coded path, top 2) when absent.
    val inputPath: String = args.headOption.getOrElse("E:\\DOITLearning\\12.Spark\\teacherlog.txt")
    val topN: Int = Try(args(1).toInt).getOrElse(2)

    val lines: RDD[String] = sc.textFile(inputPath)

    // Parse each line into ((subject, teacher), 1). Splitting on "/+"
    // collapses the "//" after the scheme, so index 1 is the domain
    // (e.g. bigdata.doit.cn) and index 2 is the teacher name.
    val subjectTeacherOnes: RDD[((String, String), Int)] = lines.map { line =>
      val parts: Array[String] = line.split("/+")
      val domain: String = parts(1)
      val subject: String = domain.split("\\.")(0)
      val teacher: String = parts(2)
      ((subject, teacher), 1)
    }
    println("mapedRdd: " + subjectTeacherOnes.collect().toBuffer)

    // Sum the visit counts per (subject, teacher) key.
    val counts: RDD[((String, String), Int)] = subjectTeacherOnes.reduceByKey(_ + _)
    println("reduceRDD: " + counts.collect().toBuffer)

    // Global descending sort by count — shown for illustration only; the
    // per-subject top-N below performs its own sort within each group.
    val globallySorted: RDD[((String, String), Int)] = counts.sortBy(_._2, ascending = false)
    println("sortedRDD: " + globallySorted.collect().toBuffer)

    // Group the aggregated counts by subject.
    val bySubject: RDD[(String, Iterable[((String, String), Int)])] = counts.groupBy(_._1._1)
    println("subjectReducedRDD: " + bySubject.collect().toBuffer)

    // Within each subject, sort descending by count and keep the top N.
    // NOTE: toList materializes an entire group in executor memory — fine
    // for this sample, but unsuitable when one subject has huge cardinality.
    val res: RDD[(String, List[((String, String), Int)])] = bySubject.mapValues { group =>
      group.toList.sortBy(-_._2).take(topN)
    }
    println("res: " + res.collect().toBuffer)

    sc.stop()
  }
}
运行结果
res: ArrayBuffer((javaee,List(((javaee,laowang),3), ((javaee,xiaozhang),2))), (bigdata,List(((bigdata,laozhao),5), ((bigdata,laozhang),2))))
上述代码,有用到toList再排序,需要注意内存消耗以及数据量大小,过大则不适合这么处理。
2.2 代码二
// Approach 2: aggregate counts once, collect the distinct subjects to the
// driver, then (in the loop below) filter and sort the RDD once per subject.
val conf: SparkConf = new SparkConf().setAppName(TopNTest1.getClass.getSimpleName).setMaster("local[*]")
val sc = new SparkContext(conf)
// Sample input line: http://javaee.doit.cn/laowang
val rdd1: RDD[String] = sc.textFile("E:\\DOITLearning\\12.Spark\\teacherlog.txt")
// Parse each line into ((subject, teacher), 1), then sum the counts per key.
// Splitting on "/+" collapses the "//" after the scheme, so strings(1) is
// the domain and strings(2) is the teacher name.
val reducedRDD: RDD[((String, String), Int)] = rdd1.map(line => {
val strings: Array[String] = line.split("/+")
val domain: String = strings(1)
val name: String = strings(2)
val domainSplits: Array[String] = domain.split("\\.")
val subject: String = domainSplits(0)
((subject, name), 1)
}).reduceByKey(_ + _)
println("reducedRDD: " + reducedRDD.collect().toBuffer)
/**
 * reducedRDD: ArrayBuffer(((javaee,xiaozhang),2), ((bigdata,laozhang),2),
 * ((javaee,laowang),3), ((bigdata,laozhao),5), ((bigdata,laoduan),2))
 **/
// Collect the distinct set of subjects to the driver; dedup is required
// because each (subject, teacher) pair repeats its subject.
val subjects: Array[String] = reducedRDD.map(e => e._1._1).distinct().collect()
// subjects now holds the unique subject names, e.g. Array(javaee, bigdata).
// NOTE(review): collect() pulls the subjects to the driver — acceptable only
// while the number of distinct subjects stays small.
for (elem <- subjects) {
// 过滤出当前科目的所有数据
val filteredSubjectRDD: RDD[((String, String), Int)] = reducedRDD.filter(e => e._1._1.equals(elem)