通过自定义分区的方式,实现对点击流日志统计,并取出每个模块中点击排行前三的链接。
点击日志样本:
20160321101954 http://java.study.163.com/java/course/javaee.shtml
20160321101954 http://java.study.163.com/java/course/android.shtml
20160321101954 http://java.study.163.com/java/video.shtml
20160321101954 http://java.study.163.com/java/teacher.shtml
20160321101954 http://java.study.163.com/java/course/android.shtml
20160321101954 http://php.study.163.com/php/teacher.shtml
20160321101954 http://net.study.163.com/net/teacher.shtml
/**
* Created by zn on 2017/5/4.
*/
object UrlCountPartition {

  /**
   * Counts clicks per URL in the click-stream log, groups the counted URLs
   * by host ("module") with a custom partitioner, and writes the top-3 URLs
   * of each module to the output directory.
   *
   * Log format (TAB-separated): timestamp \t url
   */
  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("UrlCountPartition").setMaster("local[2]")
    val sc = new SparkContext(conf)

    // Split each line on TAB; field 0 is the timestamp, field 1 the URL.
    // Emit (URL, 1) so clicks can be summed per URL.
    val rdd1 = sc.textFile("/Users/ning/Downloads/wangyiyun.log").map(line => {
      val f = line.split("\t")
      (f(1), 1)
    })

    // Total click count per URL.
    val rdd2 = rdd1.reduceByKey(_ + _)

    // Re-key by host so that every URL of one module carries the same key:
    // (host, (url, count)).
    val rdd3 = rdd2.map(t => {
      val url = t._1
      val host = new URL(url).getHost
      (host, (url, t._2))
    })

    // rdd3 is evaluated twice (collect below, then partitionBy) — cache it
    // so the file is not re-read and re-aggregated.
    rdd3.cache()

    // All distinct hosts; the partitioner assigns one partition per host.
    val hosts = rdd3.map(_._1).distinct().collect()
    val hostParitioner = new HostParitioner(hosts)

    // One partition now holds exactly one module's URLs, so the per-partition
    // top-N is the per-module top-N. Sort by count descending and keep the
    // top 3 (the requirement asks for the top three links per module; the
    // previous take(2) returned only two).
    val rdd4 = rdd3.partitionBy(hostParitioner).mapPartitions(it => {
      it.toList.sortBy(_._2._2).reverse.take(3).iterator
    })

    rdd4.saveAsTextFile("/Users/ning/Downloads/out.spark")
    sc.stop()
  }
}
/**
 * Custom [[Partitioner]] that routes every record to the partition
 * reserved for its host.
 *
 * @param ins the distinct host names; each host gets its own partition,
 *            numbered by its position in this array
 */
class HostParitioner(ins: Array[String]) extends Partitioner {

  // Host -> partition index, filled in the order hosts appear in `ins`.
  val parMap = new mutable.HashMap[String, Int]()
  var count = 0
  ins.foreach { host =>
    parMap(host) = count
    count += 1
  }

  /** Exactly one partition per known host. */
  override def numPartitions: Int = ins.length

  /** Looks up the key's partition; unknown hosts fall back to partition 0. */
  override def getPartition(key: Any): Int =
    parMap.getOrElse(key.toString, 0)
}