package TeacherTopN2

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

import scala.collection.mutable

object TeacherN05 {

  /** Computes the top-2 most-visited teachers per subject from an access log.
    *
    * Each input line is a URL; after splitting on "/", index 2 holds
    * "&lt;subject&gt;.&lt;domain&gt;" and index 3 holds the teacher name
    * (assumed log format — confirm against the input file).
    * A custom partitioner (`MyTreeSetPartition`, defined elsewhere in the
    * project) is built from the distinct subjects, so the subsequent
    * per-partition bounded TreeSet yields a per-subject top 2.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Teacher").setMaster("local[*]")
    val sc = new SparkContext(conf)

    val lines = sc.textFile("F:\\spark\\计算每个学科最受欢迎Top2.log")

    // Emit ((subject, teacher), 1) for every visit record.
    val line: RDD[((String, String), Int)] = lines.map(it => {
      val strings = it.split("/")
      val teacher = strings(3)
      val subject = strings(2).split("[.]")(0)
      ((subject, teacher), 1)
    })

    // Distinct subjects drive the custom partitioner (one partition per subject).
    val subDis: Array[String] = line.map(i => i._1._1).distinct().collect()
    val partition: MyTreeSetPartition = new MyTreeSetPartition(subDis)

    // reduceByKey with the custom partitioner: partitions by subject and sums
    // the visit counts in a single shuffle.
    val rdd1: RDD[((String, String), Int)] = line.reduceByKey(partition, _ + _)

    val value1: RDD[((String, String), Int)] = rdd1.mapPartitions(it => {
      // Order by descending count, then by key, so two entries with the SAME
      // count remain distinct TreeSet elements. The original ordered on the
      // count alone, which made equal-count entries compare "equal" and
      // silently dropped one of them.
      implicit val value: Ordering[((String, String), Int)] =
        Ordering.by(i => (-i._2, i._1))
      val tuples = new mutable.TreeSet[((String, String), Int)]()
      // mapPartitions gives a lazy iterator; drain it eagerly with foreach,
      // keeping the TreeSet bounded at 2 by evicting the lowest-count entry
      // (TreeSet.last under this descending-count ordering).
      it.foreach(i => {
        tuples += i
        if (tuples.size > 2) {
          tuples -= tuples.last
        }
      })
      tuples.iterator
    })

    println(value1.collect().toBuffer)

    // Release Spark resources (the original never stopped the SparkContext).
    sc.stop()
  }
}
top2
最新推荐文章于 2024-07-25 10:39:07 发布