package TeacherTopN2

import org.apache.spark.{Partitioner, SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

import scala.collection.mutable

object TeacherN04 {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Teacher").setMaster("local[*]")
    val sc = new SparkContext(conf)
    val lines = sc.textFile("F:\\spark\\计算每个学科最受欢迎Top2.log")

    // Parse each access-log line into ((subject, teacher), 1).
    val line: RDD[((String, String), Int)] = lines.map(it => {
      val strings = it.split("/")
      val teacher = strings(3)
      val subject = strings(2).split("[.]")(0)
      ((subject, teacher), 1)
    })

    // Aggregate the click count for every (subject, teacher) pair.
    val reduceSub: RDD[((String, String), Int)] = line.reduceByKey(_ + _)

    // Collect the distinct subjects to the driver so each subject can be
    // assigned its own partition.
    val distSub: Array[String] = reduceSub.map(i => i._1._1).distinct().collect()
    val partition = new MyTreeSetPartition(distSub)

    // Repartition so that all records of one subject land in one partition.
    val value2: RDD[((String, String), Int)] = reduceSub.partitionBy(partition)

    // Within each partition, keep only the Top 2 by count. The TreeSet is
    // capped at two elements: whenever it grows past that, the entry with
    // the lowest count (the set's last element under this descending
    // ordering) is evicted, so memory stays bounded per partition.
    val value1: RDD[((String, String), Int)] = value2.mapPartitions(it => {
      // Order by count descending, with the (subject, teacher) key as a
      // tie-breaker; ordering on the count alone would make the TreeSet
      // treat two teachers with equal counts as duplicates and drop one.
      implicit val value: Ordering[((String, String), Int)] =
        Ordering.by[((String, String), Int), (Int, (String, String))](i => (-i._2, i._1))
      val tuples = new mutable.TreeSet[((String, String), Int)]()
      it.foreach(itt => {
        tuples += itt
        if (tuples.size > 2) {
          tuples -= tuples.last
        }
      })
      tuples.iterator
    })

    println(value1.collect().toBuffer)
    sc.stop()
  }
}

// Partitioner that assigns each subject its own partition index.
class MyTreeSetPartition(val distSub: Array[String]) extends Partitioner {
  val stringToInt = new mutable.HashMap[String, Int]()
  var indexId = 0
  for (sub <- distSub) {
    stringToInt(sub) = indexId
    indexId += 1
  }

  override def numPartitions: Int = distSub.length

  override def getPartition(key: Any): Int = {
    val tuple = key.asInstanceOf[(String, String)]
    val value = tuple._1
    stringToInt(value)
  }
}
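For reference, the strings(3) / strings(2) indexing above assumes access-log lines shaped like http://<subject>.edu360.cn/<teacher>; the sample URL in the sketch below is hypothetical, not taken from the actual log file. A minimal standalone check of what each index yields:

object ParseCheck {
  def main(args: Array[String]): Unit = {
    // Hypothetical sample line, assuming the format http://<subject>.edu360.cn/<teacher>.
    val sample = "http://bigdata.edu360.cn/laozhang"
    val strings = sample.split("/")
    // split("/") yields Array("http:", "", "bigdata.edu360.cn", "laozhang"),
    // so index 2 is the host and index 3 is the teacher.
    val teacher = strings(3)                  // "laozhang"
    val subject = strings(2).split("[.]")(0)  // "bigdata"
    println((subject, teacher))               // (bigdata,laozhang)
  }
}

If the real log format differs, the two indices (and the host split) need adjusting.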
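The mapPartitions step is the core trick: a TreeSet capped at two elements works like a small top-N buffer, evicting the lowest-count entry each time it overflows. A standalone sketch of the same technique outside Spark, using made-up (teacher, count) pairs:

object BoundedTopN {
  def main(args: Array[String]): Unit = {
    import scala.collection.mutable
    // Hypothetical (teacher, count) pairs within one subject's partition.
    val counts = Seq(("laozhang", 9), ("laoli", 9), ("laowang", 4), ("laozhao", 15))
    // Descending by count, teacher name as tie-breaker.
    implicit val ord: Ordering[(String, Int)] =
      Ordering.by[(String, Int), (Int, String)](t => (-t._2, t._1))
    val top = new mutable.TreeSet[(String, Int)]()
    counts.foreach { c =>
      top += c
      if (top.size > 2) top -= top.last // evict the current lowest count
    }
    println(top.toList) // List((laozhao,15), (laoli,9))
  }
}

Note the (-count, key) ordering: with an ordering on the count alone, the two entries with count 9 would compare as equal, so the TreeSet would silently keep only one of them.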