package com.meng.nan.day720
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import scala.reflect.ClassTag
object SortClass {
/**
 * Basic sorting in Spark.
 *
 * sortByKey: works only on (key, value) RDDs and sorts by key; it is a transformation.
 * sortBy: built on top of sortByKey and can sort data that has no explicit key; it is also a transformation.
 */
def main(args: Array[String]): Unit = {
Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
Logger.getLogger("org.spark_project").setLevel(Level.WARN)
val conf=new SparkConf()
.setAppName("SortClass")
.setMaster("local[*]")
val sc=new SparkContext(conf)
//sbk(sc)
sb(sc)
}
def sbk(sc:SparkContext): Unit ={
val list=List(
"hello you",
"i hate you",
"i miss you",
"i love you",
"fuck you"
)
val words=sc.parallelize(list).flatMap(_.split("\\s+"))
val ret=words.map((_,1)).reduceByKey(_+_)
// Ascending order by default.
ret.map { case (key, count) => (count, key) } // swap so count becomes the key to sort on
  .sortByKey(false, 1) // false = descending order, 1 partition
  .map { case (count, key) => (key, count) } // swap back to (word, count)
  .foreach(println)
ret.sortByKey(numPartitions = 1).foreach(println) // sort by key (ascending), single partition
}
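// A minimal sketch (an addition, not in the original) illustrating the note above
// that sortBy also handles data without a (key, value) structure: the sort key is
// produced by the function passed to sortBy. The method name sbNoKey is hypothetical.
def sbNoKey(sc: SparkContext): Unit = {
  val nums = sc.parallelize(List(3, 1, 4, 1, 5, 9, 2, 6))
  // Sort plain integers descending; the identity function supplies the sort key.
  nums.sortBy(n => n, ascending = false, numPartitions = 1).foreach(println)
}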
def sb(sc:SparkContext): Unit ={
val list=List(
"hello you",
"i hate you",
"i miss you",
"i love you",
"fuck you"
)
val words=sc.parallelize(list).flatMap(_.split("\\s+"))
val ret=words.map((_,1)).reduceByKey(_+_)
// Custom sorting: descending by count only, via an explicit Ordering[Int].
// val sortedRDD: RDD[(String, Int)] = ret.sortBy(t => t._2, true, 1)(
//   new Ordering[Int] {
//     override def compare(x: Int, y: Int): Int = y.compareTo(x)
//   },
//   ClassTag.Int
// )
// sortedRDD.foreach(println)
//
// println("===============")
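// A simpler sketch of the same count-descending sort (an alternative, not in the
// original): with ascending = false, the default Ordering[Int] and the ClassTag
// are resolved implicitly, so nothing has to be passed explicitly.
// ret.sortBy(_._2, ascending = false, numPartitions = 1).foreach(println)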
// Sort the (word, count) pairs by count descending, then by word descending,
// supplying an explicit Ordering on the whole pair and an explicit ClassTag.
val sortedRDD1 = ret.sortBy(t => t, true, 1)(
  new Ordering[(String, Int)] {
    override def compare(x: (String, Int), y: (String, Int)): Int = {
      var cmp = y._2.compareTo(x._2) // count, descending
      if (cmp == 0) {
        cmp = y._1.compareTo(x._1) // tie-break on the word, descending
      }
      cmp
    }
  },
  ClassTag.Object.asInstanceOf[ClassTag[(String, Int)]]
)
sortedRDD1.foreach(println)
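println("===============")
// Equivalent sketch (not from the original): sorting on the swapped pair
// (count, word) with ascending = false yields the same count-descending,
// then word-descending order through the default tuple Ordering, with the
// Ordering and ClassTag resolved implicitly.
ret.sortBy(t => (t._2, t._1), ascending = false, numPartitions = 1)
  .foreach(println)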
sc.stop()
}
}