import org.apache.spark.{SparkConf, SparkContext}
object Paixu {

  /**
   * Entry point for a small Spark demo: parses whitespace-separated
   * "name age fv" records into `User` objects and prints them sorted by
   * the ordering defined on `User` (descending fv, then ascending age).
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[3]").setAppName("paixu")
    val sc   = new SparkContext(conf)

    val records = Array("xiaoming 22 40", "zhangsan 20 30", "xiaolan 18 60")

    // Parse each record into a User via positional destructuring.
    val users = sc.parallelize(records).map { line =>
      val Array(name, age, fv) = line.split(" ")
      new User(name, age.toInt, fv.toInt)
    }

    // sortBy(identity) uses the implicit Ordering derived from Ordered[User].
    val sorted = users.sortBy(identity).collect()
    println(sorted.toBuffer)

    sc.stop()
  }
}
/**
 * A user record that is totally ordered for RDD sorting.
 *
 * Ordering: descending by `fv` first; ties broken by ascending `age`.
 * Extends `Serializable` because instances are shuffled across Spark
 * executors during the sort.
 *
 * @param name user name
 * @param age  user age in years
 * @param fv   the value sorted on (descending)
 */
class User(val name: String, val age: Int, val fv: Int) extends Ordered[User] with Serializable {

  override def compare(that: User): Int = {
    if (this.fv == that.fv) {
      // Same fv: younger first. Integer.compare avoids the overflow that
      // raw subtraction (this.age - that.age) can produce for extreme Ints.
      Integer.compare(this.age, that.age)
    } else {
      // Arguments swapped to get descending order on fv.
      Integer.compare(that.fv, this.fv)
    }
  }

  override def toString: String = s"name: $name, age: $age, fv: $fv"
}
// Key point: the element class must extend Ordered (overriding compare)
// and mix in Serializable so Spark can ship and sort it across executors.