Level 1: Creating an RDD by Parallelizing a Collection
import org.apache.spark.{SparkConf, SparkContext}
object Student {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName(this.getClass.getSimpleName).setMaster("local")
    val sc = new SparkContext(conf)
    val arr1 = Array(("bj", 88), ("sh", 67), ("gz", 92))
    val arr2 = Array(("bj", 94), ("sh", 85), ("gz", 95))
    val arr3 = Array(("bj", 72), ("sh", 69), ("gz", 98))
    /********** begin **********/
    // Step 1: merge the three arrays into one
    val arr = arr1 ++ arr2 ++ arr3
    // Step 2: create an RDD by parallelizing the merged collection
    val stuRDD = sc.makeRDD(arr)
    // Step 3: sum the values of all entries that share the same key
    val result = stuRDD.reduceByKey(_ + _)
    // Step 4: print the aggregated results
    result.foreach(println)
    /********** end **********/
    sc.stop()
  }
}
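For comparison, `makeRDD` on a `Seq` delegates to `parallelize`, so the following self-contained sketch performs the same aggregation with `parallelize` directly (the object name `StudentSketch` and the `assert` check are illustrative additions, not part of the exercise). The expected totals follow from the data above: 88+94+72 = 254 for bj, 67+85+69 = 221 for sh, and 92+95+98 = 285 for gz.

import org.apache.spark.{SparkConf, SparkContext}

object StudentSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("StudentSketch").setMaster("local"))
    // parallelize is equivalent here; makeRDD simply forwards a Seq to it
    val scores = sc.parallelize(Seq(
      ("bj", 88), ("sh", 67), ("gz", 92),
      ("bj", 94), ("sh", 85), ("gz", 95),
      ("bj", 72), ("sh", 69), ("gz", 98)))
    val totals = scores.reduceByKey(_ + _).collectAsMap()
    // Expected sums: bj -> 254, sh -> 221, gz -> 285
    assert(totals("bj") == 254 && totals("sh") == 221 && totals("gz") == 285)
    sc.stop()
  }
}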
Level 2: Creating an RDD from an External Dataset
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
object Teachers {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Teachers").setMaster("local")
    val sc = new SparkContext(conf)
    val dataFile = "file:///root/step2_files"
    /********** begin **********/
    // Step 1: create an RDD from the external file
    val teaRDD = sc.textFile(dataFile)
    // Step 2: split each line once; field 0 is the course and field 1 is the
    // teacher's name, which is paired with a count of 1
    val teacher = teaRDD.map(line => {
      val fields = line.split(",")
      (fields(1), 1)
    })
    // Step 3: sum the counts of entries that share the same name
    val tea = teacher.reduceByKey(_ + _)
    // Step 4: sort by occurrence count in descending order
    val result = tea.sortBy(_._2, ascending = false)
    // Step 5: print the results
    result.foreach(println)
    /********** end **********/
    sc.stop()
  }
}
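The contents of file:///root/step2_files are not shown in the exercise, so the following sketch runs the same split/count/sort pipeline on made-up in-memory "course,teacher" lines (the object name `TeachersSketch` and all course and teacher values are hypothetical). Note that `collect()` before printing preserves the descending order on the driver, whereas `foreach(println)` executes on the executors and need not print in sorted order when there are multiple partitions.

import org.apache.spark.{SparkConf, SparkContext}

object TeachersSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("TeachersSketch").setMaster("local"))
    // Hypothetical "course,teacher" lines standing in for the external file
    val lines = sc.parallelize(Seq(
      "bigdata,lao_wang",
      "bigdata,lao_li",
      "javaee,lao_wang",
      "javaee,lao_zhang",
      "python,lao_wang"))
    val counts = lines
      .map(_.split(",")(1))             // keep only the teacher name
      .map((_, 1))                      // pair each name with a count of 1
      .reduceByKey(_ + _)               // total occurrences per name
      .sortBy(_._2, ascending = false)  // most frequent first
    // Prints (lao_wang,3) first, then (lao_li,1) and (lao_zhang,1)
    counts.collect().foreach(println)
    sc.stop()
  }
}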