Order Inversion in Spark
For the idea behind the order inversion pattern, see my earlier post MapReduce之反转排序 (Order Inversion in MapReduce); the code below is an attempt to reproduce it with Spark.
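Concretely, the job computes the relative frequency of each neighbor n of a word w within a fixed window: f(w, n) = count(w, n) / sum over all neighbors u of count(w, u). For example, with a window of 2 and the single input line "a b c", the word b has neighbors a and c once each, so f(b, a) = f(b, c) = 1/2 = 0.5, and the output contains the tab-separated lines "b a 0.5" and "b c 0.5" (plus the corresponding lines for a and c).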
package OrderInversion

import org.apache.spark.{SparkConf, SparkContext}

object OrderInversion {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("RelativeFrequency").setMaster("local")
    val sc = new SparkContext(sparkConf)

    val neighborWindow = 2
    val input = "input/OrderInversion.txt"
    val output = "output"

    // Broadcast the window size to the executors (if there are any beyond the driver)
    val broadcastWindow = sc.broadcast(neighborWindow)

    val rawData = sc.textFile(input)

    // Turn each line into (word, (neighbor, 1)) pairs, one for every token
    // within the window on either side of the word
    val pairs = rawData.flatMap(line => {
      val tokens = line.split("\\s+") // split on runs of whitespace to avoid empty tokens
      for {
        i <- 0 until tokens.length
        start = if (i - broadcastWindow.value < 0) 0 else i - broadcastWindow.value
        end = if (i + broadcastWindow.value >= tokens.length) tokens.length - 1 else i + broadcastWindow.value
        j <- start to end if j != i
      } yield (tokens(i), (tokens(j), 1))
    })

    // Total neighbor count per word: (word, total)
    val totalByKey = pairs.map(t => (t._1, t._2._2)).reduceByKey(_ + _)

    // Collapse duplicate (word, neighbor) pairs into (word, (neighbor, count))
    val grouped = pairs.groupByKey()
    val uniquePairs = grouped.flatMapValues(_.groupBy(_._1).mapValues(_.unzip._2.sum))

    // Join each neighbor count with the word's total: (word, ((neighbor, count), total))
    val joined = uniquePairs.join(totalByKey)

    // Relative frequency: count(word, neighbor) / total(word)
    val relativeFrequency = joined.map(t => {
      ((t._1, t._2._1._1), t._2._1._2.toDouble / t._2._2.toDouble)
    })

    // Write the result as "word <tab> neighbor <tab> frequency"
    val formatted = relativeFrequency.map(t => t._1._1 + "\t" + t._1._2 + "\t" + t._2)
    formatted.saveAsTextFile(output)

    sc.stop()
  }
}
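One design note: the groupByKey step collects every raw (neighbor, 1) pair for a word before the per-neighbor counts are summed, which shuffles more data than necessary. Below is a minimal sketch of an alternative (my own variant, not from the original post), assuming the same pairs RDD as above; it pre-aggregates on the composite key (word, neighbor) with reduceByKey, so partial counts are combined map-side before the shuffle:

// Alternative to the groupByKey step: aggregate per (word, neighbor) first,
// then reshape so the result can be joined with totalByKey exactly as above.
val uniquePairsAlt = pairs
  .map { case (word, (neighbor, one)) => ((word, neighbor), one) }
  .reduceByKey(_ + _) // partial sums combined on each partition before shuffling
  .map { case ((word, neighbor), count) => (word, (neighbor, count)) }

Dropping it in is just a matter of replacing grouped/uniquePairs with uniquePairsAlt; the join and the division stay unchanged.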