A Simple PageRank in Scala
val sc = new SparkContext(...) // spark-shell already provides sc by default; this is only needed in a standalone program
val links = sc.parallelize(Array(('A', Array('D')), ('B', Array('A')), ('C', Array('A', 'B')), ('D', Array('A', 'C'))), 2).cache() // adjacency list: page -> pages it links to; cached because it is reused every iteration (the original .map(x => (x._1, x._2)) was a no-op and is dropped)
var ranks = sc.parallelize(Array(('A', 1.0), ('B', 1.0), ('C', 1.0), ('D', 1.0)), 2) // every page starts with rank 1.0
val iterations = 10
for (i <- 1 to iterations) {
  val contribs = links.join(ranks, 2).flatMap {
    case (url, (neighbors, rank)) => neighbors.map(dest => (dest, rank / neighbors.size)) // each page splits its rank evenly among its out-links (fixes the original typo rank/links,size)
  }
  ranks = contribs.reduceByKey(_ + _, 2).mapValues(0.15 + 0.85 * _) // sum incoming contributions, then apply the damping formula 0.15 + 0.85 * sum
}
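A quick hand check of the first iteration: A receives 1.0 from B, 0.5 from C, and 0.5 from D, so its new rank is 0.15 + 0.85 * 2.0 = 1.85; B and C each receive 0.5 and become 0.575; D receives 1.0 from A and stays at 1.0. A minimal sketch to print the final ranks in descending order (collect() is safe here only because the dataset is tiny):

ranks.sortBy(_._2, ascending = false).collect().foreach {
  case (url, rank) => println(s"$url has rank $rank")
}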
// ranks.take(1) shows a sample of the result directly in the shell
ranks.saveAsTextFile(...) // with "hdfs://localhost:9000/user/" as the path: no error is reported, but the call hangs and no file appears in HDFS
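Two things worth ruling out when the save appears to hang (neither confirmed as the cause here): saveAsTextFile is an action, so it is the first call that actually runs the whole ten-iteration lineage, and it aborts if the target directory already exists. A minimal sketch with a fully qualified path; the directory name pagerank_out and the user hadoop are hypothetical:

// hypothetical output path; the directory must not exist yet,
// otherwise the job fails with FileAlreadyExistsException
ranks.saveAsTextFile("hdfs://localhost:9000/user/hadoop/pagerank_out")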
// To restart spark-shell I had to kill -9 5525 (the PID on my machine); otherwise it fails with:
<console>:16: error: not found: value sqlContext
import sqlContext.implicits._
^
<console>:16: error: not found: value sqlContext
import sqlContext.sql
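A gentler way out, assuming the old shell is still responsive, is to stop the SparkContext before quitting so the JVM releases its resources and the next spark-shell can create sqlContext normally; kill -9 then remains the last resort:

sc.stop() // shut the context down cleanly, then leave the shell with :quit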
Summary: RDDs turn out to be quite flexible; the Scala language itself still deserves another pass.
The notes above come from working through a reference book; these two blog posts cover the same ground:
http://blog.csdn.net/wzy0623/article/details/51383232
http://blog.csdn.net/li385805776/article/details/19760663