WordCount Example
Python version
from pyspark import SparkConf, SparkContext
# Create the SparkConf and SparkContext
conf = SparkConf().setMaster("local").setAppName("lichao-wordcount")
sc = SparkContext(conf=conf)
# Input data
data=["hello","world","hello","word","count","count","hello"]
# Convert the Python list into a Spark RDD and operate on it
rdd=sc.parallelize(data)
resultRdd = rdd.map(lambda word: (word,1)).reduceByKey(lambda a,b:a+b)
# Collect the RDD back into a Python list and print it
resultColl = resultRdd.collect()
for line in resultColl:
    print(line)
# Stop the SparkContext
sc.stop()
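The counts above come back in whatever order reduceByKey produces them. To mirror the Scala version's descending sort by count, sortBy can be applied before collecting (and before sc.stop()); a minimal sketch:
sortedRdd = resultRdd.sortBy(lambda pair: pair[1], ascending=False)
print(sortedRdd.collect())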
Scala version
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
object ScalaWordCount {
  def main(args: Array[String]): Unit = {
    // Create the Spark configuration and set the application name
    val conf = new SparkConf().setAppName("ScalaWordCount")
    // Create the entry point for Spark execution
    val sc = new SparkContext(conf)
    // Specify where to read the data from and create an RDD (Resilient Distributed Dataset)
    val lines: RDD[String] = sc.textFile(args(0))
    // Split each line into words and flatten the result
    val words: RDD[String] = lines.flatMap(_.split(" "))
    // Pair each word with a count of 1
    val wordAndOne: RDD[(String, Int)] = words.map((_, 1))
    // Aggregate the counts by key
    val reduced: RDD[(String, Int)] = wordAndOne.reduceByKey(_ + _)
    // Sort by count in descending order
    val sorted: RDD[(String, Int)] = reduced.sortBy(_._2, false)
    // Save the result to HDFS
    sorted.saveAsTextFile(args(1))
    // Release resources
    sc.stop()
  }
}
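For comparison, here is the same file-based pipeline sketched in PySpark; the sys.argv input and output paths (mirroring args(0) and args(1) above) are assumptions, not part of the original example:
import sys
from pyspark import SparkConf, SparkContext

conf = SparkConf().setAppName("PythonWordCount")  # hypothetical app name
sc = SparkContext(conf=conf)
# Read lines, split into words, count each word, and sort by count descending
counts = (sc.textFile(sys.argv[1])
            .flatMap(lambda line: line.split(" "))
            .map(lambda word: (word, 1))
            .reduceByKey(lambda a, b: a + b)
            .sortBy(lambda pair: pair[1], ascending=False))
counts.saveAsTextFile(sys.argv[2])  # output path is an assumption
sc.stop()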
Exercises
Python version
data = [1, 2, 3, 4, 5]
testdata = sc.parallelize(data)
# Square every element
mulRDD = testdata.map(lambda x: x ** 2)
print(mulRDD.collect())  # [1, 4, 9, 16, 25]
# Keep only the even elements
evenRDD = testdata.filter(lambda x: x % 2 == 0)
print(evenRDD.collect())  # [2, 4]
data2 = ['apple,data,we', 'bye', 'dog', 'cat']
testdata2 = sc.parallelize(data2)
# Split each string on commas and flatten the pieces into a single RDD
splitRDD = testdata2.flatMap(lambda line: line.split(","))
print(splitRDD.collect())  # ['apple', 'data', 'we', 'bye', 'dog', 'cat']
data3 = [("c1", 100), ("c2", 60), ("c1", 90), ("c2", 60), ("c1", 80)]
testdata3 = sc.parallelize(data3)
# Group all values under each key; each group is an iterable
groupRDD = testdata3.groupByKey()
re = groupRDD.collect()
for key, values in re:
    print(key)
    for v in values:
        print(v)
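A compact alternative for inspecting the groups is mapValues(list), which turns each group's iterable into a plain list; a minimal sketch using the same testdata3:
listRDD = testdata3.groupByKey().mapValues(list)
print(listRDD.collect())  # e.g. [('c1', [100, 90, 80]), ('c2', [60, 60])]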
# Sum the values for each key; reduceByKey combines partial sums locally before shuffling
countRDD = testdata3.reduceByKey(lambda x, y: x + y)
print(countRDD.collect())  # totals: c1 -> 270, c2 -> 120
# Sort by value: swap to (value, key), sort by key ascending, then swap back
sortRDD = testdata3.map(lambda x: (x[1], x[0])).sortByKey().map(lambda x: (x[1], x[0]))
print(sortRDD.collect())  # [('c2', 60), ('c2', 60), ('c1', 80), ('c1', 90), ('c1', 100)]
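The same ordering can be written more directly with sortBy, which takes a key function and avoids the double swap; a minimal sketch:
sortRDD2 = testdata3.sortBy(lambda x: x[1])
print(sortRDD2.collect())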