PySpark RDD basics: parallelize, map, collect, lambda, groupByKey, distinct, count, reduce
## Basic RDD operations
## Create the first RDD --- SparkContext
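## The examples below assume an existing SparkContext named sc (an
## interactive pyspark shell creates one for you). A minimal sketch of
## building one yourself; 'local[4]' and the app name are assumptions:
from pyspark import SparkContext

sc = SparkContext('local[4]', 'rdd-basics')  # hypothetical app name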
wordsList = ['cat', 'elephant', 'rat', 'cat']
wordsRDD = sc.parallelize(wordsList, 4)
print(type(wordsRDD))
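## The second argument to parallelize is the number of partitions;
## getNumPartitions() verifies how the data was split:
print(wordsRDD.getNumPartitions())  # 4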
## map --- apply a function to every element of the RDD
def makePlural(word):
    return word + 's'
## Call the function directly
print(makePlural('cat'))  # cats
appliedRDD = wordsRDD.map(makePlural)
## collect --- bring all the RDD's elements back to the driver as a list
print(appliedRDD.collect())
print(type(appliedRDD.collect()))  # <class 'list'>
## The same map with a lambda --- identical logic to makePlural, written inline
lambdaRDD = wordsRDD.map(lambda word: word + 's')
print(lambdaRDD.collect())
## Pair RDDs --- store each record as a (key, value) tuple,
## e.g. [('key1', value1), ('key2', value2)]
pairRDD = wordsRDD.map(lambda word: (word, 1))
print(pairRDD)          # the RDD object itself, not its contents
print(pairRDD.take(1))  # first element only: [('cat', 1)]
pairRDD.collect()
## Group the records by key --- groupByKey
wordsGrouped = pairRDD.groupByKey()
for key, value in wordsGrouped.collect():
    print('{0}: {1}'.format(key, list(value)))
## Sum the values for each key
wordCountsGrouped = wordsGrouped.map(lambda kv: (kv[0], sum(kv[1])))  # Python 3 lambdas cannot unpack tuples, so index into the pair
wordCountsGrouped.collect()
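## A sketch of the more idiomatic alternative: reduceByKey does the same
## per-key sum, but combines values within each partition before the
## shuffle, so it moves less data than groupByKey followed by sum:
wordCountsReduced = pairRDD.reduceByKey(lambda a, b: a + b)
print(wordCountsReduced.collect())  # e.g. [('rat', 2), ('elephant', 1), ('cat', 2)]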
## Find the distinct values --- distinct
uniqueWords = wordsRDD.map(lambda word: (word, 1)).distinct()
print(uniqueWords.collect())
## count
countUniqueWords = wordsRDD.map(lambda word: (word, 1)).distinct().count()
print(countUniqueWords)  # 3
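## As a side note, countByValue() collapses the same pipeline into one
## action: it returns a dict of element -> count on the driver, so its
## length is the number of unique words:
print(len(wordsRDD.countByValue()))  # 3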
## Compute the average number of occurrences per word
wordCounts = [('cat', 2), ('elephant', 1), ('rat', 2)]
wordCountRDD = sc.parallelize(wordCounts)
## map --- pull out each count; reduce --- add them up
totalCount = wordCountRDD.map(lambda xy: xy[1]).reduce(lambda x, y: x + y)
average = totalCount / wordCountRDD.distinct().count()
print(average)  # 5 / 3 = 1.666...
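## The same average, sketched with the pair-RDD helper values() and the
## mean() action instead of a hand-written map/reduce:
print(wordCountRDD.values().mean())  # 1.666...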