Introduction
Calling the Spark API from Python is convenient, and the resulting code is concise.
First install Python, pip, and PySpark:
pip install pyspark
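The scripts below can be run with a plain Python interpreter or submitted to a cluster with spark-submit. A minimal sketch, assuming the standalone master address used in the scripts below:
spark-submit --master spark://192.168.0.166:7077 wordcount.py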
Data format
words.txt
word1
word2
word3
...
Word count
wordcount.py
# -*- coding:utf-8 -*-
from pyspark import SparkConf, SparkContext

# Disable the console progress bar (config keys are lowercase "spark.*")
conf = SparkConf().set("spark.ui.showConsoleProgress", "false")
sc = SparkContext(master="spark://192.168.0.166:7077", appName="Spark word count", conf=conf)

data_file = sc.textFile("words.txt")
# Take the word from the first tab-separated field, emit (word, 1), then sum the counts per word
result = data_file.map(lambda x: x.split("\t")[0]).map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
print(result.collect())
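collect() pulls the whole result back to the driver, which is fine for a small word list. For larger outputs it is usually better to write from the executors instead; a minimal sketch, with word_count_output as a hypothetical output directory:
result.saveAsTextFile("word_count_output")  # hypothetical path; Spark writes one part file per partition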
Top-N ranking
# -*- coding:utf-8 -*-
from pyspark import SparkConf, SparkContext
import heapq

conf = SparkConf().set("spark.ui.showConsoleProgress", "false")
sc = SparkContext(master="spark://192.168.0.166:7077", appName="Spark topk", conf=conf)

data_file = sc.textFile("words.txt")
result = data_file.map(lambda x: x.split("\t")[0]).map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)

topn = 10
# Take the local top-N inside each partition first ...
par_topk = result.mapPartitions(lambda elements: heapq.nlargest(topn, elements, key=lambda e: e[1]))
# ... then merge the per-partition candidates on the driver to get the global top-N
final_result = heapq.nlargest(topn, par_topk.collect(), key=lambda e: e[1])
print(final_result)
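For comparison, PySpark's RDD API also provides top(), which internally does the same kind of per-partition top-N followed by a merge on the driver, so the ranking above can be written more compactly. A minimal sketch, reusing result and topn from the script above:
final_result = result.top(topn, key=lambda e: e[1])  # top-N (word, count) pairs by count, descending
print(final_result)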
Median
# -*- coding:utf-8 -*-
from pyspark import SparkConf, SparkContext

conf = SparkConf().set("spark.ui.showConsoleProgress", "false")
sc = SparkContext(master="spark://192.168.0.166:7077", appName="Spark median", conf=conf)

sorted_array = sc.parallelize(range(100), 10)
# Bucket the sorted data into groups; the number of groups depends on the data size
group_element = sorted_array.map(lambda e: (e // 10, e)).sortByKey()
# Count the number of elements in each group
group_element_count = sorted_array.map(lambda e: (e // 10, 1)).reduceByKey(lambda x, y: x + y).sortByKey()
group_element_count_map = group_element_count.collectAsMap()
# Total number of elements
element_count = group_element_count.map(lambda kv: kv[1]).sum()

# Position of the median in the sorted sequence (1-based)
if element_count % 2 != 0:
    mid = element_count // 2 + 1
else:
    mid = element_count // 2

# Walk through the groups to find which group holds the median
temp = 0    # elements seen up to and including the current group
temp2 = 0   # elements seen before the current group
index = 0
pcount = group_element_count.count()
for i in range(pcount):
    temp = temp + group_element_count_map[i]
    temp2 = temp - group_element_count_map[i]
    if temp >= mid:
        # The median falls in group i
        index = i
        break

# The median is the (mid - temp2)-th smallest element of that group
offset = mid - temp2
result = group_element.filter(lambda kv: kv[0] == index).takeOrdered(offset)
print(result)
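If the bucketing step is not needed, the median can also be obtained by sorting the whole RDD and looking up the middle rank directly. A minimal alternative sketch, reusing the SparkContext sc and the same range(100) test data from the script above:
rdd = sc.parallelize(range(100), 10)
n = rdd.count()
# Sort globally, then key each element by its rank: (rank, element)
indexed = rdd.sortBy(lambda x: x).zipWithIndex().map(lambda kv: (kv[1], kv[0]))
if n % 2 == 1:
    median = indexed.lookup(n // 2)[0]  # middle element (0-based rank n // 2)
else:
    median = (indexed.lookup(n // 2 - 1)[0] + indexed.lookup(n // 2)[0]) / 2.0
print(median)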