PySpark Action Operators
# -*- coding: utf-8 -*-
# Author : ming
# time: 2022-03-09
import findspark
findspark.init()
from pyspark import SparkConf, SparkContext
conf = SparkConf().setAppName("action_function").setMaster("local[*]")
sc = SparkContext(conf=conf)
"""
Action算子案例
"""
# 1. collect: return all elements of the RDD to the driver as a list
rdd1 = sc.parallelize(range(0, 5))
print(rdd1.collect())
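# prints: [0, 1, 2, 3, 4]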
# 2. first: return the first element
rdd2 = rdd1
first_num = rdd2.first()
print(first_num)
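# prints: 0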
# 3. collectAsMap: convert a pair RDD to a dict
# note: do not use this on large datasets; all the data is loaded into the driver and can exhaust its memory
rdd3 = sc.parallelize([(1,2), (3,4)])
rdd3_dict = rdd3.collectAsMap()
print(rdd3_dict)
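# prints: {1: 2, 3: 4}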
# 4. reduce: aggregate the elements with a binary function
rdd4 = sc.parallelize(range(0,5))
rdd4_reduce = rdd4.reduce(lambda x, y: x+y)
print(rdd4_reduce)
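# prints: 10  (the sum 0 + 1 + 2 + 3 + 4)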
# 5. countByKey: count the number of elements for each key
rdd5 = sc.parallelize([("a", 1), ("b", 2), ("a", 3), ("b", 3), ("c", 2)])
rdd5_count_by_key = rdd5.countByKey()
print(sorted(rdd5_count_by_key.items()))
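# prints: [('a', 2), ('b', 2), ('c', 1)]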
# 6. countByValue: count the occurrences of each element, similar to Python's Counter()
rdd6 = sc.parallelize([1,2,3,4,2,3,4,1,2,4,2,1,4,2])
rdd6_count_by_value = rdd6.countByValue()
print(sorted(rdd6_count_by_value.items()))
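# prints: [(1, 3), (2, 5), (3, 2), (4, 4)]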
# 7. take: return the first N elements
rdd7 = sc.parallelize(range(0,6))
print(rdd7.take(4))
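# prints: [0, 1, 2, 3]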
# 8. saveAsTextFile: save the RDD as a text file
text_file = "file:///D:/study/pyspark/spark_learn/data/rdd.txt"
rdd8 = sc.parallelize(range(10))
rdd8.saveAsTextFile(text_file)
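# note: Spark writes a directory of part-files at this path and raises an error if the path already exists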
# 9. takeSample: draw a random sample
# This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory.
rdd9 = sc.parallelize([1,2,3,4,2,45,6,8,45,34,8,34,67,23,6,23,1])
rdd9_sample = rdd9.takeSample(True, 10, 0)  # withReplacement: sample with replacement; num: number of elements; seed: random seed
print(rdd9_sample)
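# prints 10 elements drawn with replacement; the exact values depend on Spark's internal sampler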
# 10. takeOrdered: return the N smallest elements, or the top N by a custom key
rdd10 = sc.parallelize([1,2,3,2,4,6,7,4,5,3476,54,23,6,12,6512,1])
rdd10_take_order = rdd10.takeOrdered(6)  # ascending order by default
print(rdd10_take_order)
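# prints: [1, 1, 2, 2, 3, 4]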
rdd10_take_order_reverse = rdd10.takeOrdered(6, lambda x: -x)
print(rdd10_take_order_reverse)
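# prints: [6512, 3476, 54, 23, 12, 7]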
# 11. foreach: apply a function to each element, for its side effects
rdd11 = sc.parallelize(range(5))
rdd11.foreach(print)
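# note: foreach runs on the executors; under local[*] the output appears in the
# console, but the print order is not deterministic
sc.stop()  # release the SparkContext when done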