The collect, reduce, take, and count methods
from pyspark import SparkConf, SparkContext
import os
# 1. Tell PySpark where the Python interpreter is located
os.environ['PYSPARK_PYTHON'] = "C:/Python310/python.exe"
conf = SparkConf().setMaster("local[*]").setAppName("test_spark")
sc = SparkContext(conf=conf)
rdd = sc.parallelize([1, 2, 3, 4, 5])
# collect operator: return the RDD's elements as a list object
rdd_list: list = rdd.collect()
print(rdd_list)
print(type(rdd_list))
# reduce operator: aggregate the RDD's elements pairwise
num = rdd.reduce(lambda a, b: a + b) # 1+2+3+4+5
print(num)
# take operator: return the first N elements of the RDD as a list
take_list = rdd.take(3)
print(take_list)
# count operator: return the number of elements in the RDD as an integer
num_count = rdd.count()
print(f"rdd内有{num_count}个元素")
sc.stop()
Output:
[1, 2, 3, 4, 5]
<class ‘list’>
15
[1, 2, 3]
The RDD contains 5 elements
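Since reduce aggregates elements pairwise, the function it receives should be associative. The minimal sketch below (the rdd2 name is hypothetical; it assumes the same SparkContext as above and must run before sc.stop()) swaps addition for multiplication to make that pairwise behavior explicit:

rdd2 = sc.parallelize([1, 2, 3, 4, 5])
# reduce combines two elements at a time: ((((1*2)*3)*4)*5)
product = rdd2.reduce(lambda a, b: a * b)
print(product)  # 120
# take(3) only brings the first 3 elements back to the driver,
# whereas collect() pulls the whole RDD; prefer take on large datasets
print(rdd2.take(3))  # [1, 2, 3]

Note that collect gathers the entire RDD into the driver's memory, so on large datasets take (or count) is the safer way to inspect results.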