import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * The coalesce(numPartitions) operator:
 * reduces the RDD to numPartitions partitions (without a shuffle by default).
 *
 * The two printed results of this demo are:
 * [0:a, 1:b, 2:c, 3:d]
 * [0:a, 0:b, 1:c, 1:d]
 *
 * i.e. original partitions 0 and 1 are merged into new partition 0,
 * and original partitions 2 and 3 into new partition 1.
 */
public class CoalesceDemo {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local").setAppName("spark");
        JavaSparkContext sc = new JavaSparkContext(conf);
        List<String> list = Arrays.asList("a", "b", "c", "d");
        JavaRDD<String> javaRDD = sc.parallelize(list, 4);
        // Tag each element with its original partition index and print the result
        JavaRDD<String> javaRDD1 = javaRDD.mapPartitionsWithIndex((index, stringIterator) -> {
            List<String> list1 = new ArrayList<>();
            while (stringIterator.hasNext()) {
                list1.add(index + ":" + stringIterator.next());
            }
            return list1.iterator();
        }, false); // second argument: preservesPartitioning
        System.err.println(javaRDD1.collect());
        // Coalesce from 4 partitions down to 2 and print the new mapping
        JavaRDD<String> javaRDD2 = javaRDD.coalesce(2);
        JavaRDD<String> javaRDD3 = javaRDD2.mapPartitionsWithIndex((index, stringIterator) -> {
            List<String> list1 = new ArrayList<>();
            while (stringIterator.hasNext()) {
                list1.add(index + ":" + stringIterator.next());
            }
            return list1.iterator();
        }, false);
        System.err.println(javaRDD3.collect());
        sc.stop();
    }
}
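A note on direction: coalesce(numPartitions) defaults to shuffle = false, which can only merge partitions; asking for more partitions than the RDD already has is silently ignored. To actually increase the partition count you need coalesce(n, true), or repartition(n), which Spark implements as coalesce(n, shuffle = true). Below is a minimal sketch of the difference, assuming the same sc as in the demo above; the expected partition counts are shown in the comments.

// Sketch: coalesce without shuffle cannot increase the partition count,
// while coalesce(n, true) / repartition(n) can, at the cost of a shuffle.
JavaRDD<String> base = sc.parallelize(Arrays.asList("a", "b", "c", "d"), 2);

// shuffle = false (the default): 2 -> 4 is ignored, still 2 partitions
System.err.println(base.coalesce(4).getNumPartitions());        // 2

// shuffle = true: data is redistributed, so 2 -> 4 actually happens
System.err.println(base.coalesce(4, true).getNumPartitions());  // 4

// repartition(n) is equivalent to coalesce(n, shuffle = true)
System.err.println(base.repartition(4).getNumPartitions());     // 4

Because the no-shuffle path just collapses parent partitions locally, coalesce is the cheaper choice when shrinking the partition count (e.g. before writing output), while repartition is the one to reach for when growing or rebalancing partitions.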