Spark RDD Operators (12): Partition-level Operations mapPartitions and mapPartitionsWithIndex
mapPartitions
- mapPartitions is easiest to read back to front: first partition, then run a map function over each whole partition, so the supplied function receives a partition's iterator rather than individual elements.
- When to use it
If the mapping has to repeatedly create expensive auxiliary objects, mapPartitions is considerably more efficient than map.
For example, when writing all of an RDD's data to a database over JDBC, map may end up opening one connection per element, which is very costly; with mapPartitions only one connection per partition is needed, as in the sketch below.
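A minimal Scala sketch of this per-partition connection pattern (the JDBC URL, credentials and table t(value) are hypothetical placeholders, and a suitable JDBC driver is assumed to be on the classpath):

import java.sql.DriverManager
import org.apache.spark.rdd.RDD

def saveToDb(rdd: RDD[Int]): Unit = {
  val written: RDD[Int] = rdd.mapPartitions { iter =>
    // one connection and one prepared statement per partition, not per element
    val conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/test", "user", "pass")
    val stmt = conn.prepareStatement("INSERT INTO t(value) VALUES (?)")
    var n = 0
    try {
      iter.foreach { x =>
        stmt.setInt(1, x)
        stmt.executeUpdate()
        n += 1
      }
    } finally {
      stmt.close()
      conn.close()
    }
    Iterator.single(n) // mapPartitions must return an iterator
  }
  written.count() // mapPartitions is lazy; an action forces the writes
}

(For pure side effects, foreachPartition is the usual choice, but the per-partition connection pattern is the same.)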
Turn every element i into the pair (i, i*i)
Scala version
package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object mapPartitions {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("mapPartitions")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))

    // The function receives an entire partition as an iterator and must return an iterator
    def fun(iter: Iterator[Int]): Iterator[(Int, Int)] = {
      var res = List[(Int, Int)]()
      while (iter.hasNext) {
        val cur: Int = iter.next()
        res = (cur, cur * cur) :: res // prepend, so order within a partition is reversed
      }
      res.iterator
    }

    val rdd2: RDD[(Int, Int)] = rdd1.mapPartitions(fun)
    rdd2.collect.foreach(println)
  }
}
Result: every element i comes back as the pair (i, i*i); the exact order depends on the partitioning (and is reversed within each partition because the list is built by prepending).
Java version
package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;

public class mapPartitionsJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("mapPartitionsJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
        JavaRDD<Tuple2<Integer, Integer>> tuple2JavaRDD = rdd1.mapPartitions(new FlatMapFunction<Iterator<Integer>, Tuple2<Integer, Integer>>() {
            @Override
            public Iterator<Tuple2<Integer, Integer>> call(Iterator<Integer> it) throws Exception {
                ArrayList<Tuple2<Integer, Integer>> tuple2s = new ArrayList<>();
                while (it.hasNext()) {
                    Integer next = it.next();
                    tuple2s.add(new Tuple2<Integer, Integer>(next, next * next));
                }
                // a List is not an Iterator, so return its iterator rather than casting the list
                return tuple2s.iterator();
            }
        });
        tuple2JavaRDD.foreach(new VoidFunction<Tuple2<Integer, Integer>>() {
            @Override
            public void call(Tuple2<Integer, Integer> tp2) throws Exception {
                System.out.println(tp2);
            }
        });
    }
}
Turn every pair (i, j) into (i, j*j)
Scala version
package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object mapPartitions2 {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("mapPartitionsScala")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[(Int, Int)] = sc.parallelize(List((1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3)))

    // Square only the second element of each pair
    def fun(iter: Iterator[(Int, Int)]): Iterator[(Int, Int)] = {
      var res = List[(Int, Int)]()
      while (iter.hasNext) {
        val cur = iter.next()
        res = (cur._1, cur._2 * cur._2) :: res
      }
      res.iterator
    }

    val rdd2: RDD[(Int, Int)] = rdd1.mapPartitions(fun)
    rdd2.collect.foreach(println)
  }
}
Result: every pair (i, j) comes back as (i, j*j); ordering depends on the partitioning.
Java version
package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;

public class mapPartitions2Java {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("mapPartitionsJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Tuple2<Integer, Integer>> rdd1 = sc.parallelize(Arrays.asList(
                new Tuple2<Integer, Integer>(1, 1),
                new Tuple2<Integer, Integer>(1, 2),
                new Tuple2<Integer, Integer>(1, 3),
                new Tuple2<Integer, Integer>(2, 1),
                new Tuple2<Integer, Integer>(2, 2),
                new Tuple2<Integer, Integer>(2, 3)));
        JavaPairRDD<Integer, Integer> rdd2 = JavaPairRDD.fromJavaRDD(rdd1);
        JavaRDD<Tuple2<Integer, Integer>> rdd3 = rdd2.mapPartitions(new FlatMapFunction<Iterator<Tuple2<Integer, Integer>>, Tuple2<Integer, Integer>>() {
            @Override
            public Iterator<Tuple2<Integer, Integer>> call(Iterator<Tuple2<Integer, Integer>> tuple2Iterator) throws Exception {
                ArrayList<Tuple2<Integer, Integer>> tuple2s = new ArrayList<>();
                while (tuple2Iterator.hasNext()) {
                    Tuple2<Integer, Integer> next = tuple2Iterator.next();
                    tuple2s.add(new Tuple2<Integer, Integer>(next._1, next._2 * next._2));
                }
                // return an Iterator, not the list itself
                return tuple2s.iterator();
            }
        });
        rdd3.foreach(new VoidFunction<Tuple2<Integer, Integer>>() {
            @Override
            public void call(Tuple2<Integer, Integer> integerIntegerTuple2) throws Exception {
                System.out.println(integerIntegerTuple2);
            }
        });
    }
}
Square every element
Java version
package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.VoidFunction;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;

public class mapPartitions3Java {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("mapPartitionsJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
        JavaRDD<Integer> rdd2 = rdd1.mapPartitions(new FlatMapFunction<Iterator<Integer>, Integer>() {
            @Override
            public Iterator<Integer> call(Iterator<Integer> integerIterator) throws Exception {
                ArrayList<Integer> results = new ArrayList<>();
                while (integerIterator.hasNext()) {
                    Integer i = integerIterator.next();
                    results.add(i * i);
                }
                // this println runs on the executors, once per partition
                for (Integer a : results) {
                    System.out.println(a);
                }
                // return an Iterator, not the list itself
                return results.iterator();
            }
        });
        rdd2.foreach(new VoidFunction<Integer>() {
            @Override
            public void call(Integer integer) throws Exception {
                System.out.println(integer);
            }
        });
    }
}
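For symmetry with the earlier examples, a minimal Scala sketch of the same squaring (assuming an existing SparkContext sc; not part of the original listing):

val rdd1 = sc.parallelize(List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
// iter.map is itself an iterator-to-iterator transformation, so no intermediate list is needed
val rdd2 = rdd1.mapPartitions(iter => iter.map(i => i * i))
rdd2.collect.foreach(println)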
mapPartitionsWithIndex
- Similar to mapPartitions, this is also a map performed partition by partition, except that the function passed to mapPartitionsWithIndex takes one extra parameter: the index of the partition being processed (the Java API additionally takes a boolean preservesPartitioning argument). The sketch below shows the basic shape.
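A one-line sketch of the call form, equivalent to the fuller examples that follow (assumes an existing SparkContext sc):

val rdd1 = sc.parallelize(1 to 10)
// the function gets (partition index, partition iterator) and returns an iterator
val tagged = rdd1.mapPartitionsWithIndex((idx, iter) => iter.map(x => (idx, x)))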
List the elements of each partition
Scala version
package nj.zb.kb09.gaoji

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object mapPartitionsWithIndex {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("mapPartitionsWithIndex")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))

    // i is the partition index, iter is that partition's data
    def fun(i: Int, iter: Iterator[Int]): Iterator[(Int, Int)] = {
      var res = List[(Int, Int)]()
      while (iter.hasNext) {
        val next = iter.next()
        res = (i, next) :: res
      }
      res.iterator
    }

    val rdd2: RDD[(Int, Int)] = rdd1.mapPartitionsWithIndex(fun)
    rdd2.collect.foreach(println)
  }
}
Result: every element is emitted as (partitionIndex, element), showing how the ten numbers are spread across the local[*] partitions.
Java version
package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;

public class mapPartitionsWithIndex {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("mapPartitionsWithIndexJava");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Integer> rdd1 = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
        JavaRDD<Tuple2<Integer, Integer>> tuple2JavaRDD = rdd1.mapPartitionsWithIndex(new Function2<Integer, Iterator<Integer>, Iterator<Tuple2<Integer, Integer>>>() {
            @Override
            public Iterator<Tuple2<Integer, Integer>> call(Integer partIndex, Iterator<Integer> it) throws Exception {
                ArrayList<Tuple2<Integer, Integer>> tuple2s = new ArrayList<>();
                while (it.hasNext()) {
                    int next = it.next();
                    tuple2s.add(new Tuple2<>(partIndex, next));
                }
                return tuple2s.iterator();
            }
        }, false); // second argument: preservesPartitioning
        tuple2JavaRDD.foreach(new VoidFunction<Tuple2<Integer, Integer>>() {
            @Override
            public void call(Tuple2<Integer, Integer> tp2) throws Exception {
                System.out.println(tp2);
            }
        });
    }
}
Result: same as the Scala version, each element tagged with its partition index.
List the key-value pairs of each partition
Scala version
package nj.zb.sparkstu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object mapPartitionsWithIndex2 {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("mapPartitionsWithIndex")
    val sc: SparkContext = new SparkContext(conf)
    val rdd1: RDD[(Int, Int)] = sc.parallelize(List((1, 1), (1, 2), (2, 3), (2, 4), (3, 5), (3, 6), (4, 7), (4, 8), (5, 9), (5, 10)))

    // Tag every key-value pair with the index of the partition it belongs to
    def fun(i: Int, iter: Iterator[(Int, Int)]): Iterator[(Int, (Int, Int))] = {
      var res = List[(Int, (Int, Int))]()
      while (iter.hasNext) {
        val next = iter.next()
        res = (i, next) :: res
      }
      res.iterator
    }

    val rdd2: RDD[(Int, (Int, Int))] = rdd1.mapPartitionsWithIndex(fun)
    rdd2.collect.foreach(println)
  }
}
Result: every (k, v) pair is emitted as (partitionIndex, (k, v)).
Java version
package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;

public class mapPartitionsWithIndex2Java {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("mapPartitionsWithIndex");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Tuple2<Integer, Integer>> rdd = sc.parallelize(Arrays.asList(
                new Tuple2<Integer, Integer>(1, 1), new Tuple2<Integer, Integer>(1, 2),
                new Tuple2<Integer, Integer>(2, 3), new Tuple2<Integer, Integer>(2, 4),
                new Tuple2<Integer, Integer>(3, 5), new Tuple2<Integer, Integer>(3, 6),
                new Tuple2<Integer, Integer>(4, 7), new Tuple2<Integer, Integer>(4, 8),
                new Tuple2<Integer, Integer>(5, 9), new Tuple2<Integer, Integer>(5, 10)
        ), 3);
        JavaPairRDD<Integer, Integer> pairRDD = JavaPairRDD.fromJavaRDD(rdd);
        JavaRDD<Tuple2<Integer, Tuple2<Integer, Integer>>> mapPartitionIndexRDD = pairRDD.mapPartitionsWithIndex(new Function2<Integer, Iterator<Tuple2<Integer, Integer>>, Iterator<Tuple2<Integer, Tuple2<Integer, Integer>>>>() {
            @Override
            public Iterator<Tuple2<Integer, Tuple2<Integer, Integer>>> call(Integer partIndex, Iterator<Tuple2<Integer, Integer>> tuple2Iterator) {
                ArrayList<Tuple2<Integer, Tuple2<Integer, Integer>>> tuple2s = new ArrayList<>();
                while (tuple2Iterator.hasNext()) {
                    Tuple2<Integer, Integer> next = tuple2Iterator.next();
                    tuple2s.add(new Tuple2<Integer, Tuple2<Integer, Integer>>(partIndex, next));
                }
                return tuple2s.iterator();
            }
        }, false);
        mapPartitionIndexRDD.foreach(new VoidFunction<Tuple2<Integer, Tuple2<Integer, Integer>>>() {
            @Override
            public void call(Tuple2<Integer, Tuple2<Integer, Integer>> integerTuple2Tuple2) throws Exception {
                System.out.println(integerTuple2Tuple2);
            }
        });
    }
}
Result: every pair is emitted as (partitionIndex, (k, v)); with 3 partitions the ten pairs are spread over indices 0, 1 and 2.
Addendum: to print the contents of each partition, you can use the glom method
package nj.zb.sparkstu;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;

import java.util.Arrays;
import java.util.List;

public class glom {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("glom");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaRDD<Tuple2<Integer, Integer>> rdd1 = sc.parallelize(Arrays.asList(
                new Tuple2<Integer, Integer>(1, 1), new Tuple2<Integer, Integer>(1, 2),
                new Tuple2<Integer, Integer>(2, 3), new Tuple2<Integer, Integer>(2, 4),
                new Tuple2<Integer, Integer>(3, 5), new Tuple2<Integer, Integer>(3, 6),
                new Tuple2<Integer, Integer>(4, 7), new Tuple2<Integer, Integer>(4, 8),
                new Tuple2<Integer, Integer>(5, 9), new Tuple2<Integer, Integer>(5, 10)));
        JavaPairRDD<Integer, Integer> pairRDD = JavaPairRDD.fromJavaRDD(rdd1);
        /* Addendum: to print the contents of each partition, use glom, which gathers every partition into a single List */
        System.out.println("Printing each partition with glom:");
        JavaRDD<List<Tuple2<Integer, Integer>>> glom = pairRDD.glom();
        glom.foreach(new VoidFunction<List<Tuple2<Integer, Integer>>>() {
            @Override
            public void call(List<Tuple2<Integer, Integer>> tuple2s) throws Exception {
                System.out.println(tuple2s);
            }
        });
    }
}
Result: glom gathers each partition into a single list, so one list of pairs is printed per partition.
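For reference, a minimal Scala sketch of the same glom-based partition dump (assumes an existing SparkContext sc; the data mirrors the Java example above):

val pairs = sc.parallelize(List((1, 1), (1, 2), (2, 3), (2, 4), (3, 5), (3, 6), (4, 7), (4, 8), (5, 9), (5, 10)), 3)
pairs.glom()     // RDD[Array[(Int, Int)]]: one array per partition
     .collect()
     .foreach(part => println(part.mkString("[", ", ", "]")))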