Java Spark operator: join

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;
import java.util.Arrays;
import java.util.List;
/**
 * join(otherDataset, [numPartitions])
 * First parameter: the other pair RDD to join with.
 * Second parameter: the number of partitions; optional.
 * Performs an inner join of the two RDDs by key: only keys present in both
 * RDDs appear in the result, and for each such key the values from the two
 * RDDs are combined as a Cartesian product of value pairs.
 *
 * This program prints: [(d,(1,3)), (d,(2,3)), (t,(1,3)), (t,(1,4)), (t,(2,3)), (t,(2,4))]
 */
public class JoinDemo {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local").setAppName("spark");
        JavaSparkContext sc = new JavaSparkContext(conf);
        List<Tuple2<String, Integer>> list1 = Arrays.asList(
                new Tuple2<>("t", 1),
                new Tuple2<>("t", 2),
                new Tuple2<>("d", 1),
                new Tuple2<>("d", 2)
        );
        List<Tuple2<String, Integer>> list2 = Arrays.asList(
                new Tuple2<>("t", 3),
                new Tuple2<>("t", 4),
                new Tuple2<>("d", 3)
        );
        JavaPairRDD<String, Integer> pairRDD1 = sc.parallelizePairs(list1);
        JavaPairRDD<String, Integer> pairRDD2 = sc.parallelizePairs(list2);
        // join operator: inner join by key
        JavaPairRDD<String, Tuple2<Integer, Integer>> joined = pairRDD1.join(pairRDD2);
        System.out.println(joined.collect());
        sc.close();
    }
}
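
For reference, join also has an overload that takes an explicit partition count, and the related operators leftOuterJoin / rightOuterJoin / fullOuterJoin keep keys that have no match on one side. The sketch below is a minimal illustration of both, assuming Spark 2.x (where the missing side is wrapped in org.apache.spark.api.java.Optional); the class name JoinVariantsDemo and its sample data are made up for this example.

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.Optional;
import scala.Tuple2;
import java.util.Arrays;

public class JoinVariantsDemo {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local").setAppName("spark");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaPairRDD<String, Integer> left = sc.parallelizePairs(Arrays.asList(
                new Tuple2<>("t", 1), new Tuple2<>("d", 2), new Tuple2<>("x", 9)));
        JavaPairRDD<String, Integer> right = sc.parallelizePairs(Arrays.asList(
                new Tuple2<>("t", 3), new Tuple2<>("d", 4)));

        // join with an explicit partition count: the second argument sets
        // how many partitions the result RDD has.
        JavaPairRDD<String, Tuple2<Integer, Integer>> inner = left.join(right, 2);
        System.out.println(inner.collect()); // "x" is dropped: no matching key

        // leftOuterJoin keeps every key of the left RDD; a missing right-side
        // value appears as Optional.empty().
        JavaPairRDD<String, Tuple2<Integer, Optional<Integer>>> outer =
                left.leftOuterJoin(right);
        System.out.println(outer.collect()); // (x,(9,Optional.empty)) is kept

        sc.close();
    }
}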