Spark2.x之RDD支持java8 lambda表达式

1、非lambda实现的java spark wordcount程序

public class WordCount {
    /**
     * Classic Spark word count using anonymous inner classes (pre-Java-8 style).
     * Reads a local text file, splits lines into words, counts occurrences,
     * and writes the result back to the local filesystem.
     */
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("appName").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        try {
            //JavaPairRDD<LongWritable, Text> inputRDD = sc.hadoopFile("hdfs://master:9999/user/word.txt",
            //        TextInputFormat.class, LongWritable.class, Text.class);

            JavaRDD<String> inputRDD = sc.textFile("file:///Users/tangweiqun/test.txt");

            // Split each line on single spaces into individual words.
            JavaRDD<String> wordsRDD = inputRDD.flatMap(new FlatMapFunction<String, String>() {
                @Override
                public Iterator<String> call(String s) throws Exception {
                    return Arrays.asList(s.split(" ")).iterator();
                }
            });

            // Map each word to a (word, 1) pair so counts can be summed per key.
            JavaPairRDD<String, Integer> keyValueWordsRDD
                    = wordsRDD.mapToPair(new PairFunction<String, String, Integer>() {
                @Override
                public Tuple2<String, Integer> call(String s) throws Exception {
                    return new Tuple2<String, Integer>(s, 1);
                }
            });

            // Sum the counts per word across 2 hash partitions.
            JavaPairRDD<String, Integer> wordCountRDD =
                    keyValueWordsRDD.reduceByKey(new HashPartitioner(2),
                            new Function2<Integer, Integer, Integer>() {
                @Override
                public Integer call(Integer a, Integer b) throws Exception {
                    return a + b;
                }
            });

            // saveAsTextFile fails if the output directory exists, so delete it first.
            File outputFile = new File("/Users/tangweiqun/wordcount");
            if (outputFile.exists()) {
                // listFiles() returns null when the path is not a directory
                // or cannot be read -- guard against an NPE.
                File[] files = outputFile.listFiles();
                if (files != null) {
                    for (File file : files) {
                        file.delete();
                    }
                }
                outputFile.delete();
            }

            wordCountRDD.saveAsTextFile("file:///Users/tangweiqun/wordcount");

            System.out.println(wordCountRDD.collect());
        } finally {
            // Always release the SparkContext, even if the job throws.
            sc.stop();
        }
    }
}

2、java8 lambda实现的wordcount代码

public class WordCount {

    /**
     * Same word count as the anonymous-class version, expressed with
     * Java 8 lambda expressions. Behavior is identical; only the
     * function-object syntax differs.
     */
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("appName").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        try {
            //JavaPairRDD<LongWritable, Text> inputRDD = sc.hadoopFile("hdfs://master:9999/user/word.txt",
            //        TextInputFormat.class, LongWritable.class, Text.class);

            JavaRDD<String> inputRDD = sc.textFile("file:///Users/tangweiqun/test.txt");

            // Split each line on single spaces into individual words.
            JavaRDD<String> wordsRDD = inputRDD.flatMap(input -> Arrays.asList(input.split(" ")).iterator());

            // Map each word to a (word, 1) pair so counts can be summed per key.
            JavaPairRDD<String, Integer> keyValueWordsRDD
                    = wordsRDD.mapToPair(word -> new Tuple2<>(word, 1));

            // Sum the counts per word.
            JavaPairRDD<String, Integer> wordCountRDD = keyValueWordsRDD.reduceByKey((a, b) -> a + b);

            // saveAsTextFile fails if the output directory exists, so delete it first.
            File outputFile = new File("/Users/tangweiqun/wordcount");
            if (outputFile.exists()) {
                // listFiles() returns null when the path is not a directory
                // or cannot be read -- guard against an NPE.
                File[] files = outputFile.listFiles();
                if (files != null) {
                    for (File file : files) {
                        file.delete();
                    }
                }
                outputFile.delete();
            }

            wordCountRDD.saveAsTextFile("file:///Users/tangweiqun/wordcount");

            System.out.println(wordCountRDD.collect());
        } finally {
            // Always release the SparkContext, even if the job throws.
            sc.stop();
        }
    }
}

从上面可以看出,lambda的实现更加简洁。需要注意的是,一个lambda表达式本身并不是接口,而是对某个java函数式接口(functional interface,即只含一个抽象方法的接口,如FlatMapFunction、PairFunction)的简洁实现。

 

参考自http://blog.51cto.com/7639240/1966958《spark2.x由浅入深深到底系列六之RDD java api详解三》中combineByKey,如下的代码:

// Diamond operator (<>) instead of raw Tuple2 -- raw types defeat generic
// type checking and produce unchecked warnings.
JavaPairRDD<String, Integer> javaPairRDD =
        sc.parallelizePairs(Arrays.asList(new Tuple2<>("coffee", 1), new Tuple2<>("coffee", 2),
                new Tuple2<>("panda", 3), new Tuple2<>("coffee", 9)), 2);

// Applied to a value the first time its key is seen within a partition:
// starts the accumulator as (sum, count) = (value, 1).
Function<Integer, Tuple2<Integer, Integer>> createCombiner = new Function<Integer, Tuple2<Integer, Integer>>() {
    @Override
    public Tuple2<Integer, Integer> call(Integer value) throws Exception {
        return new Tuple2<>(value, 1);
    }
};
// Applied when a key already has an accumulator in this partition:
// folds the new value into the running (sum, count).
Function2<Tuple2<Integer, Integer>, Integer, Tuple2<Integer, Integer>> mergeValue =
        new Function2<Tuple2<Integer, Integer>, Integer, Tuple2<Integer, Integer>>() {
            @Override
            public Tuple2<Integer, Integer> call(Tuple2<Integer, Integer> acc, Integer value) throws Exception {
                return new Tuple2<>(acc._1() + value, acc._2() + 1);
            }
        };
// Applied when accumulators for the same key from different partitions
// are combined: adds the sums and the counts component-wise.
Function2<Tuple2<Integer, Integer>, Tuple2<Integer, Integer>, Tuple2<Integer, Integer>> mergeCombiners =
        new Function2<Tuple2<Integer, Integer>, Tuple2<Integer, Integer>, Tuple2<Integer, Integer>>() {
            @Override
            public Tuple2<Integer, Integer> call(Tuple2<Integer, Integer> acc1, Tuple2<Integer, Integer> acc2) throws Exception {
                return new Tuple2<>(acc1._1() + acc2._1(), acc1._2() + acc2._2());
            }
        };

JavaPairRDD<String, Tuple2<Integer, Integer>> combineByKeyRDD =
        javaPairRDD.combineByKey(createCombiner, mergeValue, mergeCombiners);
// Expected: [(coffee,(12,3)), (panda,(3,1))]
System.out.println("combineByKeyRDD = " + combineByKeyRDD.collect());

可以写成如下的lambda实现的combineByKey:

// Diamond operator (<>) instead of raw Tuple2 -- raw types defeat generic
// type checking and produce unchecked warnings.
JavaPairRDD<String, Integer> javaPairRDD =
        sc.parallelizePairs(Arrays.asList(new Tuple2<>("coffee", 1), new Tuple2<>("coffee", 2),
                new Tuple2<>("panda", 3), new Tuple2<>("coffee", 9)), 2);
// Applied to a value the first time its key is seen within a partition:
// starts the accumulator as (sum, count) = (value, 1).
Function<Integer, Tuple2<Integer, Integer>> createCombiner = value -> new Tuple2<>(value, 1);
// Applied when a key already has an accumulator in this partition:
// folds the new value into the running (sum, count).
Function2<Tuple2<Integer, Integer>, Integer, Tuple2<Integer, Integer>> mergeValue =
        (acc, value) -> new Tuple2<>(acc._1() + value, acc._2() + 1);
// Applied when accumulators for the same key from different partitions
// are combined: adds the sums and the counts component-wise.
Function2<Tuple2<Integer, Integer>, Tuple2<Integer, Integer>, Tuple2<Integer, Integer>> mergeCombiners =
        (acc1, acc2) -> new Tuple2<>(acc1._1() + acc2._1(), acc1._2() + acc2._2());

JavaPairRDD<String, Tuple2<Integer, Integer>> combineByKeyRDD =
        javaPairRDD.combineByKey(createCombiner, mergeValue, mergeCombiners);
// Expected: [(coffee,(12,3)), (panda,(3,1))]
System.out.println("combineByKeyRDD = " + combineByKeyRDD.collect());

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值