Spark Accumulators in Java
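The complete, runnable example below (assuming a Spark spark-core dependency on the classpath) runs a word count over a local text file and uses an accumulator to count how many lines were processed; the total is printed after the collect() action triggers the job.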

package SparkStreaming;

import org.apache.commons.collections.iterators.ArrayListIterator;
import org.apache.spark.Accumulator;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.Iterator;
import java.util.List;

public class totalization_device {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
                .setMaster("local[2]")
                .setAppName("totalization_device");
        JavaSparkContext sc = new JavaSparkContext(conf);

        // Define an accumulator, initialized to 0. It is updated on the
        // executors and its value is read back on the driver.
        final Accumulator<Integer> accumulator = sc.accumulator(0);

        JavaRDD<String> fileRDD = sc.textFile("E:/2018_cnic/learn/wordcount.txt");

        // Split each line into words; add 1 to the accumulator per line processed.
        JavaRDD<String> fileRDD1 = fileRDD.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String s) throws Exception {
                accumulator.add(1);
                // ArrayListIterator iterates over the array of words.
                return new ArrayListIterator(s.split(" "));
            }
        });

        // Map each word to a (word, 1) pair.
        JavaPairRDD<String, Integer> pairRDD = fileRDD1.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) throws Exception {
                return new Tuple2<>(s, 1);
            }
        });

        // Sum the counts for each word.
        JavaPairRDD<String, Integer> reducebykeyRDD = pairRDD.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer integer, Integer integer2) throws Exception {
                return integer + integer2;
            }
        });

        // collect() is an action; it triggers the job, so the accumulator
        // only holds its final value after this point.
        List<Tuple2<String, Integer>> collect = reducebykeyRDD.collect();
        for (Tuple2<String, Integer> tup : collect) {
            System.out.println(tup);
        }

        Integer num = accumulator.value();
        System.out.println("Total: " + num + " lines");

        sc.close();
    }
}
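Two caveats. First, the accumulator here is incremented inside a transformation (flatMap), and Spark only guarantees exactly-once accumulator updates for actions; if a task is retried or a stage recomputed, updates made inside transformations can be applied more than once, so the line count should be treated as approximate. Second, this Accumulator API is deprecated as of Spark 2.0 in favor of AccumulatorV2. A minimal sketch of the same line count using the built-in LongAccumulator (assuming Spark 2.x; lineCount is an illustrative name):

import org.apache.spark.util.LongAccumulator;

// Inside main(), after creating the JavaSparkContext:
LongAccumulator lineCount = sc.sc().longAccumulator("lineCount");

// foreach is an action, so these updates are applied exactly once.
fileRDD.foreach(line -> lineCount.add(1));

System.out.println("Total: " + lineCount.value() + " lines");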
