我的Spark学习笔记(四)

体会一下:mapToPair()、countByKey()、reduceByKey()。

import org.apache.spark.Partitioner;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.Arrays;
import java.util.List;
import java.util.Map;

/**
 * Demonstrates three ways of aggregating a JavaPairRDD by key:
 * plain reduceByKey, reduceByKey with an explicit partition count,
 * and reduceByKey with a custom Partitioner.
 */
public class ReduceByKeyDemo {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("Simple Application").setMaster("local");

        // Creating the context is expensive — roughly 10 seconds observed locally.
        JavaSparkContext sc = new JavaSparkContext(conf);

        try {
            List<Integer> data = Arrays.asList(1, 2, 2, 3, 3, 3, 4, 5, 6, 7);
            JavaRDD<Integer> javaRDD = sc.parallelize(data);
            System.out.println("### List<Integer>转JavaRDD=" + javaRDD.collect());

            // Turn each element n into the pair (n, 1) so occurrences can be counted.
            JavaPairRDD<Integer,Integer> javaPairRDD = javaRDD.mapToPair(new PairFunction<Integer, Integer, Integer>() {
                @Override
                public Tuple2<Integer, Integer> call(Integer integer) {
                    return new Tuple2<Integer, Integer>(integer, 1);
                }
            });
            System.out.println("### javaRDD.mapToPair(...)=" + javaPairRDD.collect());

            // countByKey is an action: it returns the per-key counts to the driver.
            Map<Integer, Long> countByKeyResultMap = javaPairRDD.countByKey();
            System.out.println("### javaRDD.countByKey()=" + countByKeyResultMap);

            // Variant 1: reduceByKey with just a merge function (default partitioning).
            JavaPairRDD<Integer,Integer> reduceByKeyRDD1 = javaPairRDD.reduceByKey(new Function2<Integer, Integer, Integer>() {
                @Override
                public Integer call(Integer v1, Integer v2) {
                    return v1 + v2;
                }
            });
            System.out.println("### javaPairRDD.reduceByKey,方式1=" + reduceByKeyRDD1.collect());

            // Variant 2: reduceByKey with an explicit number of partitions.
            JavaPairRDD<Integer,Integer> reduceByKeyRDD2 = javaPairRDD.reduceByKey(new Function2<Integer, Integer, Integer>() {
                @Override
                public Integer call(Integer v1, Integer v2) {
                    return v1 + v2;
                }
            },2);
            System.out.println("### javaPairRDD.reduceByKey,方式2=" + reduceByKeyRDD2.collect());

            // Variant 3: reduceByKey with a custom Partitioner.
            JavaPairRDD<Integer,Integer> reduceByKeyRDD3 = javaPairRDD.reduceByKey(new Partitioner() {
                @Override
                public int numPartitions() {
                    return 2;
                }

                @Override
                public int getPartition(Object o) {
                    // Math.floorMod guards against negative hash codes:
                    // getPartition must return a value in [0, numPartitions).
                    // A plain % would yield a negative index for negative hashes.
                    return Math.floorMod(o.toString().hashCode(), numPartitions());
                }
            }, new Function2<Integer, Integer, Integer>() {
                @Override
                public Integer call(Integer v1, Integer v2) {
                    return v1 + v2;
                }
            });
            System.out.println("### javaPairRDD.reduceByKey,方式3=" + reduceByKeyRDD3.collect());
        } finally {
            // Always release the Spark context, even if an action above throws.
            sc.stop();
        }
    }
}

体会一下:groupByKey

test.txt内容:

2010-05-04 11:50,10,10,10
2010-05-04 12:50,10,10,10
2010-05-04 12:50,10,10,10
2010-05-05 13:50,20,20,20
2010-05-05 13:50,20,20,20
2010-05-06 14:50,30,30,30
2010-05-06 14:50,30,30,30

在Spark-shell里执行:

import scala.io.Source

// Load every line of the input file into the driver, then distribute as an RDD.
val rawLines = Source.fromFile("D:/test.txt").getLines.toArray
val linesRDD = sc.parallelize(rawLines)

// Key each record by its timestamp (first CSV field), group the remaining
// three numeric columns per key, then sum each column within a group.
linesRDD
  .map { record =>
    val fields = record.split(",")
    (s"${fields(0)}", s"${fields(1)},${fields(2)},${fields(3)}")
  }
  .groupByKey
  .map { case (key, rows) =>
    // Accumulate the three comma-separated columns with a fold instead of
    // mutable counters — same sums, expression-oriented style.
    val (sumA, sumB, sumC) = rows.foldLeft((0, 0, 0)) { case ((x, y, z), row) =>
      val cols = row.split(",")
      (x + cols(0).toInt, y + cols(1).toInt, z + cols(2).toInt)
    }
    s"$key,$sumA,$sumB,$sumC"
  }
  .foreach(println)

输出:

2010-05-04 11:50,10,10,10
2010-05-04 12:50,20,20,20
2010-05-05 13:50,40,40,40
2010-05-06 14:50,60,60,60
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值