对比scala和java编写的spark wordcount程序

使用scala编写spark wordcount程序

import org.apache.spark.{SparkConf, SparkContext}

object WordCount {

  /** Entry point: counts word occurrences in a local text file and prints
    * each (word, count) pair, sorted by count in descending order.
    *
    * NOTE(review): the input path is hard-coded; the commented-out original
    * read it from args(0) instead.
    */
  def main(args: Array[String]): Unit = {

    // Spark configuration: application name plus a single-threaded local master.
    val conf: SparkConf = new SparkConf().setAppName("WordCount").setMaster("local[1]")

    // Workaround for "insufficient memory" errors when running in local mode.
    conf.set("spark.testing.memory", "2147480000")

    // The SparkContext is the entry point to the Spark runtime.
    val sc = new SparkContext(conf)

    // Pipeline: read the file, split each line on spaces, pair every word
    // with 1, sum the counts per word, then sort by count.
    // The second argument to sortBy is `ascending`; false = descending.
    //val input = sc.textFile(args(0))
    val counts = sc
      .textFile("E:\\word.txt")
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, false)

    // Bring the results to the driver and print them.
    counts.collect().foreach(println)

    // Release Spark resources.
    sc.stop()

  }

}

使用java编写spark wordcount程序

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.Arrays;
import java.util.Iterator;


public class WordCount {
    /**
     * Entry point: counts word occurrences in a local text file and prints
     * the collected (word, count) pairs, ordered by count ascending.
     */
    public static void main(String[] args) {
        // Spark configuration: application name plus a two-thread local master.
        SparkConf sparkConf = new SparkConf().setAppName("WordCount").setMaster("local[2]");
        // The JavaSparkContext is the Java-API entry point to the Spark runtime.
        JavaSparkContext jsc = new JavaSparkContext(sparkConf);

        // Read the input file as an RDD of lines.
        JavaRDD<String> lines = jsc.textFile("E:\\word.txt");

        // Split every line on spaces and flatten into an RDD of words.
        JavaRDD<String> tokens = lines.flatMap(
                line -> Arrays.asList(line.split(" ")).iterator());

        // Pair each word with 1, then sum the 1s per word.
        JavaPairRDD<String, Integer> counts = tokens
                .mapToPair(word -> new Tuple2<String, Integer>(word, 1))
                .reduceByKey((left, right) -> left + right);

        // sortByKey can only sort on the key, so: swap to (count, word),
        // sort by the count key (ascending by default), then swap back
        // to (word, count) for output.
        JavaPairRDD<String, Integer> sorted = counts
                .mapToPair(pair -> new Tuple2<Integer, String>(pair._2, pair._1))
                .sortByKey()
                .mapToPair(pair -> new Tuple2<String, Integer>(pair._2, pair._1));

        // Bring the results to the driver and print them.
        System.out.println(sorted.collect());

        // Release Spark resources.
        jsc.close();
    }
}

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值