object WordCount {

  /** Spark word count (Scala): reads text from args(0), counts word
    * occurrences, sorts by count in descending order, and writes the
    * result to args(1) (e.g. an HDFS path).
    */
  def main(args: Array[String]): Unit = {
    // SparkConf holds the application settings; the name appears in the Spark UI.
    val conf = new SparkConf().setAppName("WordCount")
    // SparkContext connects to the cluster, requests resources, and sets up
    // the driver-to-executor communication.
    val sc = new SparkContext(conf)

    // Build the whole pipeline as one chain:
    // read lines -> split into words -> pair each word with 1 ->
    // sum counts per word -> sort by count, descending.
    val counts: RDD[(String, Int)] = sc
      .textFile(args(0))
      .flatMap(line => line.split(" "))
      .map(word => (word, 1))
      .reduceByKey((a, b) => a + b)
      .sortBy(pair => pair._2, ascending = false)

    // saveAsTextFile is an action: it triggers job execution and writes the output.
    counts.saveAsTextFile(args(1))
    // Release cluster resources.
    sc.stop()
  }
}
下面用 Java 实现同样的 WordCount,分别给出匿名内部类写法和 Lambda 表达式写法两个版本。
/**
 * Spark word count in Java (anonymous inner class style): reads text from
 * args[0], counts word occurrences, sorts by count in descending order,
 * and writes the result to args[1].
 */
public class JavaWordCount {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("JavaWordCount");
        // Only one SparkContext may exist per JVM. JavaSparkContext wraps a
        // SparkContext internally, so we must NOT also construct a raw
        // SparkContext from the same conf — doing so fails at runtime.
        JavaSparkContext jsc = new JavaSparkContext(conf);
        // Read the input file as an RDD of lines.
        JavaRDD<String> lines = jsc.textFile(args[0]);
        // Split every line into words and flatten; flatMap requires an
        // Iterator<String> as the return value.
        JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String line) throws Exception {
                return Arrays.asList(line.split(" ")).iterator();
            }
        });
        // Pair each word with the count 1.
        JavaPairRDD<String, Integer> wordAndOne = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) throws Exception {
                return Tuple2.apply(s, 1);
            }
        });
        // Sum the counts per word (partial aggregation per partition, then global).
        JavaPairRDD<String, Integer> reduced = wordAndOne.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
        // sortByKey only sorts by the key, so swap (word, count) -> (count, word).
        JavaPairRDD<Integer, String> swaped = reduced.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
            @Override
            public Tuple2<Integer, String> call(Tuple2<String, Integer> tp) throws Exception {
                return tp.swap();
            }
        });
        // Sort by count, descending.
        JavaPairRDD<Integer, String> sorted = swaped.sortByKey(false);
        // Swap back to (word, count) for the final output.
        JavaPairRDD<String, Integer> result = sorted.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(Tuple2<Integer, String> tp) throws Exception {
                return tp.swap();
            }
        });
        // Action: triggers the job and writes the result (e.g. to HDFS).
        result.saveAsTextFile(args[1]);
        // Release cluster resources.
        jsc.stop();
    }
}
// Lambda-expression version of the same word count.
// NOTE(review): the enclosing class/method header (presumably
// `public static void main(String[] args)` of a LambdaWordCount class)
// is not visible in this chunk — confirm against the full file.
SparkConf conf = new SparkConf().setAppName("LambdaWordCount");
// Create the JavaSparkContext (wraps the single per-JVM SparkContext).
JavaSparkContext jsc = new JavaSparkContext(conf);
// Read the input file as an RDD of lines.
JavaRDD<String> lines = jsc.textFile(args[0]);
// flatMap takes a String and must return an Iterator<String>,
// hence the trailing .iterator() call.
JavaRDD<String> words = lines.flatMap(line -> Arrays.stream(line.split(" ")).iterator());
// Pair each word with the count 1, using Scala's Tuple2 (Java has no tuples).
JavaPairRDD<String, Integer> wordAndOne = words.mapToPair(word -> Tuple2.apply(word, 1));
// Sum the counts per word.
JavaPairRDD<String, Integer> reduced = wordAndOne.reduceByKey((a, b) -> a + b);
// sortByKey only sorts by the key, so swap (word, count) -> (count, word).
JavaPairRDD<Integer, String> swaped = reduced.mapToPair(tp -> tp.swap());
// Sort by count, descending.
JavaPairRDD<Integer, String> sorted = swaped.sortByKey(false);
// Swap back to (word, count) for the final output.
JavaPairRDD<String, Integer> result = sorted.mapToPair(tp -> tp.swap());
// Action: triggers the job and writes the result to HDFS.
result.saveAsTextFile(args[1]);
// Release cluster resources.
jsc.stop();
}