I. A sort-based WordCount program
1. Requirements
Input file: spark.txt
- Count the number of occurrences of each word in the text file.
- Sort the words by occurrence count, in descending order (see the example below).
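For example, if spark.txt contained the single line "hello world hello", the sorted output would be (hello,2) followed by (world,1).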
2. Implementation
① Java version
package cn.spark.study.core;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;
import java.util.Arrays;
public class SortWordCount {
    public static void main(String[] args) {
        // Run Spark locally with a single thread
        SparkConf conf = new SparkConf()
                .setMaster("local")
                .setAppName("SortWordCount");
        JavaSparkContext sc = new JavaSparkContext(conf);
        // Read the input file as an RDD of lines
        JavaRDD<String> lines = sc.textFile("E:\\ziliao\\Spark\\node\\shuju\\spark.txt");
        // Split each line on spaces to get an RDD of words
        JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterable<String> call(String s) throws Exception {
                return Arrays.asList(s.split(" "));
            }
        });
        // Map each word to a (word, 1) pair
        JavaPairRDD<String, Integer> wordAndOne = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) throws Exception {
                return new Tuple2<String, Integer>(s, 1);
            }
        });
        // Sum the 1s per key to get each word's count
        JavaPairRDD<String, Integer> wAndC = wordAndOne.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
        // Up to this point we have each word's count, but the new requirement is to
        // sort by occurrence count, descending.
        // What do the elements of the wAndC RDD look like? Pairs of the form (hello,2), (word,3).
        // We need to transform them into the (3,word) form before we can sort by count,
        // i.e. swap key and value.
        JavaPairRDD<Integer, String> cAndW = wAndC.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
            @Override
            public Tuple2<Integer, String> call(Tuple2<String, Integer> tuple2) throws Exception {
                return new Tuple2<Integer, String>(tuple2._2, tuple2._1);
            }
        });
        // Sort by key (the count), descending
        JavaPairRDD<Integer, String> sortedCAndW = cAndW.sortByKey(false);
        // Swap back to (word, count) for readable output
        JavaPairRDD<String, Integer> sortedWAndC = sortedCAndW.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(Tuple2<Integer, String> tuple2) throws Exception {
                return new Tuple2<String, Integer>(tuple2._2, tuple2._1);
            }
        });
        // Print the results
        sortedWAndC.foreach(new VoidFunction<Tuple2<String, Integer>>() {
            @Override
            public void call(Tuple2<String, Integer> tuple2) throws Exception {
                System.out.println(tuple2._1 + " appears " + tuple2._2 + " times");
            }
        });
        sc.close();
    }
}
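For comparison, here is a minimal sketch of the same pipeline written with Java 8 lambdas instead of anonymous classes. It assumes Spark 2.x, where the FlatMapFunction passed to flatMap returns an Iterator rather than an Iterable; the class name SortWordCountLambda and the bare spark.txt path are placeholders, not part of the original program.
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;
import java.util.Arrays;

public class SortWordCountLambda {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local").setAppName("SortWordCountLambda");
        JavaSparkContext sc = new JavaSparkContext(conf);
        sc.textFile("spark.txt")                                   // placeholder input path
          .flatMap(s -> Arrays.asList(s.split(" ")).iterator())    // line -> words (Spark 2.x: Iterator)
          .mapToPair(w -> new Tuple2<>(w, 1))                      // word -> (word, 1)
          .reduceByKey(Integer::sum)                               // sum counts per word
          .mapToPair(Tuple2::swap)                                 // (word, count) -> (count, word)
          .sortByKey(false)                                        // descending by count
          .mapToPair(Tuple2::swap)                                 // back to (word, count)
          .foreach(t -> System.out.println(t._1 + " appears " + t._2 + " times"));
        sc.close();
    }
}
The logic is identical to the anonymous-class version; the two Tuple2::swap calls express the swap-sort-swap pattern that the comments above describe.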