import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

private static void wordCount() {
// Name the job and run Spark locally
SparkConf conf = new SparkConf().setAppName("hzy").setMaster("local");
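// Note: "local" runs on a single thread; "local[*]" would use all available cores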
JavaSparkContext sparkContext = new JavaSparkContext(conf);
// Read the input file from a local path
JavaRDD<String> lines = sparkContext.textFile("E:\\hzyFtp\\aa\\words.txt");
// Split each line into words, e.g. [hello, world, hello]
JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
@Override
public Iterator<String> call(String s) throws Exception {
String[] arr = s.split(" ");
List<String> list = Arrays.asList(arr);
return list.iterator();
}
});
// Map each word to a (word, 1) pair, e.g. (hello,1), (world,1), (hello,1)
JavaPairRDD<String, Integer> tuples = words.mapToPair(new PairFunction<String, String, Integer>() {
@Override
public Tuple2<String, Integer> call(String s) throws Exception {
return new Tuple2<>(s, 1);
}
});
// Reduce by key to aggregate the counts, e.g. (hello,2), (world,1)
JavaPairRDD<String,Integer> reduced = tuples.reduceByKey(new Function2<Integer, Integer, Integer>() {
@Override
public Integer call(Integer v1, Integer v2) throws Exception {
return v1+v2;
}
});
// Swap word and count so the RDD can be sorted by count
JavaPairRDD<Integer, String> swapped = reduced.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
@Override
public Tuple2<Integer, String> call(Tuple2<String, Integer> tup) throws Exception {
return tup.swap();
}
});
// Sort by count (ascending by default)
JavaPairRDD<Integer, String> sorted = swapped.sortByKey();
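// Note: sortByKey(false) would sort descending, putting the most frequent words first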
// Swap back to (word, count) order
JavaPairRDD<String,Integer> res = sorted.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
@Override
public Tuple2<String, Integer> call(Tuple2<Integer, String> tup) throws Exception {
return tup.swap();
}
});
// Write out the result; in most cases this would go to HDFS rather than a local path
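// e.g. res.saveAsTextFile("hdfs://namenode:9000/wordcount/out");  // hypothetical HDFS URI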
res.saveAsTextFile("D:\\ceshi\\out");
sparkContext.stop();
}
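For comparison, the same pipeline can be written much more compactly with Java 8 lambdas. Below is a minimal sketch of the identical logic; the method name wordCountLambda is made up for illustration, and the local master and example paths are carried over from the code above.

private static void wordCountLambda() {
    SparkConf conf = new SparkConf().setAppName("hzy").setMaster("local");
    JavaSparkContext sparkContext = new JavaSparkContext(conf);
    JavaRDD<String> lines = sparkContext.textFile("E:\\hzyFtp\\aa\\words.txt");
    JavaPairRDD<String, Integer> res = lines
            .flatMap(s -> Arrays.asList(s.split(" ")).iterator()) // split lines into words
            .mapToPair(s -> new Tuple2<>(s, 1))                   // (word, 1)
            .reduceByKey(Integer::sum)                            // (word, count)
            .mapToPair(Tuple2::swap)                              // (count, word)
            .sortByKey()                                          // sort by count, ascending
            .mapToPair(Tuple2::swap);                             // back to (word, count)
    res.saveAsTextFile("D:\\ceshi\\out");
    sparkContext.stop();
}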