1.---WordCount in Java
package com.hjm.spark.daydayup;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
import java.util.Arrays;
import java.util.Iterator;
/**
 * Compared with Java, Scala is more elegant here: the API is richer, and implicit
 * conversions spare you from writing out generics and anonymous inner classes.
 */
public class JavaWordCount {
    public static void main(String[] args) {
        // create the conf object
        SparkConf conf = new SparkConf().setAppName("JavaWordCount");
        // create the Java Spark context
        JavaSparkContext jsc = new JavaSparkContext(conf);
        JavaRDD<String> lines = jsc.textFile(args[0]);
        // transformations begin here
        // split each line and flatten the pieces into words
        JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String line) throws Exception {
                return Arrays.asList(line.split(" ")).iterator();
            }
        });
        // lambda equivalent:
        // JavaRDD<String> words = lines.flatMap(line -> Arrays.stream(line.split(" ")).iterator());
        // turn each word into a pair: input String, output Tuple2<String, Integer>
        JavaPairRDD<String, Integer> wordAndOne = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) throws Exception {
                return new Tuple2<>(s, 1);
            }
        });
        // lambda equivalent:
        // JavaPairRDD<String, Integer> wordAndOne = words.mapToPair(s -> new Tuple2<>(s, 1));
        // reduce by key: sum the counts per word
        JavaPairRDD<String, Integer> reduced = wordAndOne.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
        // lambda equivalent:
        // JavaPairRDD<String, Integer> reduced = wordAndOne.reduceByKey((v1, v2) -> v1 + v2);
        // sortByKey can only sort by the key, so swap (word, count) into (count, word) first
        JavaPairRDD<Integer, String> swapped = reduced.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
            @Override
            public Tuple2<Integer, String> call(Tuple2<String, Integer> tp) throws Exception {
                return tp.swap();
            }
        });
        // lambda equivalent:
        // JavaPairRDD<Integer, String> swapped = reduced.mapToPair(tp -> tp.swap());
        // sort by count, descending
        JavaPairRDD<Integer, String> sorted = swapped.sortByKey(false);
        // swap the key/value order back to (word, count)
        JavaPairRDD<String, Integer> result = sorted.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(Tuple2<Integer, String> tp) throws Exception {
                return tp.swap();
            }
        });
        // lambda equivalent:
        // JavaPairRDD<String, Integer> result = sorted.mapToPair(tp -> tp.swap());
        // Action: save the result to the output path
        result.saveAsTextFile(args[1]);
        // release resources
        jsc.stop();
    }
}
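For comparison, here is the whole pipeline with the commented-out lambdas inlined; a minimal sketch with identical logic (Java 8+), reusing jsc and args from the main method above:

        // same job, lambda style; each step mirrors the anonymous classes above
        jsc.textFile(args[0])
           .flatMap(line -> Arrays.asList(line.split(" ")).iterator())
           .mapToPair(word -> new Tuple2<>(word, 1))   // (word, 1)
           .reduceByKey((a, b) -> a + b)               // sum counts per word
           .mapToPair(tp -> tp.swap())                 // (count, word) so sortByKey sorts by count
           .sortByKey(false)                           // descending
           .mapToPair(tp -> tp.swap())                 // back to (word, count)
           .saveAsTextFile(args[1]);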
args[0]: path of the input file; a local path or an HDFS path both work.
args[1]: path of the output directory.
A sample submit command and a way to view the results are sketched below.
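A minimal spark-submit sketch; the jar name, master URL, and HDFS paths are hypothetical placeholders (only the class name comes from the code above):

# jar name, master URL, and paths below are placeholders for your own
spark-submit \
  --class com.hjm.spark.daydayup.JavaWordCount \
  --master spark://node1:7077 \
  wordcount.jar \
  hdfs://node1:9000/wc/input \
  hdfs://node1:9000/wc/output

To view the results afterwards (same placeholder output path):

hdfs dfs -cat hdfs://node1:9000/wc/output/part-*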
2.---WordCount in Scala
In IDEA --->
package com.hjm.spark.daydayup

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object WordCount {
  def main(args: Array[String]): Unit = {
    // create the conf object and the context
    val conf = new SparkConf().setAppName("WordCount")
    val sc = new SparkContext(conf)
    // create the RDD
    val lines: RDD[String] = sc.textFile(args(0))
    // transformations begin here
    // split and flatten
    val words: RDD[String] = lines.flatMap(_.split(" "))
    // pair each word with a count of 1
    val wordAndOne: RDD[(String, Int)] = words.map((_, 1))
    // aggregate by key, then sort by count, descending
    val sorted: RDD[(String, Int)] = wordAndOne.reduceByKey(_ + _).sortBy(_._2, false)
    // Action operator: triggers job execution
    sorted.saveAsTextFile(args(1))
    // release resources
    sc.stop()
  }
}
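To run the object directly from IDEA without a cluster, the conf also needs a master URL; a minimal sketch, assuming local mode:

    // assumption: local[*] runs Spark in-process on all cores, for local testing only
    val conf = new SparkConf().setAppName("WordCount").setMaster("local[*]")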
---> Or even as a single line in the spark-shell on Linux (with Spark and the other required components already started):
scala> sc.textFile("/input/path").flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).sortBy(_._2, false).saveAsTextFile("/output/path")
The first path is the input file; a local path or an HDFS path both work.
The second path is the output directory.
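To inspect the result directly in the shell instead of writing files, an action such as take can be used; a sketch with the same placeholder input path:

scala> sc.textFile("/input/path").flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).sortBy(_._2, false).take(10).foreach(println) // "/input/path" is a placeholder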