Demo requirement: count the number of occurrences of each word.
Input file Spark.txt:
hello word
hello spark
hello hadoop
Approach (the sketch below traces the sample data through each step):
- textFile: read the file
- flatMap: split each line into words on spaces
- mapToPair: pair each word with an initial count of 1
- reduceByKey: sum the counts for identical words
- foreach: print the results
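For the sample file above, the data flows roughly like this at each step (a hand-worked sketch, not actual program output):
lines:       ["hello word", "hello spark", "hello hadoop"]
flatMap:     ["hello", "word", "hello", "spark", "hello", "hadoop"]
mapToPair:   [("hello",1), ("word",1), ("hello",1), ("spark",1), ("hello",1), ("hadoop",1)]
reduceByKey: [("hello",3), ("word",1), ("spark",1), ("hadoop",1)]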
import java.util.Arrays;
import java.util.Iterator;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;
public class SparkTest {
public static void main(String[] args) {
// 1. Create a SparkConf object and set the Spark application configuration
// setMaster() sets the URL of the master node of the Spark cluster to connect to
// "local" runs the application locally
SparkConf conf = new SparkConf().setAppName("SparkTest").setMaster("local");
// 2. Create a JavaSparkContext object
JavaSparkContext sc = new JavaSparkContext(conf);
// 3. Create an initial RDD from the input source
JavaRDD<String> lines = sc.textFile("C:\\********\\Spark.txt");
// 4. Apply transformation operations to the initial RDD to do the computation
// Anonymous-inner-class style
// Split each line into individual words
JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
private static final long serialVersionUID = 7703955252555938424L;
@Override
public Iterator<String> call(String line) throws Exception {
return Arrays.asList(line.split(" ")).iterator();
}
});
// Map each word to a (word, 1) pair
JavaPairRDD<String, Integer> pair = words.mapToPair(new PairFunction<String, String, Integer>() {
private static final long serialVersionUID = 1L;
@Override
public Tuple2<String, Integer> call(String word) throws Exception {
return new Tuple2<String, Integer>(word, 1);
}
});
// Sum the counts for identical words
JavaPairRDD<String, Integer> reduceByKey = pair.reduceByKey(new Function2<Integer, Integer, Integer>() {
private static final long serialVersionUID = 1L;
@Override
public Integer call(Integer v1, Integer v2) throws Exception {
return v1 + v2;
}
});
// Print the results (an action operation)
reduceByKey.foreach(new VoidFunction<Tuple2<String, Integer>>() {
private static final long serialVersionUID = 1L;
@Override
public void call(Tuple2<String, Integer> t) throws Exception {
System.out.println(t._1 + " " + t._2);
}
});
/**
* Lambda style
*/
JavaPairRDD<String, Integer> counts = lines
.flatMap(line -> Arrays.asList(line.split(" ")).iterator())
.mapToPair(w -> new Tuple2<String, Integer>(w, 1))
.reduceByKey((x, y) -> x + y);
counts.foreach(t -> System.out.println("lambda: " + t._1 + " " + t._2));
sc.close();
}
}
Run result:
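For the sample Spark.txt above, the counts work out to the pairs below (hand-derived; foreach output order is not guaranteed, and the lambda pipeline prints the same counts again with a "lambda: " prefix):
hello 3
word 1
spark 1
hadoop 1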
Spark version: 2.3.2
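As an optional extension, the counts can be ordered by frequency by swapping each (word, count) pair and sorting by key before printing. This is a minimal sketch against the same Spark 2.3.2 Java API; the class name SparkWordCountSorted is illustrative, and the input path placeholder is reused from above:
import java.util.Arrays;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;
public class SparkWordCountSorted {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("SparkWordCountSorted").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        // Same word-count pipeline as above, written in lambda style
        JavaPairRDD<String, Integer> counts = sc.textFile("C:\\********\\Spark.txt")
                .flatMap(line -> Arrays.asList(line.split(" ")).iterator())
                .mapToPair(w -> new Tuple2<>(w, 1))
                .reduceByKey((x, y) -> x + y);
        // Swap (word, count) -> (count, word), sort by count descending, then print
        counts.mapToPair(t -> new Tuple2<>(t._2, t._1))
              .sortByKey(false)
              .foreach(t -> System.out.println(t._2 + " " + t._1));
        sc.close();
    }
}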
Closing remarks: This is my first post, so feedback and corrections are welcome.