1. Set up the environment: add the Maven dependencies below (the coordinates can be found on the Spark website).
<properties>
    <scala.version>2.11.12</scala.version>
</properties>

<dependencies>
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-core_2.11</artifactId>
        <version>2.3.3</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.8.5</version>
    </dependency>
</dependencies>
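Note that the suffix of the spark-core artifact (_2.11) must match the scala.version property (2.11.x here). The snippet above is only the dependency fragment of the pom; since the code in step 2 uses Java 8 lambdas, the compiler level also has to be set to 1.8. A minimal sketch of that build section, assuming the standard maven-compiler-plugin:

<build>
    <plugins>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-compiler-plugin</artifactId>
            <configuration>
                <!-- Java 8 is required for the lambda syntax used in step 2 -->
                <source>1.8</source>
                <target>1.8</target>
            </configuration>
        </plugin>
    </plugins>
</build>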
2. Write the code
package cn.zkz.spark;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;

public class MysprakWithSpark {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("Mysprak2");
        JavaSparkContext sc = new JavaSparkContext(conf);

        // Read the input file; each RDD element is one line of text
        JavaRDD<String> textFile = sc.textFile(args[0]);

        JavaPairRDD<String, Integer> counts = textFile
                // split each line on spaces and flatten into individual words
                .flatMap(s -> Arrays.asList(s.split(" ")).iterator())
                // map each word to a (word, 1) pair
                .mapToPair(word -> new Tuple2<>(word, 1))
                // sum the counts per word
                .reduceByKey((a, b) -> a + b);

        // Write the (word, count) pairs to the output directory
        counts.saveAsTextFile(args[1]);
        sc.stop();
    }
}
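Before packaging, the same pipeline can be tried in local mode, where Spark runs inside a single JVM and no cluster is needed. A minimal sketch; the class name and input path below are placeholders for illustration, not part of the project above:

package cn.zkz.spark;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;

public class LocalWordCountTest {
    public static void main(String[] args) {
        // local[*] runs Spark in-process with one thread per core, no cluster required
        SparkConf conf = new SparkConf().setAppName("Mysprak2-local").setMaster("local[*]");
        JavaSparkContext sc = new JavaSparkContext(conf);
        sc.textFile("input/test.txt")                              // example local path
          .flatMap(s -> Arrays.asList(s.split(" ")).iterator())
          .mapToPair(word -> new Tuple2<>(word, 1))
          .reduceByKey((a, b) -> a + b)
          .collect()                                               // bring results back to the driver
          .forEach(t -> System.out.println(t._1() + ": " + t._2()));
        sc.stop();
    }
}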
3. Build the jar (mvn package), upload it to the cluster, and write the submit script. The last two arguments are the HDFS input file and output directory (args[0] and args[1] in the code); the output directory must not exist yet, or saveAsTextFile will fail.
spark-submit \
--master spark://mini1:7077 \
--class cn.zkz.spark.MysprakWithSpark \
--name wordcount \
--executor-memory 700m \
/usr/app/spark-2.3.3-bin-hadoop2.7/scala-1.0-SNAPSHOT.jar \
hdfs://mini1:9000/wordcount/test.txt \
hdfs://mini1:9000/spark/output12
Note: HDFS and the Spark standalone cluster must be running before the job is submitted.
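For reference, a typical start-up and verification sequence is sketched below, assuming the Hadoop sbin scripts are on the PATH and fs.defaultFS points at hdfs://mini1:9000; adjust paths to your environment:

start-dfs.sh                                            # start HDFS (NameNode and DataNodes)
/usr/app/spark-2.3.3-bin-hadoop2.7/sbin/start-all.sh    # start the standalone Spark master and workers
hdfs dfs -cat /spark/output12/part-*                    # inspect the word counts once the job finishes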