Environment
Spark version: 1.6.1 (already installed)
OS: CentOS 6.5
Java: 1.8
Hadoop: 2.3
Most Spark tutorials online are written for Scala or Python. In this article we use Spark's Java API to write a WordCount program. First, set up the Java project.
The pom.xml is as follows:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.study</groupId>
    <artifactId>com.sparkstudy</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <dependency> <!-- Spark dependency -->
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.10</artifactId>
            <version>1.6.2</version>
        </dependency>
        <dependency> <!-- Hadoop dependency -->
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.0</version>
        </dependency>
    </dependencies>
</project>
Then write the Java program:
import scala.Tuple2;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;

import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

public final class JavaWordCount {
    private static final Pattern SPACE = Pattern.compile(" ");

    public static void main(String[] args) throws Exception {
        if (args.length < 1) {
            System.err.println("Usage: JavaWordCount <file>");
            System.exit(1); // exit the program
        }

        SparkConf sparkConf = new SparkConf().setAppName("Java-WordCount");
        JavaSparkContext ctx = new JavaSparkContext(sparkConf);

        // Read the input file as an RDD of lines (at least 1 partition).
        JavaRDD<String> lines = ctx.textFile(args[0], 1);

        // Split each line on spaces to get an RDD of words.
        // In Spark 1.x, FlatMapFunction.call returns an Iterable.
        JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterable<String> call(String s) {
                return Arrays.asList(SPACE.split(s));
            }
        });

        // Map each word to a (word, 1) pair.
        JavaPairRDD<String, Integer> ones = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) {
                return new Tuple2<String, Integer>(s, 1);
            }
        });

        // Sum the counts for each word.
        JavaPairRDD<String, Integer> counts = ones.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer i1, Integer i2) {
                return i1 + i2;
            }
        });

        // Collect the results back to the driver and print them.
        List<Tuple2<String, Integer>> output = counts.collect();
        for (Tuple2<?, ?> tuple : output) {
            System.out.println(tuple._1() + ": " + tuple._2());
        }

        ctx.stop();
        System.out.println("ok ... ");
    }
}
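One note if you later upgrade: in Spark 2.x the Java FlatMapFunction was changed to return an Iterator instead of an Iterable, so the flatMap step above would need to become the following (a sketch for Spark 2.x only; it also requires importing java.util.Iterator, and the rest of the program is unchanged):

// Spark 2.x variant of the flatMap step: call() returns an Iterator.
JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
    @Override
    public Iterator<String> call(String s) {
        return Arrays.asList(SPACE.split(s)).iterator();
    }
});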
OK, use IDEA to package the project into a jar, then run it in one of the following ways.
[Method 1: submit with the spark command]
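If you prefer building from the command line instead of IDEA, the same jar can be produced with Maven; with the pom above, Maven's default packaging writes it to target/com.sparkstudy-1.0-SNAPSHOT.jar:

mvn clean package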
spark-submit --class JavaWordCount --name javaWordCount --master local[2] --num-executors 1 --executor-memory 128M --executor-cores 2 com.sparkstudy-1.0-SNAPSHOT.jar file:///home/hdfs/root.log
Here file:///home/hdfs/root.log is a local file of your own; its contents can be anything. Spark's output looks like this:
is: 1
hello: 1
buautiful: 1
baby: 1
the: 1
world: 2
ok ...
[Method 2: run directly from IDEA]
Add -Dspark.master=local to the VM options of the run configuration, and the program can then be run directly.
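Equivalently, the master can be hard-coded for local testing (a sketch; remove setMaster before submitting to a cluster, since a value set in code takes precedence over the one passed on the command line):

// For local runs from the IDE only: set the master in code.
SparkConf sparkConf = new SparkConf()
        .setAppName("Java-WordCount")
        .setMaster("local[2]"); // run locally with 2 threads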
[Method 3: yarn-cluster]
When using this mode, first upload the file to be analyzed to HDFS (with hadoop fs -put <local file> <HDFS target path>), for example as shown below:
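The command below uses the local file from Method 1 and the HDFS path expected by the spark-submit command that follows:

hadoop fs -put /home/hdfs/root.log /user/hdfs/root.log

With the file in place on HDFS, submit the job: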
spark-submit --class JavaWordCount --name javaWordCount --master yarn --deploy-mode cluster com.sparkstudy-1.0-SNAPSHOT.jar /user/hdfs/root.log
Open the YARN ResourceManager UI at http://namenode2:8088/cluster to confirm that the job has run.
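In cluster mode the driver runs on a cluster node, so the word counts printed by System.out.println do not appear in the submitting terminal. Assuming log aggregation is enabled on the cluster, they can be retrieved with the YARN CLI, using the application ID shown in the ResourceManager UI:

yarn logs -applicationId <applicationId>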
[Method 4: yarn-client]
The command is similar to Method 3, but in this mode the job's log output is printed to the terminal:
spark-submit --class JavaWordCount --name javaWordCount --master yarn --deploy-mode client com.sparkstudy-1.0-SNAPSHOT.jar /user/hdfs/root.log