This is a Maven project. The prerequisites are a running Hadoop cluster and a running Spark cluster, with the input file already uploaded to HDFS.
The pom.xml is as follows:
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>test</groupId>
    <artifactId>spark</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <packaging>jar</packaging>
    <name>spark</name>
    <url>http://maven.apache.org</url>
    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>
    <dependencies>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>3.8.1</version>
            <scope>test</scope>
        </dependency>
        <!-- Spark core built for Scala 2.10, matching the Spark 2.2.0 cluster -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.10</artifactId>
            <version>2.2.0</version>
        </dependency>
    </dependencies>
    <build>
        <plugins>
            <!-- Compile with Java 8 so the lambda syntax in the code below works -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
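One packaging note: with plain jar packaging, only this project's classes go into the artifact, which is fine here because the cluster already provides spark-core at runtime. If the job ever pulls in third-party libraries, one common option is to bundle them with maven-shade-plugin. A minimal sketch to add under build/plugins (the plugin version here is an example):

<plugin>
    <groupId>org.apache.maven.plugins</groupId>
    <artifactId>maven-shade-plugin</artifactId>
    <version>3.1.0</version>
    <executions>
        <execution>
            <!-- bind shade to the package phase so `mvn package` emits a bundled jar -->
            <phase>package</phase>
            <goals>
                <goal>shade</goal>
            </goals>
        </execution>
    </executions>
</plugin>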
The Java code:
package test.spark;

import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import scala.Tuple2;

public class CountWord {
    @SuppressWarnings("resource")
    public static void main(String[] args) {
        // Create a Java Spark context pointed at the standalone master
        SparkConf conf = new SparkConf().setMaster("spark://192.168.7.202:7077").setAppName("My App");
        JavaSparkContext sc = new JavaSparkContext(conf);
        // Read the input data from HDFS
        JavaRDD<String> input = sc.textFile("hdfs://192.168.7.202:9000/test/sql.txt");
        // Split each line into words on spaces
        JavaRDD<String> words = input.flatMap((String x) -> {
            List<String> list = Arrays.asList(x.split(" "));
            return list.iterator();
        });
        // Map each word to a (word, 1) pair, then sum the counts per word
        JavaPairRDD<String, Integer> count = words.mapToPair((String x) -> {
            return new Tuple2<>(x, 1);
        }).reduceByKey((Integer v1, Integer v2) -> {
            return v1 + v2;
        });
        // Write the word counts back to HDFS as text files
        count.saveAsTextFile("hdfs://192.168.7.202:9000/test/sql-spark2");
        // Release cluster resources
        sc.stop();
    }
}
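As a side note, the same logic can be sanity-checked without the cluster by switching the master to local[*] and reading a local file. A minimal sketch, where the class name CountWordLocal and the input path are illustrative:

package test.spark;

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;

import scala.Tuple2;

public class CountWordLocal {
    public static void main(String[] args) {
        // local[*] runs Spark inside this JVM on all available cores; no cluster needed
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("CountWordLocal");
        JavaSparkContext sc = new JavaSparkContext(conf);
        JavaPairRDD<String, Integer> count = sc.textFile("sql.txt") // hypothetical local path
                .flatMap(x -> Arrays.asList(x.split(" ")).iterator())
                .mapToPair(x -> new Tuple2<>(x, 1))
                .reduceByKey((v1, v2) -> v1 + v2);
        // Print the counts instead of writing to HDFS
        count.collect().forEach(t -> System.out.println(t._1() + "\t" + t._2()));
        sc.stop();
    }
}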
Finally, package the project.
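With the pom above, a plain Maven build is enough; run it from the project root:

mvn clean package

The jar ends up at target/spark-0.0.1-SNAPSHOT.jar.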
Upload the jar to the master node and run:
/data1/hadoop/spark-2.2.0-bin-hadoop2.7/bin/spark-submit --master spark://192.168.7.202:7077 --class test.spark.CountWord /data1/hadoop/spark-2.2.0-bin-hadoop2.7/shell/spark-0.0.1-SNAPSHOT.jar
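In this command, --master points spark-submit at the standalone master, --class names the entry point inside the jar, and the final argument is the application jar itself, which Spark distributes to the workers. If the defaults don't fit the cluster, resource options such as --executor-memory and --total-executor-cores can be added in the same way.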
Check the result:
hadoop fs -cat /test/sql-spark2/part-00001
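saveAsTextFile writes one part-XXXXX file per partition of the result RDD, and each line is a Tuple2 rendered as (word,count). To dump the whole result at once, use a glob instead of naming a single part file:

hadoop fs -cat /test/sql-spark2/part-*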
Notice how much less code this takes than the equivalent MapReduce job in the Hadoop ecosystem.
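For a rough sense of the difference, here is a minimal sketch of just the mapper and reducer a classic Hadoop MapReduce word count needs, following the standard org.apache.hadoop.mapreduce word count example (the driver class with its job setup is omitted):

package test.spark;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

public class WordCountMR {

    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            // Split each line on whitespace and emit a (word, 1) pair per token
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            // Sum the counts emitted for each word
            int sum = 0;
            for (IntWritable v : values) {
                sum += v.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }
}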