Writing the program (runs directly on Windows)
1. Create a new Maven project
1.1 After the project is created, right-click it --> Add Framework Support --> check Scala
1.2 Under src/main, create a new directory named scala --> right-click the scala directory --> Mark Directory as --> Sources Root
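Once the remaining steps are done, the project will look roughly like this (a sketch; the project name sparkWordCount and the input directory come from the paths used in step 5):
sparkWordCount/
├── pom.xml
├── input/
│   └── 1.txt
└── src/
    └── main/
        ├── scala/           (marked as Sources Root)
        └── resources/
            └── log4j.properties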
2. Configure logging (print only ERROR-level logs)
2.1 Under src/main/resources, create a new File named log4j.properties
2.2 Add the following configuration:
log4j.rootCategory=ERROR, console
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
# Set the default spark-shell log level to ERROR. When running the spark-shell, the
# log level for this class is used to overwrite the root logger's log level, so that
# the user can have different defaults for the shell and regular Spark apps.
log4j.logger.org.apache.spark.repl.Main=ERROR
# Settings to quiet third party logs that are too verbose
log4j.logger.org.spark_project.jetty=ERROR
log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR
log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=ERROR
log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=ERROR
log4j.logger.org.apache.parquet=ERROR
log4j.logger.parquet=ERROR
# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
3. Add the Spark dependency to pom.xml
<dependencies>
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-core_2.12</artifactId>
        <version>3.0.0</version>
    </dependency>
</dependencies>
<build>
    <finalName>WordCount</finalName>
    <plugins>
        <plugin>
            <groupId>net.alchim31.maven</groupId>
            <artifactId>scala-maven-plugin</artifactId>
            <version>3.4.6</version>
            <executions>
                <execution>
                    <goals>
                        <goal>compile</goal>
                        <goal>testCompile</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
    </plugins>
</build>
Note: if your Maven version is 3.2.x and the plugin fails to download, change the plugin version to 3.3.2.
4. Add the packaging plugin to pom.xml (inside the <plugins> section above), so that the built jar bundles its runtime dependencies; the packaging command is shown after the plugin below.
<plugin>
    <groupId>org.apache.maven.plugins</groupId>
    <artifactId>maven-assembly-plugin</artifactId>
    <version>3.0.0</version>
    <configuration>
        <descriptorRefs>
            <descriptorRef>jar-with-dependencies</descriptorRef>
        </descriptorRefs>
    </configuration>
    <executions>
        <execution>
            <id>make-assembly</id>
            <phase>package</phase>
            <goals>
                <goal>single</goal>
            </goals>
        </execution>
    </executions>
</plugin>
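With both plugins in place, the project is packaged from the project root with Maven (a minimal sketch; the jar names follow from the finalName WordCount configured above):
mvn clean package
Afterwards target/ should contain WordCount.jar (classes only) and WordCount-jar-with-dependencies.jar (the fat jar produced by the assembly plugin).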
5. Write the code
5.1 Output to the console
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object WordCount1 {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("SparkWordCount").setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)
    // absolute path of the file to read
    val lineRDD: RDD[String] = sc.textFile("D:\\develop\\Workspaces\\IdeaProjects\\sparkWordCount\\input\\1.txt")
    // split each line on spaces and flatten into individual words
    val wordRDD: RDD[String] = lineRDD.flatMap(_.split(" "))
    // map each word to a (word, 1) pair
    val word2OneRDD: RDD[(String, Int)] = wordRDD.map((_, 1))
    // sum the counts of identical words
    val word2SumRDD: RDD[(String, Int)] = word2OneRDD.reduceByKey(_ + _)
    // collect the results back to the driver and print them
    val result: Array[(String, Int)] = word2SumRDD.collect()
    result.foreach(println)
    sc.stop()
  }
}
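As a quick sanity check (a hypothetical example; the contents of your 1.txt will differ): if input\1.txt contained the two lines "hello spark" and "hello scala", the console would print the three tuples in no particular order:
(hello,2)
(spark,1)
(scala,1)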
5.2 Output to a file
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object WordCount2 {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("SparkWordCount").setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)
    // absolute path of the file to read
    val lineRDD: RDD[String] = sc.textFile("D:\\develop\\Workspaces\\IdeaProjects\\sparkWordCount\\input\\1.txt")
    // split each line on spaces and flatten into individual words
    val wordRDD: RDD[String] = lineRDD.flatMap(_.split(" "))
    // map each word to a (word, 1) pair
    val word2OneRDD: RDD[(String, Int)] = wordRDD.map((_, 1))
    // sum the counts of identical words
    val word2SumRDD: RDD[(String, Int)] = word2OneRDD.reduceByKey(_ + _)
    // absolute path for the output; the directory must not already exist (Spark creates it)
    word2SumRDD.saveAsTextFile("D:\\develop\\Workspaces\\IdeaProjects\\sparkWordCount\\output\\1.txt")
    sc.stop()
  }
}
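Note that saveAsTextFile treats the given path as an output directory, not a single file: Spark writes one part file per partition plus a _SUCCESS marker into it. With the path above, the output would look roughly like this (the number of part files depends on the partition count):
output\1.txt\
    _SUCCESS
    part-00000
    part-00001
    ...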