First implementation:
import org.apache.spark.{SparkConf, SparkContext}

object Spark01_WordCount {

  def main(args: Array[String]): Unit = {
    // Application
    // Part 1: the Spark framework
    // TODO Establish the connection to the Spark framework
    val sparkConf = new SparkConf().setMaster("local").setAppName("WordCount")
    val sc = new SparkContext(sparkConf)

    // Part 2: TODO Execute the business logic
    // 1. Read the files and get the data line by line: "hello world"
    val lines = sc.textFile("datas")
    // 2. Split each line into words (flatten): "hello world" => hello, world
    val words = lines.flatMap(_.split(" "))
    // 3. Group identical words together so they can be counted: (hello, hello, hello), (world, world)
    val wordGroup = words.groupBy(word => word)
    // 4. Transform each group into a count: (hello, hello, hello), (world, world) => (hello, 3), (world, 2)
    val wordToCount = wordGroup.map {
      case (word, list) => (word, list.size)
    }
    // 5. Collect the result and print it to the console
    wordToCount.collect().foreach(println)

    // Part 3: TODO Close the connection
    sc.stop()
  }
}
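To make steps 3 and 4 concrete, here is a minimal sketch of the same groupBy-then-map counting logic on a plain Scala collection; the sample words are made up for illustration:

object GroupByDemo {
  def main(args: Array[String]): Unit = {
    // Hypothetical sample words, standing in for the flattened RDD
    val words = List("hello", "world", "hello", "hello", "world")

    // Step 3: group identical words together
    // => Map(hello -> List(hello, hello, hello), world -> List(world, world))
    val wordGroup = words.groupBy(word => word)

    // Step 4: replace each group's list of words by its size
    // => Map(hello -> 3, world -> 2)
    val wordToCount = wordGroup.map { case (word, list) => (word, list.size) }

    wordToCount.foreach(println) // prints (hello,3) and (world,2); order not guaranteed
  }
}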
Second implementation:
Among the RDD transformation operators, reduceByKey groups by key just like groupByKey does, but it also aggregates the values within each group. A comparison sketch follows the code below.
import org.apache.spark.{SparkConf, SparkContext}

object Spark02_WordCount {

  def main(args: Array[String]): Unit = {
    // Application
    // Part 1: the Spark framework
    // TODO Establish the connection to the Spark framework
    val sparkConf = new SparkConf().setMaster("local").setAppName("WordCount")
    val sc = new SparkContext(sparkConf)

    // Part 2: TODO Execute the business logic
    // 1. Read the files and get the data line by line: "hello world"
    val lines = sc.textFile("datas")
    // 2. Split each line into words (flatten): "hello world" => hello, world
    val words = lines.flatMap(_.split(" "))
    // ********************************************
    // 3. Map every word to a pair: hello, world => (hello, 1), (world, 1)
    val wordToOne = words.map(word => (word, 1))
    // 4. reduceByKey: groups by key and aggregates the values in one step
    val wordToCount = wordToOne.reduceByKey(_ + _)
    // *********************************************
    // 5. Collect the result and print it to the console
    wordToCount.collect().foreach(println)

    // Part 3: TODO Close the connection
    sc.stop()
  }
}
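As a minimal sketch of the difference described above (local mode, made-up sample pairs standing in for wordToOne): groupByKey only collects the values per key, so the counting still needs a separate map, while reduceByKey groups and aggregates in a single operator and can combine values per key before the shuffle.

import org.apache.spark.{SparkConf, SparkContext}

object Spark03_GroupVsReduce {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("GroupVsReduce"))

    // Made-up (word, 1) pairs, standing in for wordToOne above
    val wordToOne = sc.makeRDD(List(("hello", 1), ("world", 1), ("hello", 1)))

    // groupByKey: (hello, [1, 1]), (world, [1]) -- counting needs an extra map
    val viaGroup = wordToOne.groupByKey().map { case (word, ones) => (word, ones.sum) }

    // reduceByKey: grouping and aggregation in one step => (hello, 2), (world, 1)
    val viaReduce = wordToOne.reduceByKey(_ + _)

    viaGroup.collect().foreach(println)
    viaReduce.collect().foreach(println)

    sc.stop()
  }
}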
Error-level logging: the following log4j.properties (typically placed under src/main/resources so it ends up on the classpath) limits console output to ERROR-level messages:
log4j.rootCategory=ERROR, console
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
# Set the default spark-shell log level to ERROR. When running the spark-shell, the
# log level for this class is used to overwrite the root logger's log level, so that
# the user can have different defaults for the shell and regular Spark apps.
log4j.logger.org.apache.spark.repl.Main=ERROR
# Settings to quiet third party logs that are too verbose
log4j.logger.org.spark_project.jetty=ERROR
log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR
log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=ERROR
log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=ERROR
log4j.logger.org.apache.parquet=ERROR
log4j.logger.parquet=ERROR
# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
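If editing log4j.properties is inconvenient, the log level can also be lowered programmatically right after the SparkContext is created. A minimal sketch, assuming Spark 1.4 or later (where SparkContext.setLogLevel is available):

import org.apache.spark.{SparkConf, SparkContext}

object Spark04_QuietLogs {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("QuietLogs"))
    // Suppress INFO/WARN noise for this application only;
    // valid levels include ALL, DEBUG, INFO, WARN, ERROR, FATAL, OFF
    sc.setLogLevel("ERROR")

    // Same word count as the second implementation above
    sc.textFile("datas")
      .flatMap(_.split(" "))
      .map(word => (word, 1))
      .reduceByKey(_ + _)
      .collect()
      .foreach(println)

    sc.stop()
  }
}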