直接上代码,如果有不理解的地方,可以百度或留言。
主要是rdd算子的使用,本身实现比较简单,不涉及spark的优化等问题,只是供大家参考
import org.apache.spark.{SparkConf,SparkContext}
/**
* Created by zhang on 2018/6/29.
*/
object WordCount {
/**
 * Extracts the date prefix (first 10 characters) from a timestamp string.
 * Given the `substring(0, 10)` in the original, the input is presumably a
 * timestamp like "yyyy-MM-dd HH:mm:ss" — TODO confirm against text.txt format.
 *
 * Uses `take` instead of `substring` so a line shorter than 10 characters
 * (e.g. a blank or malformed record) is returned as-is rather than throwing
 * a StringIndexOutOfBoundsException.
 *
 * @param line raw timestamp field
 * @return the first up-to-10 characters of `line` (the date portion)
 */
def dealTime(line: String): String =
  line.take(10)
def main(args:Array[String]){
val conf = new SparkConf().setAppName("WordCount by zhangqm").setMaster("local")
val sc = new SparkContext(conf)
val sourdd = sc.textFile("F:\\intellij_workspace\\sparklearning\\src\\main\\scala\\text.txt")
val uv = sourdd.map(line=>(line.split(",")(0),line.split(",")(1),line.split(",")(2)))
.map(line=>(line._1,dealTime(line._2),line._3)).filter(line=>line._3.contains("app")).map(line=>(line._3,line._1))
.groupByKey().map(line=>(line._1,line._2.toList.distinct.size))
val pv = sourdd.m