实例1:
数据格式(消费者ID 消费时间 消费金额)
1 12:01 100
1 12:02 200
1 12:50 100
2 12:50 100
3 13:01 200
需求:统计每个小时,每个用户的消费总额
思路步骤:
1、id加上时间的小时部分(前两位)作为key
2、使用SparkSQL里面的 groupBy.agg() 方法:groupBy("id","time").agg(sum("consume"))
代码:
package com.soul.spark.SparkSQL
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
/**
* @author soulChun
* @create 2019-01-04-15:22
*/
object TextFileApp {
  /**
   * Reads tab-separated consumption records (userId, time "HH:MM", amount)
   * from a local text file and prints the total amount spent per user per
   * hour.
   *
   * Sample input (tab-separated):
   *   1  12:01  100
   *   1  12:02  100
   *   1  12:50  100
   *   2  12:50  200
   *   3  01:21  100
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("TextFileApp").getOrCreate()
    import spark.implicits._

    // Parse each line into a User row. The "time" field keeps only the hour
    // bucket: the first two characters of "HH:MM".
    // NOTE(review): substring(0, 2) will throw on malformed lines shorter
    // than two characters — assumes clean input. TODO confirm.
    val infoDF = spark.read.textFile("/Users/mac/soul/1.txt")
      .map(x => x.split("\t"))
      .map(x => User(x(0), x(1).substring(0, 2), x(2))).toDF

    infoDF.show()

    // "consume" was parsed as a String; cast it explicitly to long before
    // summing instead of relying on Spark's implicit string->double cast,
    // and give the aggregate column a readable alias.
    infoDF.groupBy("id", "time")
      .agg(sum($"consume".cast("long")).as("total_consume"))
      .sort("id")
      .show

    // Keep the JVM alive for five minutes so the Spark UI
    // (http://localhost:4040) can be inspected before the session stops.
    Thread.sleep(5 * 60 * 1000)
    spark.stop()
  }
}
case class User(id:String,time:String,consume:String)
结果:
实例2:
数据格式(名称,访问次数)
yy,1001
panda,1001
kuaishou,1002
yy,1001
yy,1003
panda,1003
kuaishou,1003
yy,1003
需求:求每个用户的总访问量
代码:
package com.soul.spark.SparkSQL
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
/**
* @author soulChun
* @create 2019-01-08-16:03
* yy,1001
* panda,1001
* kuaishou,1002
* yy,1001
* yy,1003
* panda,1003
* kuaishou,1003
* yy,1003
* 求每个用户的总访问量
*/
object PvuvApp {
  /**
   * Entry point: builds a local SparkSession and computes the total visit
   * count per user from a comma-separated "name,count" text file.
   *
   * Sample input:
   *   yy,1001
   *   panda,1001
   *   kuaishou,1002
   */
  def main(args: Array[String]): Unit = {
    // Bug fix: the app name was a copy-paste of "TextFileApp".
    val spark = SparkSession.builder().master("local[2]").appName("PvuvApp").getOrCreate()
    runBuildInExample(spark)
    spark.stop()
  }

  // One input record: user name and the visit count carried by one log line.
  case class LOGINFO(name: String, count: Int)

  /**
   * Loads "name,count" lines, converts them to a DataFrame, and prints the
   * per-user sum of counts.
   *
   * @param spark the active SparkSession supplied by the caller
   */
  def runBuildInExample(spark: SparkSession): Unit = {
    import spark.implicits._
    val logDF = spark.sparkContext.textFile("/Users/mac/soul/data/tmp/pvuv.txt")
      .map(x => x.split(",")).map(x => LOGINFO(x(0), x(1).toInt)).toDF()
    // groupBy("name").count() would count rows, not sum the visit counts,
    // so sum("count") is the correct aggregate here.
    // Bug fix: output alias corrected from the typo "totalTiems".
    logDF.groupBy("name").agg(sum("count").as("totalTimes")).show()
  }
}