1. Get the number of cameras actually seen at each checkpoint from the monitoring log table
Side note: query the checkpoints with no passing-car records (set aside for now):
select * from traffic.monitor_camera_info where monitor_id not in (
select monitor_id from traffic.monitor_flow_action
)
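One caveat with NOT IN: if the subquery ever returns a NULL monitor_id, NOT IN matches nothing at all. A left anti join sidesteps that NULL trap; a minimal sketch of the equivalent query:
select m.*
from traffic.monitor_camera_info m
left anti join traffic.monitor_flow_action f
  on m.monitor_id = f.monitor_id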
val sql1 = "select monitor_id,count(distinct camera_id) cameraCnt,count(0) carCnt from traffic.monitor_flow_action where monitor_id!='0008' group by monitor_id"
val df1 = spark.sql(sql1)
df1.createOrReplaceTempView("monitor_flow_tmp1")
2. Get the number of cameras registered at each checkpoint from the checkpoint-camera relation table
val sql2 = "select monitor_id,count(0) cameraCnt from traffic.monitor_camera_info where monitor_id!='0008' group by monitor_id"
val df2 = spark.sql(sql2)
df2.createOrReplaceTempView("monitor_flow_tmp2")
3. Compare steps 1 and 2 to find which checkpoints have broken cameras, and count the broken cameras per checkpoint
val sql3 = "select t2.monitor_id,t2.cameraCnt,nvl(t1.cameraCnt,0) normalCameraCnt,t2.cameraCnt-nvl(t1.cameraCnt,0) abNormalCameraCnt,
case when t2.cameraCnt=nvl(t1.cameraCnt,0) then 1 else 0 end as normalMonitorFlag
from monitor_flow_tmp2 t2 left join monitor_flow_tmp1 t1 on t2.monitor_id=t1.monitor_id"
val df3 = spark.sql(sql3)
df3.createOrReplaceTempView("monitor_flow_tmp3")
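The same comparison can be written with the DataFrame API instead of temp views; a minimal sketch, assuming df1 and df2 from steps 1 and 2 (coalesce plays the role of nvl here):
import org.apache.spark.sql.functions._

val df3Alt = df2.as("t2")
  .join(df1.as("t1"), Seq("monitor_id"), "left")
  .select(
    col("monitor_id"),
    col("t2.cameraCnt"),
    coalesce(col("t1.cameraCnt"), lit(0)).as("normalCameraCnt"),
    (col("t2.cameraCnt") - coalesce(col("t1.cameraCnt"), lit(0))).as("abNormalCameraCnt"),
    when(col("t2.cameraCnt") === coalesce(col("t1.cameraCnt"), lit(0)), 1).otherwise(0).as("normalMonitorFlag"))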
4. Get the counts of normal checkpoints, broken checkpoints, normal cameras, and broken cameras
val sql4 = "select " + args(0) + " as taskId, sum(normalMonitorFlag) normalMonitorCount,count(monitor_id)-sum(normalMonitorFlag) abNormalMonitorCount,sum(normalCameraCnt) normalCameraCnt,sum(abNormalCameraCnt) abNormalCameraCnt from monitor_flow_tmp3"
val df4 = spark.sql(sql4)
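Note that args(0) is spliced straight into the SQL text, which assumes a numeric task id (a string value would need quoting) and is fragile in general. A sketch of a safer variant, attaching the id as a literal column instead (here taskId comes out as a string column; cast it if the target table expects a number):
import org.apache.spark.sql.functions.lit

val df4Alt = spark.sql(
  "select sum(normalMonitorFlag) normalMonitorCount, " +
  "count(monitor_id) - sum(normalMonitorFlag) abNormalMonitorCount, " +
  "sum(normalCameraCnt) normalCameraCnt, " +
  "sum(abNormalCameraCnt) abNormalCameraCnt from monitor_flow_tmp3")
  .withColumn("taskId", lit(args(0)))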
Columns of monitor_flow_tmp3:
monitor_id, cameraCnt, normalCameraCnt, abNormalCameraCnt, normalMonitorFlag

Schema of traffic.monitor_flow_action:
`date` string,
monitor_id string,
camera_id string,
car string,
action_time string,
speed string,
road_id string,
area_id string
Example: rows of (monitor_id, camera_id):
1 1
1 2
1 3
2 4
2 5
2 6
==========================
collect_set(camera_id) grouped by monitor_id aggregates the rows into one array per key (rows-to-column):
(1,[1,2,3])
(2,[4,5,6])
concat_ws('|', collect_set(camera_id)) then joins each array into a single string: [1,2,3] -----> 1|2|3
explode is the inverse (column-to-rows): it expands an array back into one row per element.
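A self-contained sketch of both directions on a toy DataFrame (values are illustrative, not from the real tables):
import spark.implicits._
import org.apache.spark.sql.functions._

val toy = Seq(("1","1"), ("1","2"), ("1","3"), ("2","4"), ("2","5"), ("2","6"))
  .toDF("monitor_id", "camera_id")

// rows-to-column: one array (and one '|'-joined string) per monitor_id
val rolled = toy.groupBy("monitor_id")
  .agg(collect_set("camera_id").as("cameraIds"))
  .withColumn("cameraInfo", concat_ws("|", col("cameraIds")))
rolled.show() // e.g. 1, [1, 2, 3], 1|2|3 (collect_set gives no ordering guarantee)

// column-to-rows: explode the array back into one row per camera
rolled.select(col("monitor_id"), explode(col("cameraIds")).as("camera_id")).show()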
Preview of the step-7 query, which builds both result strings in one pass:
select concat_ws('|', collect_set(t1.monitor_id)) abNormalMonitorInfos,
       concat_ws('|',
           collect_set(
               concat_ws('|', array_except(t2.cameraIds, t1.cameraIds)))) abNormalCameraInfos
from monitor_flow_tmp5 t1
join monitor_flow_tmp6 t2
  on t1.monitor_id = t2.monitor_id
monitor_flow_tmp5 (cameras actually seen in the logs):
(1,[1,2,3])
(2,[4,5,6])
monitor_flow_tmp6 (cameras registered in the relation table):
(1,[1,2,3,11])
(2,[4,5,6,12])
================================================
Joined on monitor_id:
1 [1,2,3] [1,2,3,11]
2 [4,5,6] [4,5,6,12]
=================================================
concat_ws('|', collect_set(t1.monitor_id)) collapses the broken-checkpoint ids into one string:
1|2
--------------------------------------------------
concat_ws('|', array_except(t2.cameraIds, t1.cameraIds)) yields the cameras missing from each checkpoint:
11
12
Wrapping that in concat_ws('|', collect_set(...)) collapses them into one string:
11|12
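Note that array_except was only added in Spark 2.4, so this query fails to parse on older versions. A quick sanity check in spark-shell, with literal arrays purely for illustration:
spark.sql("select array_except(array('1','2','3','11'), array('1','2','3')) as missing").show()
// prints [11]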
5. From the monitoring log table and step 3, get the set of cameras actually seen at each broken checkpoint
val sql5 =
  """select monitor_id, collect_set(camera_id) cameraIds
    |from traffic.monitor_flow_action
    |where monitor_id in
    |  (select monitor_id from monitor_flow_tmp3 where normalMonitorFlag=0)
    |group by monitor_id""".stripMargin
val df5 = spark.sql(sql5)
df5.createOrReplaceTempView("monitor_flow_tmp5")
6. From the checkpoint-camera relation table and step 3, get the set of cameras registered at each broken checkpoint
val sql6 =
  """select monitor_id, collect_set(camera_id) cameraIds
    |from traffic.monitor_camera_info
    |where monitor_id in
    |  (select monitor_id from monitor_flow_tmp3 where normalMonitorFlag=0)
    |group by monitor_id""".stripMargin
val df6 = spark.sql(sql6)
df6.createOrReplaceTempView("monitor_flow_tmp6")
7. Compare steps 5 and 6 to get the full list of broken checkpoints and of broken cameras
val sql7 = "select " + args(0) + " as taskId, concat_ws('|',collect_set(t1.monitor_id)) abNormalMonitorInfos,concat_ws('|',collect_set(concat_ws('|',array_except(t2.cameraIds,t1.cameraIds)))) abNormalCameraInfos from monitor_flow_tmp5 t1 join monitor_flow_tmp6 t2 on t1.monitor_id=t2.monitor_id"
val df7 = spark.sql(sql7)
8. Join steps 4 and 7 to get the final record: normal checkpoint count, broken checkpoint count, normal camera count, broken camera count, all broken checkpoint info, all broken camera info
val df8 = df4.join(df7, "taskId") // joining on the column name already deduplicates taskId, so no extra drop is needed
df8.write.format("jdbc")
.option("driver", "com.mysql.jdbc.Driver")
.option("url", "jdbc:mysql://hadoop-senior.test.com:3306/traffic1")
.option("dbtable", "monitor_state")
.option("user", "root")
.option("password","123456")
.mode(SaveMode.Append)
.save()
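SaveMode.Append lets the Spark JDBC writer create the table on first run, but it is cleaner to create it up front. A possible MySQL DDL, with the columns read off df8 and the types inferred rather than taken from the original project:
CREATE TABLE IF NOT EXISTS monitor_state (
  taskId INT,
  normalMonitorCount BIGINT,
  abNormalMonitorCount BIGINT,
  normalCameraCnt BIGINT,
  abNormalCameraCnt BIGINT,
  abNormalMonitorInfos TEXT,
  abNormalCameraInfos TEXT
);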
import org.apache.spark.sql.{SaveMode, SparkSession}

object MonitorFlowAnalyzer {
// Windows-only workaround: point hadoop.home.dir at a local Hadoop install (winutils)
System.setProperty("hadoop.home.dir","d://software/hadoop-2.9.2")
def main(args: Array[String]): Unit = {
val spark = SparkSession.builder()
.master("local")
.appName("MonitorFlowAnalyzer")
// spark.eventLog.dir is the base directory in which Spark events are logged when
// spark.eventLog.enabled is true; within that base directory, Spark creates one
// sub-directory per application and records the application-specific events there
.config("fs.defaultFS", "hdfs://hadoop-senior.test.com")
.config("spark.sql.warehouse.dir", "hdfs://hadoop-senior.test.com:8020/user/hive/warehouse")
.enableHiveSupport()
.getOrCreate()
spark.sparkContext.setLogLevel("WARN")
//,concat_ws(',',collect_set(camera_id)) cameraIds
val sql1 = "select monitor_id,count(distinct camera_id) cameraCnt,count(0) carCnt from traffic.monitor_flow_action where monitor_id!='0008' group by monitor_id"
val df1 = spark.sql(sql1)
df1.createOrReplaceTempView("monitor_flow_tmp1")
val sql2 = "select monitor_id,count(0) cameraCnt from traffic.monitor_camera_info where monitor_id!='0008' group by monitor_id"
val df2 = spark.sql(sql2)
df2.createOrReplaceTempView("monitor_flow_tmp2")
val sql3 = "select t2.monitor_id,t2.cameraCnt,nvl(t1.cameraCnt,0) normalCameraCnt,t2.cameraCnt-nvl(t1.cameraCnt,0) abNormalCameraCnt,case when t2.cameraCnt=nvl(t1.cameraCnt,0) then 1 else 0 end as normalMonitorFlag from monitor_flow_tmp2 t2 left join monitor_flow_tmp1 t1 on t2.monitor_id=t1.monitor_id"
val df3 = spark.sql(sql3)
df3.createOrReplaceTempView("monitor_flow_tmp3")
val sql4 = "select " + args(0) + " as taskId, sum(normalMonitorFlag) normalMonitorCount,count(monitor_id)-sum(normalMonitorFlag) abNormalMonitorCount,sum(normalCameraCnt) normalCameraCnt,sum(abNormalCameraCnt) abNormalCameraCnt from monitor_flow_tmp3"
val df4 = spark.sql(sql4)
val sql5 = "select monitor_id,collect_set(camera_id) cameraIds from traffic.monitor_flow_action where monitor_id in (select monitor_id from monitor_flow_tmp3 where normalMonitorFlag=0) group by monitor_id"
val df5 = spark.sql(sql5)
df5.createOrReplaceTempView("monitor_flow_tmp5")
val sql6 = "select monitor_id,collect_set(camera_id) cameraIds from traffic.monitor_camera_info where monitor_id in (select monitor_id from monitor_flow_tmp3 where normalMonitorFlag=0) group by monitor_id"
val df6 = spark.sql(sql6)
df6.createOrReplaceTempView("monitor_flow_tmp6")
val sql7 = "select " + args(0) + " as taskId, concat_ws('|',collect_set(t1.monitor_id)) abNormalMonitorInfos,concat_ws('|',collect_set(concat_ws('|',array_except(t2.cameraIds,t1.cameraIds)))) abNormalCameraInfos from monitor_flow_tmp5 t1 join monitor_flow_tmp6 t2 on t1.monitor_id=t2.monitor_id"
val df7 = spark.sql(sql7)
val df8 = df4.join(df7, "taskId") // join on the column name; taskId appears once in the result
df8.show()
// df8.write.format("jdbc")
// .option("driver", "com.mysql.jdbc.Driver")
// .option("url", "jdbc:mysql://hadoop-senior.test.com:3306/traffic1")
// .option("dbtable", "monitor_state")
// .option("user", "root")
// .option("password","123456")
// .mode(SaveMode.Append)
// .save()
spark.close()
}
}
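The job reads the task id from args(0), so it must be passed on the command line. A hypothetical local run (the jar name and task id here are made up for illustration):
spark-submit \
  --class MonitorFlowAnalyzer \
  --master local \
  monitor-flow-analyzer.jar 1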