package report
import config.ConfigHelper
import org.apache.spark.sql.{Dataset, SparkSession}
import scalikejdbc.{DB, SQL}
import scalikejdbc.config.DBs
import utils.MakeATPKpi
// report analysis on trainTime values stored in MySQL
object TrainTimeMysqlAnalysis {
  def main(args: Array[String]): Unit = {
    // build the SparkSession
    val session = SparkSession
      .builder()
      // run locally; the * means one worker thread per available core
      .master("local[*]")
      // name of the application
      .appName(this.getClass.getName)
      // serializer
      .config("spark.serializer", ConfigHelper.serializer)
      .getOrCreate()
    // import implicit conversions (encoders for the mapPartitions below)
    import session.implicits._
    // read the input; the return value is a DataFrame
    val frame = session.read.parquet(args(0))
    // process the data: look up each record's manufacture date (trainTime)
    // in MySQL and pair it with the record's KPI vector
    val result = frame.mapPartitions(partition => {
      // initialise the scalikejdbc connection pool once per partition
      DBs.setup()
      val tuples: Iterator[(String, List[Int])] = partition.map(row => {
        // readOnly: read-only session
        val listTrainTime: List[String] = DB.readOnly { implicit session =>
          // fetch the trainId
          val trainId = row.getAs[String]("MPacketHead_TrainID")
          // fetch the matching trainTime values
          SQL("select trainTime from traintime2mysql where trainId=?")
            .bind(trainId)
            // read the trainTime column of each result line
            .map(line => line.string("trainTime"))
            .list().apply()
        }
        // manufacture date; assumes every trainId has at least one row in the table
        val trainTime: String = listTrainTime.head
        (trainTime, MakeATPKpi.makeKpi(row))
      })
      tuples
    }).rdd.reduceByKey {
      // element-wise sum of the two KPI lists
      (list1, list2) => list1.zip(list2).map { case (a, b) => a + b }
    }
    result.foreach(println)
    // release resources
    session.stop()
  }
}
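For reference, here is a minimal sketch of the traintime2mysql table the job queries. Only the table name and the trainId/trainTime column names are taken from the SQL above; the column types and the sample row are assumptions.
SeedTrainTimeTable.scala (hypothetical):
import scalikejdbc.{DB, SQL}
import scalikejdbc.config.DBs
object SeedTrainTimeTable {
  def main(args: Array[String]): Unit = {
    // builds the default connection pool from db.default.* in application.conf
    DBs.setup()
    DB.autoCommit { implicit session =>
      // assumed schema: the varchar columns are a guess, not taken from the source
      SQL("""create table if not exists traintime2mysql (
            |  trainId   varchar(64) not null,
            |  trainTime varchar(32) not null
            |)""".stripMargin).execute().apply()
      // sample row; real data would come from the fleet's master data
      SQL("insert into traintime2mysql (trainId, trainTime) values (?, ?)")
        .bind("T0001", "2018-05-01").update().apply()
    }
    DBs.close()
  }
}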
Helper object
MakeATPKpi.scala:
package utils
import org.apache.commons.lang.StringUtils
import org.apache.spark.sql.Row
object MakeATPKpi {
  def makeKpi(row: Row): List[Int] = {
    // fetch the atpError field
    val atpError = row.getAs[String]("MATPBaseInfo_AtpError")
    // build the indicator list: if atpError is non-empty the leading flag is 1,
    // followed by a one-hot position for the failing device; if it is empty,
    // the else branch below emits all zeros
    val listAtpError: List[Int] = if (StringUtils.isNotEmpty(atpError)) {
      val listError: List[Int] = atpError match {
        case "车载主机"           => List(1, 0, 0, 0, 0, 0, 0, 0) // on-board host
        case "无线传输单元"       => List(0, 1, 0, 0, 0, 0, 0, 0) // radio transmission unit
        case "应答器信息接收单元" => List(0, 0, 1, 0, 0, 0, 0, 0) // balise information receiving unit
        case "轨道电路信息读取器" => List(0, 0, 0, 1, 0, 0, 0, 0) // track circuit information reader
        case "测速测距单元"       => List(0, 0, 0, 0, 1, 0, 0, 0) // speed and distance measurement unit
        case "人机交互接口单元"   => List(0, 0, 0, 0, 0, 1, 0, 0) // human-machine interface unit
        case "列车接口单元"       => List(0, 0, 0, 0, 0, 0, 1, 0) // train interface unit
        case "司法记录单元"       => List(0, 0, 0, 0, 0, 0, 0, 1) // juridical recording unit
        // guard against atpError values that match none of the known devices
        case _                    => List(0, 0, 0, 0, 0, 0, 0, 0)
      }
      // use ++ to concatenate two lists: the error flag, then the one-hot vector
      List(1) ++ listError
    } else {
      // atpError is empty: the field has no value for this row, so all nine flags are 0
      List(0, 0, 0, 0, 0, 0, 0, 0, 0)
    }
    // prepend a constant 1 so the aggregation can count the total number of records
    List(1) ++ listAtpError
  }
}
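To see the shape of makeKpi's output, the sketch below builds a schema-backed Row by hand and prints the resulting label list. GenericRowWithSchema is a Spark-internal class used here only because getAs(fieldName) needs a schema; in the real job the rows come from the parquet input, and the demo object itself is hypothetical.
MakeATPKpiDemo.scala (hypothetical):
import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import utils.MakeATPKpi
object MakeATPKpiDemo {
  def main(args: Array[String]): Unit = {
    val schema = StructType(Seq(StructField("MATPBaseInfo_AtpError", StringType)))
    val row = new GenericRowWithSchema(Array[Any]("车载主机"), schema)
    // prints List(1, 1, 1, 0, 0, 0, 0, 0, 0, 0):
    // record counter, error flag, then the 8 one-hot device positions
    println(MakeATPKpi.makeKpi(row))
  }
}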
Related configuration files
application.conf:
# configuration file
# parquet compression codec
parquet.code="snappy"
# Spark serializer
spark.serializer="org.apache.spark.serializer.KryoSerializer"
# plain JDBC connection
jdbc.url="jdbc:mysql://localhost:3306/test?characterEncoding=UTF-8"
jdbc.driver="com.mysql.jdbc.Driver"
jdbc.user="root"
jdbc.password="000000"
# scalikejdbc connection (read by DBs.setup())
db.default.url="jdbc:mysql://localhost:3306/test?characterEncoding=UTF-8"
db.default.driver="com.mysql.jdbc.Driver"
db.default.user="root"
db.default.password="000000"
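When DBs.setup() is called with no arguments, scalikejdbc-config reads the four db.default.* keys above automatically, so the job never references them in code. A minimal connectivity check against that configuration (the object name is hypothetical):
ConnectivityCheck.scala (hypothetical):
import scalikejdbc.{DB, SQL}
import scalikejdbc.config.DBs
object ConnectivityCheck {
  def main(args: Array[String]): Unit = {
    // builds the default connection pool from db.default.* in application.conf
    DBs.setup()
    val one = DB.readOnly { implicit session =>
      SQL("select 1").map(_.int(1)).single().apply()
    }
    println(s"connection ok: ${one.contains(1)}")
    DBs.close()
  }
}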
ConfigHelper.scala:
package config
import com.typesafe.config.{Config, ConfigFactory}
object ConfigHelper {
  // load the configuration file
  private lazy val load: Config = ConfigFactory.load()
  // parquet compression codec
  val parquetCode: String = load.getString("parquet.code")
  // Spark serializer
  val serializer: String = load.getString("spark.serializer")
  // plain JDBC settings
  val url: String = load.getString("jdbc.url")
  val driver: String = load.getString("jdbc.driver")
  val user: String = load.getString("jdbc.user")
  val password: String = load.getString("jdbc.password")
  // scalikejdbc settings (DBs.setup() also reads db.default.* directly)
  val url2: String = load.getString("db.default.url")
  val driver2: String = load.getString("db.default.driver")
  val user2: String = load.getString("db.default.user")
  val password2: String = load.getString("db.default.password")
}
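Note that the jdbc.* values loaded here are not used by the job above, which prints its result to stdout and reaches MySQL only through scalikejdbc's db.default.* keys. As a sketch of how they could be put to work, the hypothetical helper below writes the aggregated KPI pairs back to MySQL with Spark's JDBC writer; the atp_kpi_report table name is an assumption.
WriteKpiToMysql.scala (hypothetical):
import java.util.Properties
import config.ConfigHelper
import org.apache.spark.sql.{SaveMode, SparkSession}
object WriteKpiToMysql {
  // kpi: the (trainTime, summed KPI list) pairs produced by reduceByKey
  def write(session: SparkSession, kpi: Seq[(String, List[Int])]): Unit = {
    import session.implicits._
    val props = new Properties()
    props.setProperty("driver", ConfigHelper.driver)
    props.setProperty("user", ConfigHelper.user)
    props.setProperty("password", ConfigHelper.password)
    // flatten each KPI list to a comma-separated string so it fits one column
    kpi.map { case (trainTime, counts) => (trainTime, counts.mkString(",")) }
      .toDF("trainTime", "kpi")
      .write.mode(SaveMode.Append)
      .jdbc(ConfigHelper.url, "atp_kpi_report", props)
  }
}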