import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.TimestampType
import java.sql.Timestamp
import java.sql.{Date=>sqlDate}
import java.text.SimpleDateFormat
/**
 * UDAF counting the values of a numeric column that fall outside the
 * closed interval [low, up] (i.e. value < low or value > up).
 * Null inputs are ignored; the result is the out-of-bounds count as a Long.
 *
 * NOTE(review): the `var` constructor parameters are kept for source
 * compatibility, but the bounds should not be mutated after the function
 * is registered or applied.
 */
class AggSumUdf(var low:Double,var up:Double) extends UserDefinedAggregateFunction {
  // Input schema: one Double column.
  // FIX: the original declared LongType here while update() compared the value
  // (via toString.toDouble) against fractional bounds such as ±1.5. Spark
  // coerces the input expression to the declared input type, so a LongType
  // declaration truncates decimals before update() ever sees them, making
  // fractional bounds ineffective. DoubleType preserves the actual value.
  override def inputSchema: StructType = StructType(StructField("input", DoubleType) :: Nil)
  // Aggregation buffer: a single running count.
  override def bufferSchema: StructType = StructType(StructField("count", LongType) :: Nil)
  // Result type of the aggregation: the final count.
  override def dataType: DataType = LongType
  // Deterministic: the same input always produces the same output.
  // (The original comment said "idempotent"; the contract is determinism.)
  override def deterministic: Boolean = true
  // Start every group with a zero count.
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer(0) = 0L
  }
  // Process one input row: increment the count when the value is non-null
  // and lies outside [low, up].
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    if (!input.isNullAt(0)) {
      val v = input.getDouble(0)
      if (v < low || v > up) {
        buffer(0) = buffer.getLong(0) + 1L
      }
    }
  }
  // Merge two partial counts from different partitions.
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    buffer1(0) = buffer1.getLong(0) + buffer2.getLong(0)
  }
  // Emit the final count for the group.
  override def evaluate(buffer: Row): Any = buffer.getLong(0)
}
// Load the input dataset from the "in" port (platform-provided helper,
// defined outside this file).
val data=getInPortDataset("in")
val sparkSession=data.sparkSession
// Register the custom UDAF for SQL use.
// NOTE(review): this registered instance has fixed bounds (0.0, 0.0) and is
// never referenced below — the aggregation at the bottom of the file builds
// per-column AggSumUdf instances directly. Confirm whether this registration
// is still needed by external SQL; otherwise it can be removed.
sparkSession.udf.register("AggSumUdf", new AggSumUdf(0.0,0.0))
// Per-column lower bounds, paired positionally with the data columns
// (see the zip with colNames below). Values below the bound count as
// out-of-range in AggSumUdf.
// NOTE(review): the trailing "-" on the third line continues onto the next
// line, forming a single literal "-10" via Scala operator line-continuation —
// fragile but syntactically valid; do not insert anything between those lines.
// Assumes the array length matches the number of data columns — TODO confirm.
val xiaxian = Array(
0, 0, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -1.5, -1.5, -1.5, -1.5, -1.5, -100, -100,
-100, 0, -5, -5, -5, -10, -
10, -10, -10, -10, -10, -10, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, -50,
-50, 0, 0, 0, -50, -50, 0, 0, 0)
// Per-column upper bounds, index-aligned with xiaxian and colNames.
val shangxian = Array(
2500, 50, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 1.5, 1.5, 1.5, 1.5, 1.5, 3000, 3000,
3000, 180, 95, 95, 95, 200,
200, 200, 200, 200, 200, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 150, 150, 150, 150, 150, 150, 150, 150, 150, 180,
180, 180, 150, 150, 180, 180, 180)
// Data columns = every input column except the three grouping keys
// (time / station / unit). `diff` replaces the deprecated `--` operator
// (removed from Buffer in Scala 2.13) with identical semantics here.
val colNames = data.columns.toBuffer.diff(Seq("时间", "场站", "机组"))
// Map each data column to its (lowerBound, upperBound) pair, matched by
// position against the bound arrays above.
val boundMap = colNames.zip(xiaxian.zip(shangxian)).toMap
// --- Time handling ---
// Shared formatter for converting the date string back to java.sql.Date
// in the row-assembly step further below.
val sf = new SimpleDateFormat("yyyy-MM-dd")
// UDF extracting the "yyyy-MM-dd" prefix of the timestamp's string form.
// NOTE(review): throws NPE on null timestamps — confirm the 时间 column is
// never null upstream.
val timeFunc = udf { (time: Timestamp) => time.toString.substring(0, 10) }
// FIX: the original wrote `data = data.withColumn(...)`, reassigning a `val`
// declared above — that does not compile outside a per-cell REPL. Bind the
// enriched dataset to a new val and group on it instead.
val dataWithTime = data.withColumn("timeStr", timeFunc(col("时间")))
val groupData = dataWithTime.groupBy("timeStr", "场站", "机组")
// Build the aggregate expressions: for each data column emit its min, max,
// and the out-of-bounds counter parameterized with that column's limits.
val aggMap = colNames.flatMap { x =>
  val (low, up) = boundMap(x) // val, not var: never reassigned
  Array(
    min(x).alias(x + "_min"),
    max(x).alias(x + "_max"),
    new AggSumUdf(low, up)(col(x)).alias(x + "_sum"))
}
// agg requires head + varargs tail; one row per (timeStr, 场站, 机组) group.
val resultAggDataFrame = groupData.agg(aggMap.head, aggMap.tail: _*)
resultAggDataFrame.printSchema()
resultAggDataFrame.show()
// For every (date, station, unit) group, classify each data column as
// all-null / constant / out-of-bounds and assemble one summary Row.
val rddRow = resultAggDataFrame.rdd.map { row =>
  // Per-column stats: (name, min, max, outOfBoundsCount, isAllNull, isConstant)
  val colStats = colNames.map { name =>
    val colMin = row.getAs[Any](name + "_min")
    val colMax = row.getAs[Any](name + "_max")
    val outCnt = row.getAs[Long](name + "_sum")
    // min and max are both null only when every value in the group is null
    val isAllNull = colMin == null && colMax == null
    // a non-null column whose min equals its max is constant within the group
    val isConstant = colMin != null && colMax != null && colMin == colMax
    (name, colMin, colMax, outCnt, isAllNull, isConstant)
  }
  // all-null columns
  val allNullStats = colStats.filter(_._5)
  val isAllNullCols = allNullStats.map(_._1).mkString(",")
  // constant-valued columns (and their constant value)
  val constantStats = colStats.filter(_._6)
  val isConstantCols = constantStats.map(_._1).mkString(",")
  val isConstantValues = constantStats.map(_._2).mkString(",")
  // out-of-bounds counts for all columns, plus the columns with violations
  val outBoundValues = colStats.map(_._4).mkString(",")
  val outBoundStats = colStats.filter(_._4 > 0)
  val outBoundCols = outBoundStats.map(_._1).mkString(",")
  // FIX: the original parsed the date with the shared SimpleDateFormat `sf`
  // captured in this closure; SimpleDateFormat is not thread-safe, so
  // concurrent executor tasks could corrupt parses. timeStr is always the
  // exact "yyyy-MM-dd" prefix produced above, which java.sql.Date.valueOf
  // parses directly and thread-safely.
  Row.fromSeq(Seq(
    sqlDate.valueOf(row.getAs[String]("timeStr")),
    row.get(1).toString, // 场站 — group-key position 1
    row.get(2).toString, // 机组 — group-key position 2
    colNames.mkString(","),
    allNullStats.length,
    constantStats.length,
    outBoundStats.length,
    isAllNullCols,
    isConstantCols,
    outBoundCols,
    isConstantValues,
    outBoundValues))
}
// Output schema for the summary rows built above, one row per
// date/station/unit group. Column names are Chinese per the downstream
// contract and must not be changed.
// val (not var): never reassigned. All types use DataType objects for
// consistency instead of mixing objects ("DateType") with string names.
val schemaTyped = new StructType()
  .add("时间", DateType, true)
  .add("场站", StringType, true)
  .add("机组", StringType, true)
  .add("所有字段", StringType, true)
  .add("空值字段数量", IntegerType, true)
  .add("恒定值字段数量", IntegerType, true)
  .add("超限值字段数量", IntegerType, true)
  .add("空值字段", StringType, true)
  .add("恒定值字段", StringType, true)
  .add("超限值字段", StringType, true)
  .add("恒定值统计", StringType, true)
  .add("超限值统计", StringType, true)
val outPutDf = sparkSession.createDataFrame(rddRow, schemaTyped)