1. UDF: one row in, one row out (custom string-concatenation example):
import org.apache.spark.sql.{Dataset, SparkSession}

object MyUDF {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._
    val ds: Dataset[(String, String, String)] = spark.createDataset(List(
      ("山东省", "烟台市", "莱山区"),
      ("辽宁省", "沈阳市", "皇姑区"),
      ("北京市", "北京市", "昌平区")))
    val df = ds.toDF("province", "city", "district")
    // Join the three address parts with the given separator
    val func = (split: String, p1: String, p2: String, p3: String) => {
      p1 + split + p2 + split + p3
    }
    spark.udf.register("MY_CONCAT_WS", func)
    df.createTempView("v_addr")
    // SQL style
    spark.sql("SELECT MY_CONCAT_WS('-', province, city, district) FROM v_addr").show()
    // DSL style
    import org.apache.spark.sql.functions._
    df.select(
        expr("MY_CONCAT_WS('-', province, city, district)").as("location"))
      .show()
    spark.stop()
  }
}
Output: both queries print the same three concatenated rows, e.g. 山东省-烟台市-莱山区 (the DSL version names the column location).
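As a side note, for the DSL style the SQL registration step can be skipped entirely by wrapping the same function with functions.udf. A minimal sketch, reusing df and the spark.implicits._ import from the example above (the myConcatWs name is my own):

import org.apache.spark.sql.functions.{lit, udf}
// Wrap the Scala function directly; no spark.udf.register needed for DSL use
val myConcatWs = udf((split: String, p1: String, p2: String, p3: String) =>
  p1 + split + p2 + split + p3)
df.select(myConcatWs(lit("-"), $"province", $"city", $"district").as("location")).show()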
2. UDAF: many rows in, one row out (Spark 2.x style)
Custom average example. Note that UserDefinedAggregateFunction is deprecated since Spark 3.0; the Aggregator-based version in section 3 is preferred there.
Define the function:
import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, DoubleType, IntegerType, StructField, StructType}

class MyAvgFunction extends UserDefinedAggregateFunction {
  // Schema of the input data
  override def inputSchema: StructType = StructType(List(
    StructField("in", DoubleType)
  ))
  // Schema of the intermediate aggregation buffer
  override def bufferSchema: StructType = StructType(List(
    StructField("total", DoubleType),
    StructField("amount", IntegerType)
  ))
  // Type of the returned value
  override def dataType: DataType = DoubleType
  // Whether the function always returns the same output for the same input
  override def deterministic: Boolean = true
  // Initial buffer values
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer(0) = 0.0 // running salary total
    buffer(1) = 0   // running employee count
  }
  // Partial aggregation within a partition; called once per input row of a group
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    buffer(0) = buffer.getDouble(0) + input.getDouble(0) // add the salary
    buffer(1) = buffer.getInt(1) + 1                     // one more employee
  }
  // Merge two partial buffers during global aggregation
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    buffer1(0) = buffer1.getDouble(0) + buffer2.getDouble(0)
    buffer1(1) = buffer1.getInt(1) + buffer2.getInt(1)
  }
  // Compute the final result from the buffer
  override def evaluate(buffer: Row): Any = {
    buffer.getDouble(0) / buffer.getInt(1)
  }
}
Register and call (assumes a v_emp view with dept and salary columns; a stub view is sketched below):
spark.udf.register("my_avg", new MyAvgFunction)
spark.sql(
  """
    |SELECT dept, my_avg(salary) avg_salary FROM v_emp GROUP BY dept
    |""".stripMargin).show()
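If data/emp.csv is not at hand, the v_emp view can be stubbed from a small in-memory list. A minimal sketch with made-up rows, covering only the dept and salary columns the query needs:

import spark.implicits._
// Made-up sample rows; any (dept, salary) pairs will do
val emp = List(("dev", 10000.0), ("dev", 20000.0), ("ops", 15000.0))
  .toDF("dept", "salary")
emp.createTempView("v_emp")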
3. UDAF: many rows in, one row out (Spark 3.x style)
import org.apache.spark.sql.{Encoder, Encoders, SparkSession}
import org.apache.spark.sql.expressions.Aggregator
object UDFDemo5 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()
    val df = spark
      .read
      .option("header", "true")
      .option("inferSchema", "true")
      .csv("data/emp.csv")
    df.createTempView("v_emp")
    import org.apache.spark.sql.functions._
    val avgAgg = new Aggregator[Double, (Double, Int), Double] {
      // Initial value of the buffer: (running total, count)
      override def zero: (Double, Int) = (0.0, 0)
      // Partial aggregation within a partition, one input value at a time
      override def reduce(b: (Double, Int), a: Double): (Double, Int) = {
        (b._1 + a, b._2 + 1)
      }
      // Merge two partial buffers during global aggregation
      override def merge(b1: (Double, Int), b2: (Double, Int)): (Double, Int) = {
        (b1._1 + b2._1, b1._2 + b2._2)
      }
      // Compute the final result
      override def finish(reduction: (Double, Int)): Double = {
        reduction._1 / reduction._2
      }
      // Encoder for the intermediate buffer
      override def bufferEncoder: Encoder[(Double, Int)] = {
        Encoders.tuple(Encoders.scalaDouble, Encoders.scalaInt)
      }
      // Encoder for the result
      override def outputEncoder: Encoder[Double] = {
        Encoders.scalaDouble
      }
    }
    spark.udf.register("my_avg", udaf(avgAgg))
    spark.sql(
      """
        |SELECT dept, my_avg(salary) avg_salary FROM v_emp GROUP BY dept
        |""".stripMargin).show()
    spark.stop()
  }
}
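Besides SQL registration, the same Aggregator can also be used on a typed Dataset through toColumn. A minimal sketch, reusing spark, df, and avgAgg from the example above and assuming salary is a numeric column:

import org.apache.spark.sql.Dataset
import spark.implicits._
// Typed view over just the salary column
val salaries: Dataset[Double] = df.select($"salary".cast("double")).as[Double]
// toColumn lifts the Aggregator into a TypedColumn usable in a typed select
salaries.select(avgAgg.toColumn.name("avg_salary")).show()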