Writing a custom UDAF in Spark

The GroupConcatDistinct class below implements UserDefinedAggregateFunction: it concatenates the distinct city-info values of each group into a single comma-separated string.

import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, StringType, StructField, StructType}

class GroupConcatDistinct extends UserDefinedAggregateFunction {

  // Input schema: a single String column holding one piece of city info
  override def inputSchema: StructType = StructType(List(StructField("cityInfo", StringType, true)))

  // Buffer schema: the intermediate state is a single comma-separated String
  override def bufferSchema: StructType = StructType(List(StructField("buffCityInfo", StringType, true)))

  // Output type: the final concatenated String
  override def dataType: DataType = StringType

  // Deterministic: the same input always produces the same result
  override def deterministic: Boolean = true

  // Start each group with an empty buffer string
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer(0) = ""
  }

  // Fold one input row into the buffer, appending the city info only if it is not already present
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    var buffCityInfo = buffer.getString(0)
    val cityInfo = input.getString(0)

    // Check for an exact element match; a plain contains() would also match substrings
    if (!buffCityInfo.split(",").contains(cityInfo)) {
      if ("".equals(buffCityInfo)) {
        buffCityInfo += cityInfo
      } else {
        buffCityInfo += "," + cityInfo
      }
      buffer.update(0, buffCityInfo)
    }
  }

  // Merge two partial buffers (e.g. from different partitions), keeping each city info only once
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    var buffCityInfo1 = buffer1.getString(0)
    val buffCityInfo2 = buffer2.getString(0)

    for (cityInfo <- buffCityInfo2.split(",") if cityInfo.nonEmpty) {
      if (!buffCityInfo1.split(",").contains(cityInfo)) {
        if ("".equals(buffCityInfo1)) {
          buffCityInfo1 += cityInfo
        } else {
          buffCityInfo1 += "," + cityInfo
        }
      }
    }

    buffer1.update(0, buffCityInfo1)
  }

  // Return the final concatenated string for the group
  override def evaluate(buffer: Row): Any = {
    buffer.getString(0)
  }
}
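
For context, here is a minimal usage sketch. The SparkSession setup, the sample data, and the SQL function name group_concat_distinct are assumptions for illustration; the key steps are registering the UDAF with spark.udf.register and then calling it in a GROUP BY query.

import org.apache.spark.sql.SparkSession

object GroupConcatDistinctExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("GroupConcatDistinctExample")
      .master("local[*]")   // local mode, for illustration only
      .getOrCreate()
    import spark.implicits._

    // Register the UDAF under a SQL-callable name (name is arbitrary)
    spark.udf.register("group_concat_distinct", new GroupConcatDistinct)

    // Hypothetical sample data: (area, city_info)
    val cityInfoDF = Seq(
      ("North", "1:Beijing"),
      ("North", "2:Tianjin"),
      ("North", "1:Beijing"),
      ("South", "3:Guangzhou")
    ).toDF("area", "city_info")
    cityInfoDF.createOrReplaceTempView("city_info")

    // Concatenate the distinct city_info values per area
    spark.sql(
      """
        |SELECT area, group_concat_distinct(city_info) AS city_infos
        |FROM city_info
        |GROUP BY area
      """.stripMargin).show(truncate = false)

    spark.stop()
  }
}

Note that UserDefinedAggregateFunction is deprecated since Spark 3.0; new code should generally implement org.apache.spark.sql.expressions.Aggregator and register it via functions.udaf. The class above still works and illustrates the buffer-based aggregation lifecycle: initialize, update, merge, evaluate.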

 
