Java 实现自定义UDAF函数,代码如下:
package com.**;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.expressions.MutableAggregationBuffer;
import org.apache.spark.sql.expressions.UserDefinedAggregateFunction;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;
/**
 * Spark UDAF that concatenates the distinct string values of a group into a
 * single "|"-separated string, e.g. inputs {"a","b","a"} aggregate to "|a|b|"
 * (the final value is wrapped in separators by {@link #evaluate(Row)} —
 * presumably so callers can test membership with {@code LIKE '%|x|%'}; confirm
 * against downstream SQL).
 *
 * <p>Equivalent to {@code concat_ws('|', collect_set(col))} apart from the
 * wrapping separators.
 */
public class SelfConcatFunction extends UserDefinedAggregateFunction {

    /** Delimiter placed between concatenated values. */
    private static final String SEPARATOR = "|";

    /** Input schema: a single string column. */
    @Override
    public StructType inputSchema() {
        return new StructType().add("SELFCONCAT", DataTypes.StringType);
    }

    /** Buffer schema: the running "|"-joined string of distinct values (null = empty). */
    @Override
    public StructType bufferSchema() {
        return new StructType().add("concat_str", DataTypes.StringType);
    }

    /** Result type: the concatenated string. */
    @Override
    public DataType dataType() {
        return DataTypes.StringType;
    }

    /** Same inputs always produce the same output. */
    @Override
    public boolean deterministic() {
        return true;
    }

    /** Each group starts with an empty (null) buffer. */
    @Override
    public void initialize(MutableAggregationBuffer buffer) {
        buffer.update(0, null);
    }

    /** Folds one input value into the buffer, skipping nulls and duplicates. */
    @Override
    public void update(MutableAggregationBuffer buffer, Row input) {
        appendDistinct(buffer, input.getString(0));
    }

    /**
     * Merges a partial aggregate (itself a "|"-joined string) into the buffer.
     * The partial is split into individual tokens so each value is
     * de-duplicated exactly; appending the partial as one blob (as before)
     * would duplicate values already present in the buffer.
     */
    @Override
    public void merge(MutableAggregationBuffer buffer, Row partial) {
        String other = partial.getString(0);
        if (other != null) {
            for (String token : other.split("\\|")) {
                appendDistinct(buffer, token);
            }
        }
    }

    /** Wraps the final value in separators; returns null for an empty group. */
    @Override
    public Object evaluate(Row row) {
        String result = row.getString(0);
        return result == null ? null : SEPARATOR + result + SEPARATOR;
    }

    /**
     * Appends {@code value} to the buffer unless it is null, empty, or already
     * present as an exact token. Wrapping both sides in the separator fixes the
     * old substring test, which wrongly treated "a" as a duplicate of "ab".
     */
    private void appendDistinct(MutableAggregationBuffer buffer, String value) {
        if (value == null || value.isEmpty()) {
            return;
        }
        String current = buffer.getString(0);
        if (current == null) {
            buffer.update(0, value);
        } else if (!(SEPARATOR + current + SEPARATOR)
                .contains(SEPARATOR + value + SEPARATOR)) {
            buffer.update(0, current + SEPARATOR + value);
        }
    }
}
SelfConcatFunction concatFunc = new SelfConcatFunction(); // 引用自定义函数
sparkSession.udf().register("CONCAT_BUCKET", concatFunc); // 注册到sparkSession
在聚合SQL中使用该注册的函数: CONCAT_BUCKET
select CONCAT_BUCKET(AAA) as combine_aaa,bbb from tableName group by bbb;
后来发现,hive内置函数也可以实现相同功能,SQL如下:
select concat_ws('|',collect_set(AAA)) as combine_aaa,bbb from tableName group by bbb;
(注意:concat_ws 写法不会像自定义函数的 evaluate 那样在结果两端额外加上 '|'。)