2020.11.17 Class Notes (Using UDF, UDAF, and UDTF Functions in Spark)

Using a UDF in Spark
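
The example below reads in/hobbies.txt and uses a registered UDF to count each person's hobbies. The notes do not include the file itself; since each line is split on a space into a name and a comma-separated hobby list, a plausible (assumed) layout would be:

alice jogging,coding,cooking
bob reading,swimming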

package nj.zb.kb09.sql

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

case class Hobbies(name:String,hobbies:String)
object SparkUDFDemo {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName("SparkUDFDemo")
      .getOrCreate()
    import spark.implicits._
    val sc: SparkContext = spark.sparkContext
    val rdd: RDD[String] = sc.textFile("in/hobbies.txt")
    // Each line holds a name and a comma-separated hobby list, separated by a space
    val df: DataFrame = rdd
      .map(x => x.split(" "))
      .map(x => Hobbies(x(0), x(1)))
      .toDF()

    df.show()
    // registerTempTable is deprecated; create a temporary view instead
    df.createOrReplaceTempView("hobbies")
    // Register a UDF named hobby_num that counts the comma-separated hobbies
    spark.udf.register("hobby_num", (v: String) => v.split(",").size)
    val frame: DataFrame = spark.sql("select name, hobbies, hobby_num(hobbies) as hobnum from hobbies")
    frame.show()

  }
}
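
The registered UDF is invoked through SQL above. The same logic can also be applied directly with the DataFrame API, without a temp view. A minimal sketch, assuming the df and spark session from the example above (the hobbyNum name is only for illustration):

import org.apache.spark.sql.functions.udf

// Wrap the same lambda as a DataFrame-API UDF
val hobbyNum = udf((v: String) => v.split(",").size)

// Add the computed column; no temp view or SQL string needed
df.withColumn("hobnum", hobbyNum($"hobbies")).show()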

Using a UDAF in Spark
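
This example averages ages per sex from in/user.json. The file is not reproduced in the notes; since the query below groups by sex and aggregates age, a plausible (assumed) JSON-lines layout would be:

{"name":"zs","sex":"man","age":20}
{"name":"ls","sex":"woman","age":18}
{"name":"ww","sex":"man","age":24}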

package nj.zb.kb09.sql

import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._

// Define and use a custom UDAF
object SparkUDAFDemo {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[*]")
      .appName("udaf")
      .getOrCreate()
    val df: DataFrame = spark.read.json("in/user.json")
    //df.printSchema()
    //df.show()

    // Create and register the custom UDAF
    val myUdaf = new MyAgeAvgFunction
    spark.udf.register("myAvgAge", myUdaf)

    df.createTempView("userinfo")
    // Note: the alias belongs on the aggregate expression, not on sex
    val resultDF: DataFrame = spark.sql("select myAvgAge(age) as avg, sex from userinfo group by sex")
    resultDF.printSchema()
    resultDF.show()


  }
}

class MyAgeAvgFunction extends UserDefinedAggregateFunction{
  // Input schema of the aggregate function
  override def inputSchema: StructType = {
    new StructType().add("age",LongType)
    //StructType(StructField("age",LongType)::Nil)
  }
  // Schema of the aggregation buffer
  override def bufferSchema: StructType = {
    new StructType().add("sum",LongType).add("count",LongType)
    //StructType(StructField("sum",LongType)::StructField("count",LongType)::Nil)
  }
  // Data type of the aggregate function's return value
  override def dataType: DataType = DoubleType
  // Whether the function is deterministic, i.e. the same input always yields the same output
  override def deterministic: Boolean = true

  // Initialize the buffer: sum = 0, count = 0
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer(0)=0L
    buffer(1)=0L
  }
  // Process one new input row: add the age to sum and increment count
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    buffer(0)=buffer.getLong(0)+input.getLong(0)
    buffer(1)=buffer.getLong(1)+1
  }
  // Merge two aggregation buffers
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    // total age
    buffer1(0)=buffer1.getLong(0)+buffer2.getLong(0)
    // total count
    buffer1(1)=buffer1.getLong(1)+buffer2.getLong(1)
  }
  // Compute the final result: sum / count
  override def evaluate(buffer: Row): Any = {
    buffer.getLong(0).toDouble/buffer.getLong(1)
  }
}
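
In Spark 3.x, UserDefinedAggregateFunction is deprecated in favor of the typed Aggregator API registered through functions.udaf. A minimal sketch of the same average-age logic under that newer API (not part of the original notes; names are illustrative):

import org.apache.spark.sql.{Encoder, Encoders}
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.functions

// Running sum of ages and count of rows
case class AvgBuffer(var sum: Long, var count: Long)

object MyAvgAggregator extends Aggregator[Long, AvgBuffer, Double] {
  def zero: AvgBuffer = AvgBuffer(0L, 0L)
  def reduce(b: AvgBuffer, age: Long): AvgBuffer = { b.sum += age; b.count += 1; b }
  def merge(b1: AvgBuffer, b2: AvgBuffer): AvgBuffer = { b1.sum += b2.sum; b1.count += b2.count; b1 }
  def finish(b: AvgBuffer): Double = b.sum.toDouble / b.count
  def bufferEncoder: Encoder[AvgBuffer] = Encoders.product[AvgBuffer]
  def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}

// Register under the same SQL name used above:
// spark.udf.register("myAvgAge", functions.udaf(MyAvgAggregator))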

Using a UDTF in Spark
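
The UDTF example reads in/udtf.txt, keeps only the student named "ls", and explodes the space-separated subject list into one row per subject. The file is not shown in the notes; judging from the split on "//" and the three columns taken, a plausible (assumed) layout would be:

01//zs//hadoop scala hive
02//ls//hadoop scala kafka hive hbase Oozie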

package nj.zb.kb09.sql

import java.util

import org.apache.hadoop.hive.ql.exec.UDFArgumentException
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, ObjectInspectorFactory, StructObjectInspector}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}



object SparkUDTFDemo {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName("udtfdemo")
      //.config("hive.metastore.uris","thrift://192.168.237.100:9083")
      .enableHiveSupport()
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext
    val lines: RDD[String] = sc.textFile("in/udtf.txt")
    //lines.collect().foreach(println)
    import spark.implicits._

    // Each line is "//"-delimited: id, name, and a space-separated list of subjects
    val stuDF: DataFrame = lines
      .map(_.split("//"))
      .filter(x => x(1).equals("ls"))
      .map(x => (x(0), x(1), x(2)))
      .toDF("id", "name", "class")
    stuDF.printSchema()
    stuDF.show()

    stuDF.createOrReplaceTempView("student")

    spark.sql("CREATE TEMPORARY FUNCTION myUDTF AS 'nj.zb.kb09.sql.myUDTF' ")

    val resultDF: DataFrame = spark.sql("select myUDTF(class) from student")
    resultDF.printSchema()
    resultDF.show()

  }
}

class myUDTF extends GenericUDTF{


  override def initialize(argOIs: Array[ObjectInspector]): StructObjectInspector = {
    if (argOIs.length != 1) {
      throw new UDFArgumentException("Exactly one argument is required")
    }
    if (argOIs(0).getCategory != Category.PRIMITIVE) {
      throw new UDFArgumentException("The argument must be of a primitive type")
    }
    val fieldNames=new util.ArrayList[String]
    val fieldOis=new util.ArrayList[ObjectInspector]

    fieldNames.add("type")
    // Type of the output column field
    fieldOis.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector)

    ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames,fieldOis)
  }

  // Example input: hadoop scala kafka hive hbase Oozie
  override def process(objects: Array[AnyRef]): Unit = {
    // Split the input string into individual tokens on spaces
    val strings: Array[String] = objects(0).toString.split(" ")
    for (str <- strings) {
      val tmp = new Array[String](1)
      tmp(0) = str
      // forward emits one output row per token
      forward(tmp)
    }
  }

  override def close(): Unit = {}
}
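
For this particular case, the same one-row-per-subject result can also be obtained with the built-in split and explode functions, without writing a Hive UDTF. A minimal sketch against the stuDF / student view defined above:

import org.apache.spark.sql.functions.{explode, split}

// Same effect as myUDTF(class): one output row per space-separated subject
stuDF.select(explode(split($"class", " ")).as("type")).show()

// Or in SQL against the temp view:
// spark.sql("select explode(split(class, ' ')) as type from student").show()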