Spark_UDTF

import java.util

import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, ObjectInspectorFactory, StructObjectInspector}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Custom functions:
  * UDF:  User-Defined Function; operates on a single row and returns a single value (one in, one out)
  * UDAF: User-Defined Aggregation Function; aggregates multiple rows into one value, e.g. sum(), avg() (many in, one out)
  * UDTF: User-Defined Table-Generating Function; takes one input row and produces multiple output rows (one in, many out)
  *
  * A minimal native one-in-one-out UDF counterpart is sketched after the MyUDTF class below.
  */
// one row in, many rows out
object UDTF {
  def main(args: Array[String]): Unit = {
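    // Hive support is required for the "create temporary function" call below;
    // the metastore URI is environment-specific.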
    val sparkSession: SparkSession = SparkSession.builder().appName("UDTF").master("local[*]")
      .config("hive.metastore.uris", "thrift://192.168.153.101:9083")
      .enableHiveSupport()
      .getOrCreate()

    val sc: SparkContext = sparkSession.sparkContext

    import sparkSession.implicits._

    val rdd: RDD[String] = sc.textFile("in/UDTF.txt")
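    // Assumed layout of in/UDTF.txt ("//"-delimited; this sample line is illustrative):
    //   01//ls//Hadoop scala kafka hive hbase Oozie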
    val rdd2: RDD[(String, String, String)] = rdd
      .map(x => x.split("//"))
      .filter(x => x(1).equals("ls"))
      .map(x => (x(0), x(1), x(2)))

    val frame: DataFrame = rdd2.toDF("id", "name", "class")
    frame.printSchema()
    frame.show(false)

    frame.createOrReplaceTempView("udtfTable")

    sparkSession.sql("create temporary function Myudtf as 'kb15.function.MyUDTF'")
    sparkSession.sql("select Myudtf(class) from udtfTable").show(false)
  }
}
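
For this particular one-to-many expansion, Spark's built-in explode can produce the same result without any Hive UDTF. A minimal sketch, assuming the sparkSession and the "udtfTable" view created in main above:

    import org.apache.spark.sql.functions.{col, explode, split}

    // Split the space-delimited "class" column and emit one row per token,
    // naming the output column "type" to match the UDTF's schema.
    sparkSession.table("udtfTable")
      .select(explode(split(col("class"), " ")).as("type"))
      .show(false)

The same thing can be written in SQL as select explode(split(class, ' ')) as type from udtfTable; a custom UDTF like the one below is still useful when the row-generation logic is more involved than a simple split.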
class MyUDTF extends GenericUDTF {

  override def initialize(argOIs: Array[ObjectInspector]): StructObjectInspector = {
    val fieldNames: util.ArrayList[String] = new util.ArrayList[String]()
    val fieldOIs: util.ArrayList[ObjectInspector] = new util.ArrayList[ObjectInspector]()

    // Define the output schema: a single string column named "type"
    fieldNames.add("type")
    fieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector)
    ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOIs)
  }

  /*
   * Input:  "Hadoop scala kafka hive hbase Oozie" (a single space-delimited string)
   * Output: one row per word under the single string column "type":
   *           type
   *           ------
   *           Hadoop
   *           scala
   *           kafka
   *           hive
   *           hbase
   *           Oozie
   */

  // process is called once per input row: split the space-delimited
  // string and forward one output row per word.
  override def process(objects: Array[AnyRef]): Unit = {
    val strings: Array[String] = objects(0).toString.split(" ")
    for (str <- strings) {
      forward(Array(str))
    }
  }

  // No resources to release
  override def close(): Unit = {}
}
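
For contrast with the one-in-many-out UDTF above, a one-in-one-out function needs no Hive classes at all: Spark can register a plain Scala function directly. A minimal sketch, assuming the same sparkSession and view as in main ("myUpper" is an illustrative name, not part of the original code):

    // One in, one out: register a plain Scala closure as a Spark SQL UDF.
    sparkSession.udf.register("myUpper", (s: String) => s.toUpperCase)
    sparkSession.sql("select myUpper(name) from udtfTable").show(false)

A many-in-one-out UDAF would instead extend org.apache.spark.sql.expressions.Aggregator (or the older UserDefinedAggregateFunction), completing the three categories listed in the header comment.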