UDF
One-to-one: each input row produces exactly one output row.
Example: count the number of hobbies per user.
package function
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SparkSession}
case class Hobbies(name:String,hobbies:String)
object UDFDemo {
def main(args: Array[String]): Unit = {
val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("demo")
val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
val sc: SparkContext = spark.sparkContext
import spark.implicits._
val rdd: RDD[String] = sc.textFile("in/hobbies.txt")
val frame: DataFrame = rdd.map(_.split(" ")).map(x=>Hobbies(x(0),x(1))).toDF()
frame.createOrReplaceTempView("hobby")
spark.udf.register("hobby_num",(x:String)=>x.split(",").size)
spark.sql("select name,hobbies,hobby_num(hobbies) as hobbynum from hobby").show(false)
}
}
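Note that in/hobbies.txt itself is not shown here; the parsing above assumes lines of the form `alice jogging,coding,reading` (a name and a comma-separated hobby list, separated by a space). A UDF registered with spark.udf.register is only visible to SQL; to apply the same logic through the DataFrame API instead, a minimal sketch (the val name hobbyNum is illustrative):

import org.apache.spark.sql.functions.udf
// same counting logic, wrapped as a DataFrame-API UDF
val hobbyNum = udf((s: String) => s.split(",").length)
frame.withColumn("hobbynum", hobbyNum($"hobbies")).show(false)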
UDAF
Many-to-one: multiple input rows are aggregated into one output value.
package function
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
case class Student(id:Integer,name:String,gender:String,age:Integer)
object UDAFDemo {
def main(args: Array[String]): Unit = {
val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("demo")
val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
val sc: SparkContext = spark.sparkContext
import spark.implicits._
val students = Seq(
Student(1, "a", "F", 10),
Student(2, "b", "F", 20),
Student(3, "c", "M", 19),
Student(4, "d", "F", 22),
Student(5, "e", "F", 35),
Student(6, "f", "M", 18),
Student(7, "g", "F", 23),
Student(8, "h", "F", 22),
Student(9, "i", "M", 15)
)
val df: DataFrame = students.toDF()
val myUDAF = new MyAgeAvgFunction
spark.udf.register("myAvg",myUDAF)
df.createOrReplaceTempView("students")
val resultDF: DataFrame = spark.sql("select gender,myAvg(age) as avgage from students group by gender")
resultDF.printSchema()
resultDF.show()
}
}
class MyAgeAvgFunction extends UserDefinedAggregateFunction{
//input schema of the aggregate function
override def inputSchema: StructType = {
new StructType().add("age",LongType)
// StructType(StructField("age",LongType)::Nil)
}
//schema of the aggregation buffer
override def bufferSchema: StructType = {
new StructType().add("sum",LongType).add("count",LongType)
// StructType(StructField("num",LongType)::StructField("count",LongType)::Nil)
}
//data type of the aggregate function's return value
override def dataType: DataType = DoubleType
//whether the function always produces the same output for the same input, i.e. whether it is deterministic
override def deterministic: Boolean = true
//initialize the buffer
override def initialize(buffer: MutableAggregationBuffer): Unit = {
buffer(0) = 0L //running sum of all ages passed in, e.g. [12, 23, 41] -> 76
buffer(1) = 0L //number of input rows seen so far, e.g. 3
}
//how to update the buffer for each new input row
override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
buffer(0) = buffer.getLong(0) + input.getLong(0)
buffer(1) = buffer.getLong(1) + 1
}
//merge partial buffers from different partitions
override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
//total sum of ages
buffer1(0) = buffer1.getLong(0) + buffer2.getLong(0)
//total number of rows
buffer1(1) = buffer1.getLong(1) + buffer2.getLong(1)
}
//compute the final result from the buffer: sum / count
override def evaluate(buffer: Row): Any = {
buffer.getLong(0).toDouble / buffer.getLong(1)
}
}
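Note that UserDefinedAggregateFunction is deprecated as of Spark 3.0 in favor of the type-safe Aggregator registered through functions.udaf. A minimal sketch of the same average under that API, assuming Spark 3.0+ (the AvgBuffer / MyAvgAggregator names are illustrative):

import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Encoder, Encoders, functions}
case class AvgBuffer(sum: Long, count: Long)
object MyAvgAggregator extends Aggregator[Long, AvgBuffer, Double] {
  // initial buffer value, same role as initialize above
  override def zero: AvgBuffer = AvgBuffer(0L, 0L)
  // fold one input age into the buffer, same role as update
  override def reduce(b: AvgBuffer, age: Long): AvgBuffer = AvgBuffer(b.sum + age, b.count + 1)
  // combine partial buffers across partitions, same role as merge
  override def merge(b1: AvgBuffer, b2: AvgBuffer): AvgBuffer = AvgBuffer(b1.sum + b2.sum, b1.count + b2.count)
  // final result, same role as evaluate
  override def finish(b: AvgBuffer): Double = b.sum.toDouble / b.count
  override def bufferEncoder: Encoder[AvgBuffer] = Encoders.product
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}
// registration: spark.udf.register("myAvg", functions.udaf(MyAvgAggregator))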
UDTF
One-to-many: one input row can produce multiple output rows.
package function
import java.util
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, ObjectInspectorFactory, PrimitiveObjectInspector, StructObjectInspector}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SparkSession}
object UDTFDemo {
def main(args: Array[String]): Unit = {
val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("udtfdemo")
val spark: SparkSession = SparkSession.builder().config(conf)
.config("hive.metastore.uris", "thrift://192.168.232.211:9083")
.enableHiveSupport()
.getOrCreate()
val sc: SparkContext = spark.sparkContext
import spark.implicits._
val rdd: RDD[String] = sc.textFile("in/udtf.txt")
val frame: DataFrame = rdd.map(x => x.split("//")).filter(x => x(1).equals("ls"))
.map(x => (x(0), x(1), x(2))).toDF("id", "name", "class")
frame.createOrReplaceTempView("udtftable")
//a Hive GenericUDTF cannot be registered through spark.udf.register, so it is registered via Hive SQL instead
// spark.udf.register("myudtf", new MyUDTF())
spark.sql("create temporary function myudtf as 'function.MyUDTF'")
spark.sql("select myudtf(class) from udtftable").show()
}
}
class MyUDTF extends GenericUDTF{
override def initialize(argOIs: Array[ObjectInspector]): StructObjectInspector = {
val fieldName = new util.ArrayList[String]()
val fieldOIS = new util.ArrayList[ObjectInspector]()
//define the output column name and type
fieldName.add("type")
fieldOIS.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector)
ObjectInspectorFactory.getStandardStructObjectInspector(fieldName,fieldOIS)
}
// input: "Hadoop scala spark hive hbase"
/*
output: one row per word, in a string column named `type`:
Hadoop
scala
spark
hive
hbase
*/
override def process(objects: Array[AnyRef]): Unit = {
//split the input string on spaces into an array of words
val strings: Array[String] = objects(0).toString.split(" ")
for(str <- strings){
val temp = new Array[String](1)
temp(0) = str
forward(temp)
}
}
override def close(): Unit = {}
}
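When the goal is simply one-to-many splitting, the same result is available without a custom Hive UDTF through Spark SQL's built-in split and explode functions; a minimal sketch against the same temp view:

// built-in equivalent of MyUDTF: one output row per space-separated word in class
spark.sql("select explode(split(class, ' ')) as type from udtftable").show()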