Spark SQL conversions and custom function operations


Creating RDDs, DataFrames, and Datasets in Spark SQL and converting between them

package spark_sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

case class Emp(name: String, age: Long)

object spark_sql_json extends App {

  val ss: SparkSession = SparkSession.builder().master("local[*]").appName("sql").getOrCreate()

  // Needed for implicit conversions such as rdd.toDS() and df.as[...]
  import ss.implicits._

  // read.json already returns a DataFrame, so an extra toDF() is unnecessary
  val df: DataFrame = ss.read.json("datas/a.json")
  println("df")
  df.show()

  // DataFrame -> Dataset: bind each row to the Emp case class
  val ds: Dataset[Emp] = df.as[Emp]
  println("ds")
  ds.show()

  // DataFrame -> RDD yields untyped Rows; Dataset -> RDD keeps the element type
  val df_rdd: RDD[Row] = df.rdd
  val rdd: RDD[Emp] = ds.rdd
  println("ds.rdd")
  rdd.collect().foreach(println)

  // RDD -> Dataset via the implicits imported above
  println("rdd.toDS")
  rdd.toDS().show()
  ss.stop()
}
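For reference, datas/a.json is assumed to contain one JSON object per line matching the Emp schema (the names and values below are illustrative, not from the original):

{"name": "zhangsan", "age": 20}
{"name": "lisi", "age": 30}

The conversion also works in the opposite direction. A minimal sketch that could be appended inside the same object, relying on the ss.implicits._ import already in scope:

// RDD[Emp] -> DataFrame: column names are taken from the case class fields
val df2: DataFrame = rdd.toDF()
df2.show()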

Custom UDAFs (user-defined aggregate functions)

Weakly typed: extend UserDefinedAggregateFunction (deprecated since Spark 3.0; see the strongly typed version below)

package sparkSql

import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, DoubleType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

// Mirrors the schema of the input JSON (not otherwise used in this example)
case class User(name: String, age: Long)

object sparkSqlUdf {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSql")

    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    val df: DataFrame = spark.read.json("datas/sql")

    // Register the weakly typed UDAF for use in SQL under the name "udaf"
    spark.udf.register("udaf", new MyUDAF())

    df.createOrReplaceTempView("user")

    // Average age per name, plus one
    spark.sql("select name, udaf(age) + 1 as newAge from user group by name").show()

    spark.stop()
  }
}

class MyUDAF extends UserDefinedAggregateFunction {
  // Schema of the input: a single "age" column
  override def inputSchema: StructType = {
    StructType(Array(StructField("age", DoubleType)))
  }

  // Schema of the aggregation buffer: to compute an average we track
  // the running total and the element count
  override def bufferSchema: StructType = {
    StructType(Array(StructField("total", DoubleType), StructField("count", DoubleType)))
  }

  // Type of the final result
  override def dataType: DataType = DoubleType

  // The same input always produces the same output
  override def deterministic: Boolean = true

  // Initialize both buffer slots to zero
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer.update(0, 0D)
    buffer.update(1, 0D)
  }

  // Fold one input row into the buffer: add the age, bump the count
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    buffer.update(0, buffer.getDouble(0) + input.getDouble(0))
    buffer.update(1, buffer.getDouble(1) + 1)
  }

  // Merge two partial buffers (e.g. from different partitions)
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    buffer1.update(0, buffer1.getDouble(0) + buffer2.getDouble(0))
    buffer1.update(1, buffer1.getDouble(1) + buffer2.getDouble(1))
  }

  // Final result: total / count
  override def evaluate(buffer: Row): Any = {
    buffer.getDouble(0) / buffer.getDouble(1)
  }
}
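The registered name is only needed for SQL; a UserDefinedAggregateFunction can also be applied directly in the DataFrame API via its apply method. A minimal sketch that could replace the SQL call inside main, assuming the same df as above:

import org.apache.spark.sql.functions.col

// Calling the UDAF instance on a Column produces a Column aggregate expression
val avgUdaf = new MyUDAF()
df.groupBy(col("name")).agg(avgUdaf(col("age")).as("avgAge")).show()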

Strongly typed: extend Aggregator (org.apache.spark.sql.expressions.Aggregator)

package sparkSql

import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{DataFrame, Encoder, Encoders, SparkSession, functions}

object sparkSqlUdf_new {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSql")
    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    val df: DataFrame = spark.read.json("datas/sql")

    // UserDefinedAggregateFunction is deprecated since Spark 3.0:
    // an Aggregator[IN, BUF, OUT] is now registered as a UDF via functions.udaf(agg)
    spark.udf.register("udaf", functions.udaf(new UDAF()))
    df.createOrReplaceTempView("user")
    spark.sql("select name, udaf(age) + 1 as newAge from user group by name").show()
    spark.stop()
  }
}

// Use a case class to give the aggregation buffer a strong type
case class Buff(var sum: Long, var cnt: Long)

class UDAF extends Aggregator[Long, Buff, Long] {
  // Initial (zero) value of the buffer
  override def zero: Buff = {
    Buff(0L, 0L)
  }

  // Fold one input value into the buffer
  override def reduce(buff: Buff, a: Long): Buff = {
    buff.cnt = buff.cnt + 1L
    buff.sum = buff.sum + a
    buff
  }

  // Merge two partial buffers (e.g. from different partitions)
  override def merge(buff1: Buff, buff2: Buff): Buff = {
    buff1.sum = buff1.sum + buff2.sum
    buff1.cnt = buff1.cnt + buff2.cnt
    buff1
  }

  // Final result; note this is Long division, so the average is truncated
  override def finish(reduction: Buff): Long =
    reduction.sum / reduction.cnt

  // Encoders for (de)serializing the buffer and the output
  override def bufferEncoder: Encoder[Buff] = Encoders.product
  override def outputEncoder: Encoder[Long] = Encoders.scalaLong
}
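functions.udaf also works outside SQL: it returns an ordinary UserDefinedFunction that can be used in the DataFrame API. A minimal sketch, assuming the same df as above:

import org.apache.spark.sql.functions.col

// Wrap the Aggregator as a UserDefinedFunction, then apply it to a Column
val avgUdf = functions.udaf(new UDAF())
df.groupBy(col("name")).agg(avgUdf(col("age")).as("avgAge")).show()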