Kudu与Spark集成API操作

导入Maven坐标

<!-- Kudu Client依赖 -->
<dependency>
    <groupId>org.apache.kudu</groupId>
    <artifactId>kudu-client</artifactId>
    <version>1.9.0</version>
</dependency>
<!-- kudu与spark集成依赖 -->
<dependency>
    <groupId>org.apache.kudu</groupId>
    <artifactId>kudu-spark2_2.11</artifactId>
    <version>1.9.0</version>
</dependency>
<!-- 依赖Scala语言 -->
<dependency>
    <groupId>org.scala-lang</groupId>
    <artifactId>scala-library</artifactId>
    <version>2.11.12</version>
</dependency>
<!-- Spark Core 依赖 -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-core_2.11</artifactId>
    <version>2.4.5</version>
</dependency>
<!-- Spark SQL 依赖 -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.11</artifactId>
    <version>2.4.5</version>
</dependency>

使用RDD对表进行创建和删除(Create/Drop)操作

import org.apache.kudu.client.CreateTableOptions
import org.apache.kudu.spark.kudu.KuduContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

/**
 * @author: xu
 * @desc: Kudu与Spark集成,使用KuduContext创建表和删除表
 */
object TableRDDOperating {

  /**
   * Creates a Kudu table with a fixed user schema (id, name, age, gender).
   *
   * @param tableName   name of the Kudu table to create
   * @param kuduContext KuduContext instance used to issue the DDL call
   */
  def createKuduTable(tableName: String, kuduContext: KuduContext): Unit = {
    // a. Table schema. In Kudu the primary-key column must be declared
    // non-nullable and primary-key columns must come first in the schema.
    val schema: StructType = StructType(
      Seq(
        StructField("id", IntegerType, nullable = false),
        StructField("name", StringType, nullable = true),
        StructField("age", IntegerType, nullable = true),
        StructField("gender", StringType, nullable = true)
      )
    )
    // b. Primary-key column names
    val keys: Seq[String] = Seq("id")
    // c. Table options: partitioning and replication.
    // NOTE: use the explicit JavaConverters (.asJava) rather than the
    // deprecated implicit scala.collection.JavaConversions — the implicit
    // conversions are deprecated in 2.12 and removed in 2.13.
    import scala.collection.JavaConverters._
    val options: CreateTableOptions = new CreateTableOptions()
    // Hash-partition on the key columns into 3 buckets
    options.addHashPartitions(keys.asJava, 3)
    // Single replica — suitable only for a dev/test cluster
    options.setNumReplicas(1)

    // d. Issue the create-table request and report the new table's id
    val kuduTable = kuduContext.createTable(tableName, schema, keys, options)
    println(s"TableId: ${kuduTable.getTableId}")
  }

  /**
   * Drops a Kudu table if it exists; does nothing otherwise.
   *
   * @param tableName   name of the Kudu table to drop
   * @param kuduContext KuduContext instance used to issue the DDL call
   */
  def dropKuduTable(tableName: String, kuduContext: KuduContext): Unit = {
    // Only attempt the delete when the table actually exists
    if (kuduContext.tableExists(tableName)) {
      kuduContext.deleteTable(tableName)
    }
  }

  def main(args: Array[String]): Unit = {
    // 1. Build the SparkSession
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[2]")
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()

    // 2. Create the KuduContext (Kudu master address + SparkContext)
    val kuduContext: KuduContext = new KuduContext("node2:7051", spark.sparkContext)
    println(s"KuduContext: ${kuduContext}")

    // Create the table
    createKuduTable("kudu_users", kuduContext)

    // Drop the table
    dropKuduTable("kudu_users", kuduContext)

    // Application finished — release resources
    spark.stop()
  }
}

使用RDD对数据CRUD操作

import org.apache.kudu.spark.kudu.KuduContext
import org.apache.spark.TaskContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
 * @author: xu
 * @desc: Kudu与Spark集成,对Kudu表的数据,进行CRUD操作
 */
object DataRDDOperating {

  /** Inserts a small batch of mock user rows into the given Kudu table. */
  def insertData(spark: SparkSession, kuduContext: KuduContext, tableName: String): Unit = {
    // Build mock data: a Seq of tuples converts straight to a DataFrame
    // once column names are supplied via toDF.
    val rows = Seq(
      (1001, "zhangsan", 23, "男"),
      (1002, "lisi", 22, "男"),
      (1003, "xiaohong", 24, "女"),
      (1004, "zhaoliu2", 33, "男")
    )
    val df: DataFrame = spark.createDataFrame(rows).toDF("id", "name", "age", "gender")

    // Persist the DataFrame into the Kudu table
    kuduContext.insertRows(df, tableName)
  }

  /** Reads the Kudu table into an RDD[Row] and prints each row per partition. */
  def selectData(spark: SparkSession, kuduContext: KuduContext, tableName: String): Unit = {
    /*
		  def kuduRDD(
		      sc: SparkContext,
		      tableName: String,
		      columnProjection: Seq[String] = Nil,
		      options: KuduReadOptions = KuduReadOptions()
		  ): RDD[Row]
		 */
    // columnProjection must be given explicitly: the default Nil projects
    // no columns at all.
    val projected = Seq("id", "name", "age", "gender")
    val rowsRDD: RDD[Row] = kuduContext.kuduRDD(spark.sparkContext, tableName, columnProjection = projected)

    rowsRDD.foreachPartition { rows =>
      println(s"partitionId = ${TaskContext.getPartitionId()}")
      rows.foreach(r => println(r.toString()))
    }
  }

  /** Updates existing rows in the Kudu table; fails if a key does not exist. */
  def updateData(spark: SparkSession, context: KuduContext, tableName: String): Unit = {
    // Mock a single changed row
    val df: DataFrame = spark
      .createDataFrame(Seq((1001, "xqh", 21, "男")))
      .toDF("id", "name", "age", "gender")
    // updateRows: update when the key exists, error when it does not
    context.updateRows(df, tableName)
  }

  /** Upserts rows into the Kudu table: update when the key exists, insert otherwise. */
  def upsertData(spark: SparkSession, context: KuduContext, tableName: String): Unit = {
    // Mock one existing key (1001) and one new key (1005)
    val df: DataFrame = spark
      .createDataFrame(
        Seq(
          (1001, "zhangsanfeng", 24, "男"),
          (1005, "tianqi", 33, "男")
        )
      )
      .toDF("id", "name", "age", "gender")
    // upsert semantics: existing keys are updated, missing keys inserted
    context.upsertRows(df, tableName)
  }

  /** Deletes rows from the Kudu table by primary key. */
  def deleteData(spark: SparkSession, context: KuduContext, tableName: String): Unit = {
    // Only the key column is needed for a delete
    import spark.implicits._
    val keysDF: DataFrame = spark.sparkContext.parallelize(Seq(1001)).toDF("id")
    context.deleteRows(keysDF, tableName)
  }

  def main(args: Array[String]): Unit = {
    // 1. Build the SparkSession
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[2]")
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()

    // 2. Create the KuduContext (Kudu master address + SparkContext)
    val kuduContext: KuduContext = new KuduContext("node2:7051", spark.sparkContext)
    val tableName = "kudu_users"

    // Insert rows
    insertData(spark, kuduContext, tableName)

    // Query rows
    selectData(spark, kuduContext, tableName)

    // Update rows
    updateData(spark, kuduContext, tableName)

    // Upsert rows
    upsertData(spark, kuduContext, tableName)

    // Delete rows
    deleteData(spark, kuduContext, tableName)

    // Application finished — release resources
    spark.stop()
  }
}

使用SQL对Kudu操作

/**
 * @author: xu
 * @desc: 编写SparkSQL程序,从Kudu表加载load数据,进行转换,最终保存到Kudu表中
 */
object KuduSparkSQLOperation {
  // NOTE(review): this snippet shows no import block — it appears to rely on
  // org.apache.spark.sql.{DataFrame, SaveMode, SparkSession} and
  // org.apache.spark.sql.functions.udf being imported elsewhere; confirm.
  def main(args: Array[String]): Unit = {
    // Build the SparkSession
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[2]")
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()
    import spark.implicits._

    // Load the source data from the Kudu table
    val sourceDF: DataFrame = spark.read
      .format("kudu")
      .option("kudu.master", "node2:7051")
      .option("kudu.table", "kudu_users")
      .load()
    //sourceDF.printSchema()
    //sourceDF.show(10, truncate = false)

    /*
      +----+--------+---+------+
      |id  |name    |age|gender|
      +----+--------+---+------+
      |1001|zhangsan|23 |男    |  -> M
      |1002|lisi    |22 |男    |
      |1004|zhaoliu2|33 |男    |
      |1003|xiaohong|24 |女    |  -> F
      +----+--------+---+------+
     */
    // UDF mapping the Chinese gender label to a single-letter code;
    // anything unrecognized maps to "X".
    val genderToCode = udf { (g: String) =>
      if (g == "男") "M"
      else if (g == "女") "F"
      else "X"
    }

    // Apply the UDF, keeping the other columns unchanged
    val transformedDF: DataFrame = sourceDF.select(
      $"id", $"name", $"age",
      genderToCode($"gender").as("gender")
    )
    //transformedDF.printSchema()
    //transformedDF.show()

    // Write the transformed rows back to the Kudu table
    transformedDF
      .coalesce(1)
      .write
      .mode(SaveMode.Append) // save mode: append
      .format("kudu")
      .option("kudu.master", "node2:7051")
      .option("kudu.table", "kudu_users")
      .option("kudu.operation", "upsert") // upsert is the default operation
      .save()

    // Application finished — release resources
    spark.stop()
  }
}
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值