Testing Hudi 0.7.0 with Spark Structured Streaming
Test environment
Hudi version: 0.7.0
Spark version: 2.4.0
Hive version: 2.1.1
Hadoop version: 3.0.0
Storage (HDFS/S3/GCS..): HDFS
Running on Docker? (yes/no): no
Test content
Table type: copy on write
Write operation: bulk_insert
File size control: clustering configuration
Hive sync: enabled
The configuration used for the test is as follows:
// Hudi configuration
.option(TABLE_TYPE_OPT_KEY, "COPY_ON_WRITE")
.option("hoodie.datasource.write.operation", "bulk_insert")
.option(PRECOMBINE_FIELD_OPT_KEY, "kafka_timestamp")
.option(RECORDKEY_FIELD_OPT_KEY, "kafka_partition_offset") // Kafka partition and offset combined as the record key
.option(PARTITIONPATH_FIELD_OPT_KEY, "partition_date")     // current date as the partition path
// Clustering configuration
.option("hoodie.parquet.small.file.limit", "0")
.option("hoodie.clustering.inline", "true")
.option("hoodie.clustering.inline.max.commits", "4")
.option("hoodie.clustering.plan.strategy.target.file.max.bytes", "1073741824")
.option("hoodie.clustering.plan.strategy.small.file.limit", "629145600")
.option("hoodie.clustering.plan.strategy.sort.columns", "") // optional, if sorting is needed as part of rewriting data
// Hive sync configuration
.option("hoodie.table.name", "copy_on_write_table")
.option("hoodie.datasource.write.hive_style_partitioning", "true")
.option(DataSourceWriteOptions.HIVE_DATABASE_OPT_KEY, "hudi")
.option(DataSourceWriteOptions.HIVE_TABLE_OPT_KEY, "hudi_hive_sync")
.option(DataSourceWriteOptions.HIVE_SYNC_ENABLED_OPT_KEY, "true")
.option(DataSourceWriteOptions.HIVE_PARTITION_FIELDS_OPT_KEY, "")
.option(DataSourceWriteOptions.HIVE_URL_OPT_KEY, "jdbc:hive2://ip:port")
.option(DataSourceWriteOptions.HIVE_USER_OPT_KEY, "USERNAME")
.option(DataSourceWriteOptions.HIVE_PASS_OPT_KEY, "PASSWORD")
.option("hoodie.embed.timeline.server", "false") // HoodieWriteConfig.EMBEDDED_TIMELINE_SERVER_ENABLED
// Cleaning configuration
.option("hoodie.cleaner.policy", "KEEP_LATEST_FILE_VERSIONS")
.option("hoodie.cleaner.fileversions.retained", "1")
.option("hoodie.clean.automatic", "true")
.option("hoodie.clean.async", "true")
The test code is as follows:
import java.time.LocalDateTime

import org.apache.hudi.DataSourceWriteOptions
import org.apache.hudi.DataSourceWriteOptions._
import org.apache.log4j.Logger
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.sql.streaming.StreamingQueryListener
import org.apache.spark.sql.streaming.StreamingQueryListener.{QueryProgressEvent, QueryStartedEvent, QueryTerminatedEvent}

object SparkHudi {
  val logger = Logger.getLogger(SparkHudi.getClass)

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .appName("SparkHudi")
      //.master("local[*]")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.default.parallelism", 9)
      .config("spark.sql.shuffle.partitions", 9)
      .enableHiveSupport()
      .getOrCreate()
    // Add a listener: when each micro-batch completes, print its details
    // (starting offsets, number of records fetched, processing time) to the console
    spark.streams.addListener(new StreamingQueryListener() {
      override def onQueryStarted(queryStarted: QueryStartedEvent): Unit = {
        println("Query started: " + queryStarted.id)
      }
      override def onQueryTerminated(queryTerminated: QueryTerminatedEvent): Unit = {
        println("Query terminated: " + queryTerminated.id)
      }
      override def onQueryProgress(queryProgress: QueryProgressEvent): Unit = {
        println("Query made progress: " + queryProgress.progress)
      }
    })
    // Define the Kafka source
    val dataStreamReader = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "kafka IP:9092")
      .option("subscribe", "TopicName")
      .option("startingOffsets", "latest")
      .option("maxOffsetsPerTrigger", 100000)
    // Load the stream. Since this is only a test, the Kafka messages are read as-is
    // without further processing; Spark Structured Streaming automatically attaches
    // the Kafka metadata of each message, such as its topic, partition, and offset.
    val df = dataStreamReader.load()
      .selectExpr(
        "topic as kafka_topic",
        "CAST(partition AS STRING) kafka_partition",
        "CAST(timestamp AS STRING) kafka_timestamp",
        "CAST(offset AS STRING) kafka_offset",
        "CAST(key AS STRING) kafka_key",
        "CAST(value AS STRING) kafka_value",
        "current_timestamp() current_time")
      .selectExpr(
        "kafka_topic",
        "concat(kafka_partition, '-', kafka_offset) kafka_partition_offset",
        "kafka_offset",
        "kafka_timestamp",
        "kafka_key",
        "kafka_value",
        "substr(current_time, 1, 10) partition_date")
    // Create and start the query
    val query = df
      .writeStream
      .queryName("demo")
      .foreachBatch { (batchDF: DataFrame, _: Long) =>
        batchDF.persist()
        println(LocalDateTime.now() + " start writing cow table")
        batchDF.write.format("org.apache.hudi")
          // Hudi configuration
          .option(TABLE_TYPE_OPT_KEY, "COPY_ON_WRITE")
          .option("hoodie.datasource.write.operation", "bulk_insert")
          .option(PRECOMBINE_FIELD_OPT_KEY, "kafka_timestamp")
          .option(RECORDKEY_FIELD_OPT_KEY, "kafka_partition_offset") // Kafka partition and offset combined as the record key
          .option(PARTITIONPATH_FIELD_OPT_KEY, "partition_date")     // current date as the partition path
          // Clustering configuration
          .option("hoodie.parquet.small.file.limit", "0")
          .option("hoodie.clustering.inline", "true")
          .option("hoodie.clustering.inline.max.commits", "4")
          .option("hoodie.clustering.plan.strategy.target.file.max.bytes", "1073741824")
          .option("hoodie.clustering.plan.strategy.small.file.limit", "629145600")
          .option("hoodie.clustering.plan.strategy.sort.columns", "") // optional, if sorting is needed as part of rewriting data
          // Hive sync configuration
          .option("hoodie.table.name", "copy_on_write_table")
          .option("hoodie.datasource.write.hive_style_partitioning", "true")
          .option(DataSourceWriteOptions.HIVE_DATABASE_OPT_KEY, "hudi")
          .option(DataSourceWriteOptions.HIVE_TABLE_OPT_KEY, "hudi_hive_sync")
          .option(DataSourceWriteOptions.HIVE_SYNC_ENABLED_OPT_KEY, "true")
          .option(DataSourceWriteOptions.HIVE_PARTITION_FIELDS_OPT_KEY, "")
          .option(DataSourceWriteOptions.HIVE_URL_OPT_KEY, "jdbc:hive2://ip:port")
          .option(DataSourceWriteOptions.HIVE_USER_OPT_KEY, "USERNAME")
          .option(DataSourceWriteOptions.HIVE_PASS_OPT_KEY, "PASSWORD")
          .option("hoodie.embed.timeline.server", "false") // HoodieWriteConfig.EMBEDDED_TIMELINE_SERVER_ENABLED
          // Cleaning configuration
          .option("hoodie.cleaner.policy", "KEEP_LATEST_FILE_VERSIONS")
          .option("hoodie.cleaner.fileversions.retained", "1")
          .option("hoodie.clean.automatic", "true")
          .option("hoodie.clean.async", "true")
          .mode(SaveMode.Append)
          .save("/hudi/sparkHudi/hudi_table")
        println(LocalDateTime.now() + " finish")
        batchDF.unpersist()
      }
      .option("checkpointLocation", "/tmp/sparkHudi/checkpoint/")
      .start()

    query.awaitTermination()
  }
}
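A note on dependencies: running this against Spark 2.4 needs the hudi-spark-bundle_2.11 (0.7.0) jar and spark-avro on the classpath, along with the Kryo serializer that is already set in the SparkSession configuration above.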
Test results
After replacing the previous hudi-0.6.0 with hudi-0.7.0, the test ran without errors, as expected. The new version adds clustering support: small files are merged into larger ones, which reduces the number of files the query engine has to scan and therefore improves query performance. The trade-off is that the merge work slows down writes.
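To sanity-check the clustering result, the table can be read back through the Hudi datasource. A minimal sketch follows, assuming the table path used in the save() call above; the "/*/*" glob (one partition level plus the files in it) is how a single-level partitioned table is read with Hudi on Spark 2.4:

// Snapshot-read the COW table back through the Hudi datasource
val hudiDF = spark.read
  .format("org.apache.hudi")
  .load("/hudi/sparkHudi/hudi_table/*/*")

// Row counts per partition should match what was ingested from Kafka.
// After an inline clustering run, each partition directory on HDFS should
// hold fewer, larger parquet files (check with hdfs dfs -ls).
hudiDF.groupBy("partition_date").count().show()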