1、hadoop-3.2.2、spark-3.2.1-bin-hadoop3.2和apache-hive-3.1.2已经安装好,并且hive metastore和thrift服务已经启动
2、执行spark shell登录
# Launch spark-shell with:
#  - the Hudi Spark 3.2 bundle (fetched via --packages, so network access is required on first run)
#  - Kryo serialization (spark.serializer), which Hudi requires
#  - the Hudi HoodieCatalog as the session catalog
#  - the Hudi SQL session extensions
# NOTE(review): no comments may be placed between the backslash-continued lines below.
spark-shell \
--packages org.apache.hudi:hudi-spark3.2-bundle_2.12:0.12.0 \
--conf 'spark.serializer=org.apache.spark.serializer.KryoSerializer' \
--conf 'spark.sql.catalog.spark_catalog=org.apache.spark.sql.hudi.catalog.HoodieCatalog' \
--conf 'spark.sql.extensions=org.apache.spark.sql.hudi.HoodieSparkSessionExtension'
3、执行从hdfs读取文本数据文件(Huditestdata.txt)并转换为hudi表
// NOTE(review): scala.collection.JavaConversions._ provides implicit Java/Scala
// collection conversions and is deprecated; prefer the explicit
// scala.jdk.CollectionConverters._ (.asScala / .asJava) when this snippet is revised.
import scala.collection.JavaConversions._
import org.apache.spark.sql.SaveMode._
import org.apache.hudi.DataSourceReadOptions._
import org.apache.hudi.DataSourceWriteOptions._
import org.apache.hudi.config.HoodieWriteConfig._
// spark.implicits._ enables .toDF/.toDS on RDDs and Seqs inside the spark-shell session.
import spark.implicits._
/** Record schema for one row of the Hudi test data.
  *
  * @param id   record identifier
  * @param name record name
  * @param ts   event timestamp (epoch value; used by Hudi as the precombine field in this tutorial)
  * @param dt   date string (partition component)
  * @param hh   hour string (partition component)
  */
case class Peoplest(
  id: Int,
  name: String,
  ts: Long,
  dt: String,
  hh: String
)
// Load the raw data file from HDFS as an RDD[String] with 2 partitions.
// NOTE(review): `sc` is the SparkContext provided implicitly by the spark-shell session;
// the path is resolved against the default filesystem (HDFS here) — confirm the file exists.
val rdd = sc.textFile("/tmp/huditestdata2/Huditestdata.txt",2)
val