Spark支持多种格式文件生成DataFrame,只需在读取文件时调用相应方法即可,本文以txt文件为例。
反射机制实现RDD转换DataFrame的过程: 1. 定义样例类; 2. RDD与样例类关联; 3. RDD转换为DataFrame。
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
case class Person(id:String, name:String, age:Int)
/**
 * Demonstrates the reflection-based RDD-to-DataFrame conversion:
 *   1. read a text file into an RDD of whitespace-split fields;
 *   2. map each row onto the Person case class;
 *   3. call toDF() (via spark.implicits) to obtain the DataFrame.
 *
 * Expects a classpath resource "person.txt" whose lines have the form
 * "&lt;id&gt; &lt;name&gt; &lt;age&gt;" (single-space separated, age parseable as Int).
 */
object SparkTextFile {
  def main(args: Array[String]): Unit = {
    // 1. Create the SparkSession (the entry point for the DataFrame/SQL API).
    val spark: SparkSession = SparkSession.builder()
      .appName("StructTypeTest01")
      .master("local[*]")
      .getOrCreate()
    // 2. The underlying SparkContext is needed for the low-level textFile API.
    val sc: SparkContext = spark.sparkContext
    // Valid levels are the upper-case names (WARN, INFO, ERROR, ...).
    sc.setLogLevel("WARN")
    // 3. Resolve person.txt from the classpath; fail fast with a clear message
    //    instead of the opaque NullPointerException `getResource(...).getPath`
    //    throws when the resource is missing.
    val resourceUrl = Option(this.getClass.getClassLoader.getResource("person.txt"))
      .getOrElse(sys.error("resource 'person.txt' not found on classpath"))
    // Drop blank lines up front: an empty line splits to an empty array and
    // would crash the Person mapping below with ArrayIndexOutOfBoundsException.
    val data: RDD[Array[String]] = sc.textFile(resourceUrl.getPath)
      .filter(_.trim.nonEmpty)
      .map(_.split(" "))
    // 4. Associate each row with the Person case class (x(2) must parse as Int).
    val personRDD: RDD[Person] = data.map(x => Person(x(0), x(1), x(2).toInt))
    // 5. Convert the RDD to a DataFrame via the implicit toDF() extension.
    import spark.implicits._
    val personDF: DataFrame = personRDD.toDF()
    // 6. Show the inferred schema and the data.
    println("******************************************")
    personDF.printSchema()
    personDF.show()
    println("******************************************")
    // 7. Release Spark resources.
    spark.stop()
  }
}