import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.types.{IntegerType, StringType, StructType}
object RddToDataFrame {

  // The case class must be defined outside main so Spark can derive an Encoder for it
  case class People(name: String, age: Int)

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("app").setMaster("local[*]")
    // Create the Spark SQL entry point
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    val sparkContext = spark.sparkContext
    val rdd: RDD[String] = sparkContext.textFile("file:///D:/data/people.txt")
    // Cache the RDD, since it is reused by several conversions below
    rdd.cache()
    val splitRdd = rdd.map { x =>
      val lines = x.split(" ")
      Row(lines(0), lines(1).trim.toInt)
    }
    // RDD -> DataFrame, option 1: build an explicit schema
    val schema = new StructType()
      .add("name", StringType, nullable = true)
      .add("age", IntegerType, nullable = true)
    val dataFrame = spark.createDataFrame(splitRdd, schema)
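    // Optional sanity check: print the schema and a few rows to verify the conversion
    dataFrame.printSchema()
    dataFrame.show(5)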
    // RDD -> DataFrame, option 2: map to a case class and call toDF
    import spark.implicits._ // required for the toDF/toDS conversions below
    val dataframe2 = rdd.map { x =>
      val lines = x.split(" ")
      People(lines(0), lines(1).trim.toInt)
    }.toDF()
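    // toDF also accepts explicit column names if you want to override the
    // case-class field names, e.g. .toDF("personName", "personAge") (hypothetical names)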
    // DataFrame -> RDD (yields an untyped RDD[Row])
    val rdd2 = dataframe2.rdd
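    // Fields must be pulled out of each Row by position, or by name via getAs; for example:
    val namesFromRows = rdd2.map(row => row.getString(0))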
    // RDD -> Dataset
    val dataset: Dataset[People] = rdd.map { x =>
      val lines = x.split(" ")
      People(lines(0), lines(1).trim.toInt)
    }.toDS()
    // Dataset -> RDD (yields a typed RDD[People])
    val rdd3 = dataset.rdd
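    // Unlike rdd2 above, rdd3 keeps the case-class type, so fields are accessed directly:
    val namesFromPeople = rdd3.map(_.name)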
    // DataFrame -> Dataset
    val dataset2: Dataset[People] = dataframe2.as[People]
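    // Note: as[People] requires the DataFrame's column names and types to line up
    // with the case-class fields; a mismatch fails analysis at runtime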
    // Dataset -> DataFrame
    val dataFrame3: DataFrame = dataset2.toDF()

    // spark.stop() also stops the underlying SparkContext, so a single call suffices
    spark.stop()
  }
}
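The code assumes D:/data/people.txt holds one space-separated "name age" pair per line; a hypothetical example:

Michael 29
Andy 30
Justin 19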
<!-- Appendix: Maven dependencies for Spark -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-core_2.11</artifactId>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.11</artifactId>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-hive_2.11</artifactId>
</dependency>
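<!-- The <version> tags are omitted above and assumed to come from a parent POM's
     dependencyManagement; if not, add one explicitly to each dependency, e.g.
     <version>2.4.8</version> (a Scala-2.11-compatible Spark release). -->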