Spark SQL can interact with many kinds of data sources, such as plain text, JSON, Parquet, CSV, MySQL, and more:
1. Writing to different data sources
2. Reading from different data sources (a read-back sketch follows the write demo below)
Writing data:
package cn.itcast.sql
import java.util.Properties
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
object WriterDataSourceDemo {
  case class Person(id: Int, name: String, age: Int)
  def main(args: Array[String]): Unit = {
    //1. Create a SparkSession
    val spark: SparkSession = SparkSession.builder().master("local[*]").appName("SparkSQL")
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")
    //2. Read the file and parse each line into a Person
    val fileRDD: RDD[String] = sc.textFile("D:\\data\\person.txt")
    val linesRDD: RDD[Array[String]] = fileRDD.map(_.split(" "))
    val rowRDD: RDD[Person] = linesRDD.map(line => Person(line(0).toInt, line(1), line(2).toInt))
    //3. Convert the RDD of case-class objects to a DataFrame
    import spark.implicits._
    val personDF: DataFrame = rowRDD.toDF()
    //4. Write the DataFrame out to different data sources
    //(the text source supports only a single column, so it is not used here)
    personDF.write.json("D:\\data\\output\\json")
    personDF.write.csv("D:\\data\\output\\csv")
    personDF.write.parquet("D:\\data\\output\\parquet")
    //JDBC target: the URL, user, and password below are placeholders; adjust to your environment
    val prop = new Properties()
    prop.setProperty("user", "root")
    prop.setProperty("password", "root")
    personDF.write.mode(SaveMode.Overwrite)
      .jdbc("jdbc:mysql://localhost:3306/bigdata?characterEncoding=UTF-8", "person", prop)
    sc.stop()
    spark.stop()
  }
}
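Reading data:

Item 2 of the list above covers the reverse direction. The following is a minimal sketch of reading each format back in, assuming the output paths, MySQL table, and credentials written by the demo above; those values are placeholders, not fixed requirements of the API.

package cn.itcast.sql
import java.util.Properties
import org.apache.spark.sql.SparkSession
object ReadDataSourceDemo {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder().master("local[*]").appName("SparkSQL")
      .getOrCreate()
    spark.sparkContext.setLogLevel("WARN")
    //Read back each format written above (paths are placeholders)
    spark.read.json("D:\\data\\output\\json").show()
    //CSV carries no schema by default, so column names are reassigned here
    spark.read.csv("D:\\data\\output\\csv").toDF("id", "name", "age").show()
    //Parquet is self-describing, so the schema comes back automatically
    spark.read.parquet("D:\\data\\output\\parquet").show()
    //JDBC read; the URL, user, and password are placeholders
    val prop = new Properties()
    prop.setProperty("user", "root")
    prop.setProperty("password", "root")
    spark.read.jdbc("jdbc:mysql://localhost:3306/bigdata?characterEncoding=UTF-8", "person", prop).show()
    spark.stop()
  }
}

Note the asymmetry between formats: JSON and Parquet preserve column names (and Parquet also preserves types), while CSV written without a header loses them, which is why the read side has to restore the schema by hand.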