Spark DSL
Data sources for the DSL
package com.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo4DataSource {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder
      .master("local")
      .appName("sql")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._
    /**
     * csv format:
     * specify the schema, the separator (csv defaults to ","), and the file path
     */
    val csvDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .load("data/students.txt")
    // Save the result in csv format
    csvDF
      .groupBy($"clazz")
      .agg(count($"clazz") as "count_stu")
      .write
      .format("csv")
      .option("sep", ",")
      .mode(SaveMode.Overwrite)
      //.save("data/sco_count")
    /**
     * json format:
     * Spark infers the field names and field types from the json automatically
     *
     * json takes more space than csv, so it is a poor fit for large data volumes
     */
    val jsonDF: DataFrame = spark
      .read
      .format("json")
      .load("data/students.json")

    // Save the result in json format
    jsonDF
      .groupBy($"clazz")
      .agg(count($"gender") as "gender_num")
      .write
      .format("json")
      .mode(SaveMode.Overwrite)
      //.save("data/gender_num")
    /**
     * parquet: a compressed format that carries the table schema
     * compression trades time for space
     * the compression ratio depends on the information entropy of the data
     */
    // Save the data in parquet format
    jsonDF
      .write
      .format("parquet")
      .mode(SaveMode.Overwrite)
      .save("data/parquet_sj")

    // Read the parquet data back
    val parquetDF: DataFrame = spark
      .read
      .format("parquet")
      .load("data/parquet_sj")
    //parquetDF.show(22)
    /**
     * Read data over JDBC
     */
    val jdbcDF: DataFrame = spark
      .read
      .format("jdbc")
      .option("url", "jdbc:mysql://master:3306")
      .option("dbtable", "bigdata17.dept")
      .option("user", "root")
      .option("password", "123456")
      .load()

    jdbcDF.printSchema()
    jdbcDF.show()
  }
}
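Writing a DataFrame back through JDBC uses the same option pattern as the read. A minimal sketch, assuming the same MySQL instance as above; the target table bigdata17.dept_bak is hypothetical:

package com.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo4JdbcWrite {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder
      .master("local")
      .appName("jdbc-write")
      .getOrCreate()

    // Read the source table, as in Demo4DataSource
    val deptDF: DataFrame = spark
      .read
      .format("jdbc")
      .option("url", "jdbc:mysql://master:3306")
      .option("dbtable", "bigdata17.dept")
      .option("user", "root")
      .option("password", "123456")
      .load()

    // Write back over jdbc; Overwrite drops and recreates the target table
    deptDF
      .write
      .format("jdbc")
      .option("url", "jdbc:mysql://master:3306")
      .option("dbtable", "bigdata17.dept_bak") // hypothetical target table
      .option("user", "root")
      .option("password", "123456")
      .mode(SaveMode.Overwrite)
      .save()
  }
}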
Spark DSL: converting an RDD to a DataFrame
package com.sql

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo5RDDToDF {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder
      .master("local")
      .config("spark.sql.shuffle.partitions", 1)
      .appName("rdd")
      .getOrCreate()

    import spark.implicits._
    /**
     * Get the SparkContext to use the RDD API
     */
    val sc: SparkContext = spark.sparkContext

    val linesRDD: RDD[String] = sc.textFile("data/students.txt")

    val studentsRDD: RDD[(String, String, String, String, String)] = linesRDD
      .map(_.split(","))
      .map {
        case Array(id: String, name: String, age: String, gender: String, clazz: String) =>
          (id, name, age, gender, clazz)
      }
    /**
     * Convert the RDD into a DF
     */
    val studentDF: DataFrame = studentsRDD.toDF("id", "name", "age", "gender", "clazz")
    studentDF.printSchema()
    studentDF.show()
  }
}
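toDF infers the column types from the tuple; when the types need to be set explicitly, the same conversion can go through spark.createDataFrame with an RDD[Row] plus a StructType schema. A minimal sketch against the same students.txt layout (same column names as above):

package com.sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object Demo5RowToDF {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder
      .master("local")
      .appName("rdd")
      .getOrCreate()

    // Build an RDD[Row] instead of an RDD of tuples
    val rowRDD: RDD[Row] = spark.sparkContext
      .textFile("data/students.txt")
      .map(_.split(","))
      .map(arr => Row(arr(0), arr(1), arr(2).toInt, arr(3), arr(4)))

    // Declare the columns explicitly; this fixes both names and types
    val schema: StructType = StructType(Seq(
      StructField("id", StringType),
      StructField("name", StringType),
      StructField("age", IntegerType),
      StructField("gender", StringType),
      StructField("clazz", StringType)
    ))

    val studentDF: DataFrame = spark.createDataFrame(rowRDD, schema)
    studentDF.printSchema()
    studentDF.show()
  }
}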
Spark DSL: converting a DataFrame to an RDD
package com.sql

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object Demo6DFToRDD {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder
      .appName("df")
      .master("local")
      .getOrCreate()

    import spark.implicits._

    val studentDF: DataFrame = spark
      .read
      .format("json")
      .load("data/students.json")
    // studentDF.printSchema()
    // studentDF.show()

    // Convert the DF into an RDD
    val studentsRDD: RDD[Row] = studentDF.rdd

    val stuRDD: RDD[(String, String, Long, String, String)] = studentsRDD.map((row: Row) => {
      // Retrieve the values by column name
      val id: String = row.getAs[String]("id")
      val name: String = row.getAs[String]("name")
      val age: Long = row.getAs[Long]("age")
      val gender: String = row.getAs[String]("gender")
      val clazz: String = row.getAs[String]("clazz")
      (id, name, age, gender, clazz)
    })
    stuRDD.foreach(println)

    // Rows can also be deconstructed by pattern matching on Row; the match is
    // positional, and the json reader sorts inferred columns alphabetically
    // (age, clazz, gender, id, name), so mind the field order
    val caseRDD: RDD[(String, String, Long, String, String)] = studentsRDD.map {
      case Row(age: Long, clazz: String, gender: String, id: String, name: String) =>
        (id, name, age, gender, clazz)
    }
    caseRDD.foreach(println)
  }
}
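Positional Row matching is easy to get wrong; mapping the DataFrame onto a case class with .as[...] resolves columns by name instead. A minimal sketch, assuming the same students.json fields (json infers integers as Long); the Student case class is introduced here only for illustration:

package com.sql

import org.apache.spark.sql.{Dataset, SparkSession}

// Field names must match the json keys; field order does not matter here
case class Student(id: String, name: String, age: Long, gender: String, clazz: String)

object Demo6DFToDS {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder
      .master("local")
      .appName("ds")
      .getOrCreate()

    import spark.implicits._

    // as[Student] binds columns by name, not by position
    val studentDS: Dataset[Student] = spark
      .read
      .format("json")
      .load("data/students.json")
      .as[Student]

    // Typed operations work directly on the case class fields
    studentDS.filter((s: Student) => s.age > 22).show()
  }
}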
Spark SQL window functions
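A minimal sketch of window functions in the DSL, reusing the students.txt schema from above: define a window with Window.partitionBy(...).orderBy(...), then apply a ranking function over it. Taking the three oldest students per class is just an illustrative query:

package com.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo7Window {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder
      .master("local")
      .appName("window")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .load("data/students.txt")

    // A window partitioned by class and ordered by age, descending
    val byClazz = Window.partitionBy($"clazz").orderBy($"age".desc)

    // row_number (and rank / dense_rank) are applied with .over(window)
    studentDF
      .withColumn("rn", row_number().over(byClazz))
      .where($"rn" <= 3) // keep the three oldest students in each class
      .show()
  }
}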