{"name": "libai", "age": 30, "fv": 99.99} {"name": "xiaoqiao", "age": 28, "fv": 9.99} {"name": "yasuo", "age": 18, "fv": 80.99, "gender": "male"} {"name": "banzang", "age": 18, "fv": 9999.99} {"name": "saisi", "fv": 9999.98, "gender": "female"} {"name": "xiaohong" {"name": "xiaoming", "nation": "China"}
-
Reading a JSON file (Spark's JSON source expects JSON Lines: one object per line)
import org.apache.spark.sql.{DataFrame, SparkSession}

object CreateDataFrameFromJSON {
  def main(args: Array[String]): Unit = {
    // Create a SparkSession (a wrapper around, and enhancement of, SparkContext)
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()
    // Read the JSON file into a DataFrame.
    // JSON records carry their own schema information (field names and field types),
    // so Spark can infer the schema without any extra hints.
    val df: DataFrame = spark.read.json("src/main/scala/data/user.json")
    import spark.implicits._ // enables the $"column" syntax used below
    val df2 = df.select("*")
    df2.printSchema()
    df2.show()
    spark.stop()
  }
}
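Given the sample user.json above, the printed schema should look roughly like this (Spark infers whole numbers as long and decimals as double, sorts inferred fields by name, and adds _corrupt_record because of the malformed line):
root
 |-- _corrupt_record: string (nullable = true)
 |-- age: long (nullable = true)
 |-- fv: double (nullable = true)
 |-- gender: string (nullable = true)
 |-- name: string (nullable = true)
 |-- nation: string (nullable = true)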
Note: if the JSON file contains malformed records (such as the unterminated {"name": "xiaohong" line above), Spark's default PERMISSIVE mode keeps them and exposes the raw text in a _corrupt_record column; well-formed rows have it null, so they can be filtered out:
val df2 = df.select("name", "fv", "age").where($"_corrupt_record".isNull)
(Recent Spark versions may refuse queries that touch _corrupt_record directly on a raw file; if that happens, call df.cache() before filtering.)
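As a minimal alternative sketch, the JSON source's standard "mode" option can drop bad lines at read time instead of filtering afterwards (PERMISSIVE is the default, DROPMALFORMED skips unparseable lines, FAILFAST throws on the first one):
// Skip malformed lines while reading; the result has no _corrupt_record column
val cleanDf: DataFrame = spark.read
  .option("mode", "DROPMALFORMED")
  .json("src/main/scala/data/user.json")
cleanDf.show()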
-
Writing to a JSON file
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types.{DoubleType, IntegerType, StringType, StructField, StructType}

object WriteToJSON {
  def main(args: Array[String]): Unit = {
    // Create a SparkSession
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext
    // Each line of user.txt is assumed to be comma-separated: name,age,fv
    val lines: RDD[String] = sc.textFile("src/main/scala/data/user.txt")
    // A bare Row carries neither field names nor field types
    val rdd1: RDD[Row] = lines.map(e => {
      val split = e.split(",")
      Row(split(0), split(1).toInt, split(2).toDouble)
    })
    // Define the schema (field name, field type, nullability; nullable defaults to true)
    val schema: StructType = StructType(
      Array(
        StructField("name", StringType),
        StructField("age", IntegerType),
        StructField("fv", DoubleType)
      )
    )
    // Attach the schema to the Row RDD to get a DataFrame
    val df1: DataFrame = spark.createDataFrame(rdd1, schema)
    // Writes a directory of part files, one JSON object per line
    df1.write.json("src/main/scala/outjson")
    spark.stop()
  }
}
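A shorter route, sketched here under the assumption that user.txt keeps the same name,age,fv layout, is to map each line to a case class and let spark.implicits._ derive the schema by reflection instead of building a StructType by hand (the case class name User and the output path are my own choices; the case class must live outside main so Spark can derive an encoder for it):

import org.apache.spark.sql.{DataFrame, SparkSession}

object WriteToJSONWithCaseClass {
  case class User(name: String, age: Int, fv: Double)

  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._
    val df: DataFrame = spark.sparkContext
      .textFile("src/main/scala/data/user.txt")
      .map(e => {
        val split = e.split(",")
        User(split(0), split(1).toInt, split(2).toDouble)
      })
      .toDF() // column names and types come from the case class fields
    df.write.mode("overwrite").json("src/main/scala/outjson2")
    spark.stop()
  }
}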