1) Data preparation: a multi-level nested JSON record; the data file is named data.txt.
{"error":200,"msg":null,"lsk":{"tags":{"150":12,"151":0,"152":0,"153":0,"154":0,"110":0,"111":0,"112":1,"113":0,"114":0,"115":0,"116":0,"117":0,"118":0,"119":3,"161":25,"162":1,"120":0,"121":5,"122":0,"123":0,"124":0,"125":2,"5":"android","126":0,"127":4,"128":0,"129":0,"130":0,"131":0,"132":0,"133":0,"134":0,"135":0,"136":0,"137":0,"138":0,"139":0,"140":3,"141":0,"142":5,"143":3,"144":0,"101":0,"145":6,"102":1,"146":0,"103":1,"147":0,"148":0,"104":0,"149":0,"105":3,"106":5,"107":0,"108":2,"109":2,"credit_score":457},"biz_id":"2589e393-004d-4b37-b0ab-23455c4bbde2","req_id":"15422508480665693989"},"request_id":"15422508480517778031"}
2) Spark code that reads the text file and parses it
import com.alibaba.fastjson.JSON
import org.apache.spark.sql.SparkSession

object JsonPar {
  def main(args: Array[String]): Unit = {
    // Example of parsing a nested JSON string into a DataFrame.
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("JsonPar")
      .getOrCreate()
    import spark.implicits._

    // The file sits on the local disk; because the project also connects to the
    // big-data cluster, the absolute local path is given with the file:// scheme.
    val result = spark.sparkContext
      .textFile("file:///D:\\ideaProjects\\rddtest\\datas\\data.txt")
      // Parse each line into a fastjson JSONObject.
      .map(line => JSON.parseObject(line))
      // Drill into the nested "lsk" object and take its "tags" field as a JSON string.
      .map(obj => obj.getJSONObject("lsk").getString("tags"))
      // Parse the "tags" string into its own JSONObject.
      .map(tags => JSON.parseObject(tags))
      // Extract the fields of interest ("150" and "161") as a tuple.
      .map(tags => (tags.getString("150").toInt, tags.getString("161")))
      .toDF("p_150", "p_161")

    // Register the DataFrame as a temporary view so it can be queried with SQL.
    result.createTempView("s")
  }
}
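To check the result, the temporary view can be queried with Spark SQL. This is a minimal usage sketch, assumed rather than taken from the original: the lines would go inside main, right after createTempView("s").

    // Query the registered view "s"; with the single sample record in data.txt
    // this should print one row with p_150 = 12 and p_161 = 25.
    spark.sql("select p_150, p_161 from s").show()
    // Stop the session once the job is finished.
    spark.stop()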