Spark SQL: converting between RDD, DataFrame, and Dataset

Maven dependency (pom.xml)


<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.11</artifactId>
    <version>2.3.0</version>
    <scope>provided</scope>
</dependency>
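
If the project is built with sbt instead of Maven, an equivalent dependency line would look roughly like this (a sketch; the %% operator appends the Scala binary version, 2.11 here, to the artifact name):

    // build.sbt equivalent of the Maven dependency above
    libraryDependencies += "org.apache.spark" %% "spark-sql" % "2.3.0" % "provided"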


Initialization


    val spark = SparkSession
      .builder()
      .appName("topn")
      .master("local[2]")
      .getOrCreate()
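
The snippets in this post are fragments. As a minimal sketch (the object name SparkSqlDemo is my own placeholder, not from the original), they can be wrapped in a single object that holds the imports needed by the method signatures below and stops the session when the job is done:

    import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

    object SparkSqlDemo {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession
          .builder()
          .appName("topn")
          .master("local[2]")
          .getOrCreate()

        // ... call rddToDataFrame(spark), rddToDataSet(spark), etc. from the sections below ...

        spark.stop() // release local resources when finished
      }

      // the rddToDataFrame / rddToDataSet / dataSetToDataFrame / dataSetToRDD /
      // dataFrameToRDD methods shown below would live here
    }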


RDD to DataFrame


    val df = rddToDataFrame(spark)
    df.printSchema()
    df.select("_1", "_2").show()
    df.take(3).foreach(println)



  def rddToDataFrame(spark: SparkSession): DataFrame = {
    // implicit conversions (needed for .toDF on an RDD of tuples)
    import spark.implicits._
    // read the input file
    val fileRDD = spark.sparkContext.textFile("D:\\file\\wc.txt")
    // word count, sorted by count in descending order
    val wordCountsRDD = fileRDD.flatMap(_.split("\\s+"))
      .map((_, 1))
      .reduceByKey(_ + _)
      .map(x => (x._2, x._1))
      .sortByKey(false)
      .map(x => (x._2, x._1))

    // convert the RDD of (word, count) tuples to a DataFrame;
    // the columns keep the default tuple names _1 and _2
    val df = wordCountsRDD.toDF()
    df
  }

Output:

root
 |-- _1: string (nullable = true)
 |-- _2: integer (nullable = false)

+---------+---+
|       _1| _2|
+---------+---+
|    spark| 14|
|    storm|  6|
|    scala|  4|
|       es|  3|
|    mysql|  3|
|     solr|  2|
|     hdfs|  2|
|    hbase|  2|
|     hive|  1|
|     mlib|  1|
|    oozie|  1|
|   python|  1|
|mapreduce|  1|
|    flink|  1|
|    linux|  1|
|     java|  1|
|  azkaban|  1|
|    kafka|  1|
|   hadoop|  1|
|    flume|  1|
+---------+---+

[spark,14]
[storm,6]
[scala,4]
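
Besides the implicit toDF() conversion, an RDD can also be turned into a DataFrame by building the schema programmatically and calling createDataFrame. A sketch, assuming the wordCountsRDD from the function above is in scope (the column names "word" and "count" are my own choice):

    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

    // map the tuples to Rows and attach an explicit schema
    val rowRDD = wordCountsRDD.map { case (word, count) => Row(word, count) }
    val schema = StructType(Seq(
      StructField("word", StringType, nullable = true),
      StructField("count", IntegerType, nullable = false)
    ))
    val dfWithSchema = spark.createDataFrame(rowRDD, schema)
    dfWithSchema.printSchema()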

 

RDD to Dataset

    val ds = rddToDataSet(spark)
    ds.createOrReplaceTempView("wordcount") // register a local temporary view
    spark.sql("select word,count(1) as count from wordcount group by word order by count desc limit 3").show()



  case class wordcount(word: String, count: Int)

  def rddToDataSet(spark: SparkSession): Dataset[wordcount] = {
    // implicit conversions (needed for .toDS and for the wordcount encoder)
    import spark.implicits._
    // read the input file and wrap each word in a wordcount(word, 1) record
    val ds = spark
      .sparkContext
      .textFile("D:\\file\\wc.txt")
      .flatMap(_.split("\\s+"))
      .map((_, 1))
      .map(x => wordcount(x._1, x._2))
      .toDS()
    ds
  }



Output:

+-----+-----+
| word|count|
+-----+-----+
|spark|   14|
|storm|    6|
|scala|    4|
+-----+-----+
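
The reverse direction, DataFrame to Dataset, is not shown above; a minimal sketch would be to call .as[wordcount] on a DataFrame whose column names and types match the case class (the sample rows here are made up for illustration):

    import org.apache.spark.sql.Dataset
    import spark.implicits._

    // DataFrame -> Dataset[wordcount]: columns "word" (string) and "count" (int)
    // line up with the fields of the wordcount case class
    val wcDf = Seq(("spark", 14), ("storm", 6)).toDF("word", "count")
    val typedDs: Dataset[wordcount] = wcDf.as[wordcount]
    typedDs.show()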


Dataset to DataFrame

    val df = dataSetToDataFrame(spark)
    df.select("word", "count").groupBy("word").count().show()
    val df2 = df.select("word", "count").groupBy("word").count()
    df2.orderBy(df2("count").desc).show()
    val df3 = df2.orderBy(df2("count").desc)
    df3.take(3).foreach(println)

  def dataSetToDataFrame(spark: SparkSession): DataFrame = {
    // implicit conversions (needed for flatMap/map on a Dataset and for .toDF)
    import spark.implicits._
    // read the input file; each word gets an initial count of 1
    // (the aggregation itself is done by the caller via groupBy("word").count())
    val ds = spark
      .read
      .textFile("D:\\file\\wc.txt")
      .flatMap(_.split("\\s+"))
      .map((_, 1))

    // name the tuple columns when converting to a DataFrame
    val df = ds.toDF("word", "count")
    df
  }

Output:
+---------+-----+
|     word|count|
+---------+-----+
|    hbase|    2|
|    kafka|    1|
|     mlib|    1|
|    mysql|    3|
|       es|    3|
|    scala|    4|
|    spark|   14|
|    storm|    6|
|    flume|    1|
|    linux|    1|
|mapreduce|    1|
|     solr|    2|
|    flink|    1|
|    oozie|    1|
|     hdfs|    2|
|     java|    1|
|   hadoop|    1|
|  azkaban|    1|
|   python|    1|
|     hive|    1|
+---------+-----+


+---------+-----+
|     word|count|
+---------+-----+
|    spark|   14|
|    storm|    6|
|    scala|    4|
|    mysql|    3|
|       es|    3|
|    hbase|    2|
|     solr|    2|
|     hdfs|    2|
|    kafka|    1|
|    flume|    1|
|     mlib|    1|
|   hadoop|    1|
|    linux|    1|
|    flink|    1|
|     java|    1|
|  azkaban|    1|
|     hive|    1|
|mapreduce|    1|
|    oozie|    1|
|   python|    1|
+---------+-----+


[spark,14]
[storm,6]
[scala,4]
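
Since every row carries count = 1, the same top-3 result can also be produced with the DataFrame DSL instead of SQL. A sketch, assuming df is the DataFrame returned by dataSetToDataFrame above:

    import org.apache.spark.sql.functions.{desc, sum}

    // DSL equivalent of the earlier SQL query: sum the per-word counts
    // and show the three most frequent words
    df.groupBy("word")
      .agg(sum("count").as("count"))
      .orderBy(desc("count"))
      .show(3)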


Dataset to RDD

    val fileRDD = dataSetToRDD(spark)
    val wordCounts = fileRDD.flatMap(_.split("\\s+"))
      .map((_, 1))
      .reduceByKey(_ + _)
      .map(x => (x._2, x._1))
      .sortByKey(false)
      .map(x => (x._2, x._1))
    wordCounts.collect().foreach(println)
    wordCounts.take(3).foreach(println)

  def dataSetToRDD(spark: SparkSession) = {
    // spark.read.textFile returns a Dataset[String]; .rdd turns it into an RDD[String]
    val rdd = spark.read
      .textFile("D:\\file\\wc.txt")
      .rdd
    rdd
  }
Output:
(spark,14)
(storm,6)
(scala,4)
(es,3)
(mysql,3)
(solr,2)
(hdfs,2)
(hbase,2)
(hive,1)
(mlib,1)
(hadoop,1)
(oozie,1)
(flume,1)
(python,1)
(mapreduce,1)
(flink,1)
(linux,1)
(java,1)
(azkaban,1)
(kafka,1)

(spark,14)
(storm,6)
(scala,4)
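
dataSetToRDD goes through a Dataset[String], but .rdd works on any typed Dataset and preserves the element type. A minimal sketch using the rddToDataSet method from the earlier section:

    // .rdd on a Dataset[wordcount] yields an RDD[wordcount]
    val wcDs = rddToDataSet(spark)
    val wcRDD = wcDs.rdd
    wcRDD.map(wc => (wc.word, wc.count)).take(3).foreach(println)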


DataFrame to RDD


    val jsonRDD = dataFrameToRDD(spark)
    jsonRDD.foreach(println)



  def dataFrameToRDD(spark: SparkSession) = {
    import spark.implicits._
    // spark.read.json returns a DataFrame with the schema inferred from the JSON file
    val people = spark.read
      .json("D:\\file\\people.json")
    people.printSchema()
    people.createOrReplaceTempView("people")
    val teenagers = spark.sql("select name from people where age >= 13 and age <= 19")
    // .rdd converts the result DataFrame to an RDD[Row]
    teenagers.rdd
  }





Contents of D:\file\people.json:

{"name":"Michael", "age":28}
{"name":"Andy", "age":30}
{"name":"Justin", "age":19}



Output:

root
 |-- age: long (nullable = true)
 |-- name: string (nullable = true)

[Justin]
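
The RDD produced here is an RDD[Row], so individual columns are extracted with Row accessors such as getAs. A sketch reusing the jsonRDD from above:

    // each element is an org.apache.spark.sql.Row; pull out the "name" column
    val names = jsonRDD.map(row => row.getAs[String]("name"))
    names.foreach(println) // prints: Justin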

 

 
