20191204Spark

login as: root
     ┌────────────────────────────────────────────────────────────────────┐
     │                        • MobaXterm 11.0 •                          │
     │            (SSH client, X-server and networking tools)             │
     │                                                                    │
     │ ➤ SSH session to root@192.168.89.105                               │
     │   • SSH compression : ✔                                            │
     │   • SSH-browser     : ✔                                            │
     │   • X11-forwarding  : (disabled or not supported by server)       │
     │   • DISPLAY         : 172.16.9.44:0.0                              │
     │                                                                    │
     │ ➤ For more info, ctrl+click on help or visit our website           │
     └────────────────────────────────────────────────────────────────────┘

Last login: Mon Dec  2 16:33:23 2019 from 192.168.89.1
-bash: /root: Is a directory
[root@SparkOnStandalone ~]# jps
12790 Jps
[root@SparkOnStandalone ~]# start-dfs.sh
Starting namenodes on [SparkOnStandalone]
SparkOnStandalone: /root/.bashrc: line 13: /root: Is a directory
SparkOnStandalone: starting namenode, logging to /usr/hadoop-2.9.2/logs/hadoop-root-namenode-SparkOnStandalone.out
SparkOnStandalone: /root/.bashrc: line 13: /root: Is a directory
SparkOnStandalone: starting datanode, logging to /usr/hadoop-2.9.2/logs/hadoop-root-datanode-SparkOnStandalone.out
Starting secondary namenodes [SparkOnStandalone]
SparkOnStandalone: /root/.bashrc: line 13: /root: Is a directory
SparkOnStandalone: starting secondarynamenode, logging to /usr/hadoop-2.9.2/logs/hadoop-root-secondarynamenode-SparkOnStandalone.out
[root@SparkOnStandalone ~]# jps
13321 DataNode
13195 NameNode
13615 SecondaryNameNode
13807 Jps
package mby00

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

// countByValueAndWindow(windowLength, slideInterval, [numTasks])
// How to test:
// run main directly, then feed data with: nc -lk 8888
// ok (see the sketch after the sample output below for the optional numTasks argument)
object CountByValueAndWindowTest {


  // main
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local[*]").setAppName(" countByValueAndWindow test")
    val ssc = new StreamingContext(conf,Seconds(1))
    ssc.sparkContext.setLogLevel("OFF")
    val ds = ssc.socketTextStream("SparkOnStandalone",8888)

    ssc.checkpoint("hdfs://SparkOnStandalone:9000/checkpoint6")
    ds
        .countByValueAndWindow(Seconds(5),Seconds(5))
        .print()


    ssc.start()
    ssc.awaitTermination()
  }

}


// Output after running main:
/**-------------------------------------------
Time: 1575421403000 ms
-------------------------------------------

-------------------------------------------
Time: 1575421408000 ms
-------------------------------------------

-------------------------------------------
Time: 1575421413000 ms
-------------------------------------------*/
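A side note not in the original post: the optional numTasks/numPartitions argument from the signature comment above can be passed as a third parameter. A minimal sketch, reusing the ds stream from the example; the value 2 is purely illustrative:

    ds
        .countByValueAndWindow(Seconds(5), Seconds(5), 2) // 2 = numPartitions, controls shuffle parallelism
        .print()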
package mby00

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}


// reduceByKeyAndWindow(func, invFunc, windowLength, slideInterval, [numTasks])
// A more efficient version of `reduceByKeyAndWindow`
// Reason: each window is computed incrementally as
//   previous window's result + elements entering the current window - elements that expired from the previous window
// How to test:
// run main directly, then feed data with: nc -lk 8888
// ok (see the sketch after the final result below for the filterFunc variant)

object ReduceByKeyAndWindowTest {
  // main
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local[*]").setAppName(" reduceByKeyAndWindow test")
    val ssc = new StreamingContext(conf,Seconds(1))
    ssc.sparkContext.setLogLevel("OFF")
    val ds = ssc.socketTextStream("SparkOnStandalone",8888)


    println("111111111111111111111111")
    ssc.checkpoint("hdfs://SparkOnStandalone:9000/checkpoint7")
    ds
        .map(line => (line,1))
        .reduceByKeyAndWindow((v1:Int,v2:Int) => v1 + v2,(v1:Int,v2:Int) => v1 - v2,Seconds(5),Seconds(2))
        .print()




    ssc.start()
    ssc.awaitTermination()

  }

}

// Output after running main:
/**
111111111111111111111111
-------------------------------------------
Time: 1575422934000 ms
-------------------------------------------

-------------------------------------------
Time: 1575422936000 ms
-------------------------------------------

-------------------------------------------
Time: 1575422938000 ms
-------------------------------------------*/


// Final result: with the inverse function, keys stay in the window state with a count of 0 after their data slides out
/**
(mm,0)
(,0)
(mby1,0)
(m,0)
(b,0)
(mby3mby3,0)
(mby2,0)
(mby3,0)
(kkkkkkkkkkkkkkkkkkkkkkkkk,0)
(kkkkkkkkkkkkkkk,0)*/
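The (word, 0) rows above come from the inverse-reduce path: keys linger in the state even after their count drops to zero. A sketch, not from the original post, of the six-argument call that also passes numPartitions and a filterFunc to prune those keys (the partition count 2 is only illustrative):

    ds
        .map(line => (line, 1))
        .reduceByKeyAndWindow(
          (v1: Int, v2: Int) => v1 + v2,      // add values entering the window
          (v1: Int, v2: Int) => v1 - v2,      // subtract values leaving the window
          Seconds(5),
          Seconds(2),
          2,                                  // numPartitions (illustrative)
          (kv: (String, Int)) => kv._2 > 0)   // filterFunc: drop keys whose count fell to 0
        .print()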
package mby00.quickstart

import org.apache.spark.sql.{Dataset, SparkSession}

// SparkSQL getting-started example
object SparkSqlExample1SQLTest {

    // main
    def main(args: Array[String]): Unit = {


      val sparkSession = SparkSession
        .builder()
        .appName("SparkSQLExample1 test")
        .master("local[*]")
        .getOrCreate()

      import sparkSession.implicits._

      val rdd = sparkSession.sparkContext.makeRDD(List(("Hello",1),("Scala",1),("Hello",1),("Spark",1)))

      val dataset:Dataset[(String,Int)] = rdd.toDS()

      dataset.createOrReplaceTempView("t_word")

      sparkSession
          .sql("select * from t_word")
          .show()


      sparkSession.close()
    }
}

/**+-----+---+
|   _1| _2|
+-----+---+
|Hello|  1|
|Scala|  1|
|Hello|  1|
|Spark|  1|
+-----+---+*/
package mby00.quickstart

import org.apache.spark.sql.{Dataset, SparkSession}

// SparkSQL getting-started example
// How to test:
// run main directly
// ok
object SparkSqlExample1SQLTest {

    // main
    def main(args: Array[String]): Unit = {


      val sparkSession = SparkSession
        .builder()
        .appName("SparkSQLExample1 test")
        .master("local[*]")
        .getOrCreate()

      import sparkSession.implicits._

      val rdd = sparkSession.sparkContext.makeRDD(List(("Hello",1),("Scala",1),("Hello",1),("Spark",1)))

      val dataset:Dataset[(String,Int)] = rdd.toDS()

      dataset.createOrReplaceTempView("t_word")

      sparkSession
          .sql("select * from t_word")
          .show()
      /**+-----+---+
      |   _1| _2|
      +-----+---+
      |Hello|  1|
      |Scala|  1|
      |Hello|  1|
      |Spark|  1|
      +-----+---+*/

      sparkSession
        .sql("select _1 as word, sum(_2) as num from t_word group by _1 ")
        .show()
        /**+-----+---+
        | word|num|
        +-----+---+
        |Hello|  2|
        |Scala|  1|
        |Spark|  1|
        +-----+---+*/

      println("333333333333333333333333333333333333333")
      sparkSession
          .sql("select _1 as word, sum(_2) as num from t_word where _1 !='Scala' group by _1 order by num asc ")
          .show()
      /**+-----+---+
      | word|num|
      +-----+---+
      |Spark|  1|
      |Hello|  2|
      +-----+---+*/


      sparkSession.close()
    }
}
package mby00.quickstart

import org.apache.spark.sql.{Dataset, SparkSession}


// SparkSQL getting-started example
// How to test:
// run main directly
// ok

object SparkSqlExample1FunctionTest {

  // main
  def main(args: Array[String]): Unit = {


    val sparkSession = SparkSession
      .builder()
      .appName("SparkSqlExample1FunctionTest")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val rdd = sparkSession.sparkContext.makeRDD(List(("Hello",1),("Scala",1),("Hello",1),("Spark",1)))

    val dataset:Dataset[(String,Int)] = rdd.toDS()


    dataset
        .where("_1 != 'Scala'")
        .groupBy("_1")
        .sum("_2")
        .withColumnRenamed("_1","word")
        .withColumnRenamed("sum(_2)","sum")
        .show()

   /** +-----+---+
    | word|sum|
    +-----+---+
    |Hello|  2|
    |Spark|  1|
    +-----+---+*/


    sparkSession.close()


  }



}
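An alternative, not in the original post, to the pair of withColumnRenamed calls above: name the columns during the aggregation itself with org.apache.spark.sql.functions.sum and Column.as. A sketch that assumes the same sparkSession, implicits import, and dataset as the example above:

    import org.apache.spark.sql.functions.sum

    dataset
        .where($"_1" =!= "Scala")
        .groupBy($"_1".as("word"))
        .agg(sum($"_2").as("sum"))
        .show()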
package mby00.datasource

import org.apache.spark.sql.SparkSession


// Create a Dataset from a Scala collection of tuples
// How to test:
// run main directly
// ok
object CreateDataSetWithTupleTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("CreateDataSetWithTupleTest")
      .master("local[*]")
      .getOrCreate()


    import sparkSession.implicits._


    val dataset = List((1,"zs",true,1000),(2,"ls",false,2000)).toDS()

    dataset
      .show()


    /**+---+---+-----+----+
    | _1| _2|   _3|  _4|
    +---+---+-----+----+
    |  1| zs| true|1000|
    |  2| ls|false|2000|
    +---+---+-----+----+*/
    
    
    sparkSession.close()


  }


}
package mby00.datasource

import org.apache.spark.sql.SparkSession

// Create a Dataset from a case class
// How to test:
// run main directly
// ok


object CreateDatasetWithCaseClassTest {


  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("CreateDatasetWithCaseLClassTest")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val dataset = List(Person("zs",true,"bj"),Person("ls",false,"sh")).toDS()

    dataset
        .show()
   /** +----+-----+-------+
    |name|  sex|address|
    +----+-----+-------+
    |  zs| true|     bj|
    |  ls|false|     sh|
    +----+-----+-------+*/

    sparkSession.close()

  }
}
case class Person(name:String,sex:Boolean,address:String) // case class fields are vals (immutable)
package mby00.datasource

import org.apache.spark.sql.SparkSession


// Create a Dataset from JSON files
// How to test:
// run main directly
// ok
object CreateDatasetWithJSONTest {


  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("CreateDatasetWithJSON")
      .master("local[*]")
      .getOrCreate()


    val dataset = sparkSession
      .read
      .json("D:\\IntelliJ IDEA 2018.2.5\\IEDAMBY\\sparksql-day1\\src\\main\\resources")
      .as("user") // "user" is only an alias here; the result is still an untyped DataFrame

    dataset
        .show()

   /** +---+----+-----+
    | id|name|  sex|
    +---+----+-----+
    |  1|  zs| true|
    |  2|  ls|false|
    |  3|  ww| true|
    |  4|  zl|false|
    +---+----+-----+*/

    sparkSession.close()


  }

}
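For completeness (not part of the original post), the same JSON source can also be mapped onto a typed Dataset by defining a case class that matches the fields shown above; User below is an assumed definition based on that output, and the path is reused from the example:

package mby00.datasource

import org.apache.spark.sql.{Dataset, SparkSession}

// Sketch only: typed variant of the JSON example above; User is an assumed case class.
object CreateTypedDatasetWithJSONSketch {

  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("CreateTypedDatasetWithJSONSketch")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val users: Dataset[User] = sparkSession
      .read
      .json("D:\\IntelliJ IDEA 2018.2.5\\IEDAMBY\\sparksql-day1\\src\\main\\resources")
      .as[User] // map the inferred columns onto the case class fields by name

    users.show()

    sparkSession.close()
  }
}

case class User(id: Long, name: String, sex: Boolean)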

package mby00.datasource

import org.apache.spark.sql.{Dataset, SparkSession}


// Create a Dataset from an RDD
// How to test:
// run main directly
// ok
object CreateDatasetWithRDDTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("CreateDatasetWithRDDTest")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val rdd = sparkSession.sparkContext.makeRDD(List(("Hello",1),("Hello",1),("Spark",1),("Scala",1)))

    val dataset:Dataset[(String,Int)] = rdd.toDS()

    dataset
        .show()

    /**+-----+---+
    |   _1| _2|
    +-----+---+
    |Hello|  1|
    |Hello|  1|
    |Spark|  1|
    |Scala|  1|
    +-----+---+*/
    sparkSession.close()
  }
}
package mby00.datasource

import org.apache.spark.sql.SparkSession

// Create a DataFrame from JSON files
// How to test:
// run main directly
// ok
object CreateDataFrameWithJSONTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("CreateDataFrameWithJSONTest")
      .master("local[*]")
      .getOrCreate()

    val dataFrame = sparkSession.read.json("D:\\IntelliJ IDEA 2018.2.5\\IEDAMBY\\sparksql-day1\\src\\main\\resources")

    dataFrame
        .show()

    dataFrame
        .printSchema()

   /** +---+----+-----+
    | id|name|  sex|
    +---+----+-----+
    |  1|  zs| true|
    |  2|  ls|false|
    |  3|  ww| true|
    |  4|  zl|false|
    +---+----+-----+

    root
    |-- id: long (nullable = true)
    |-- name: string (nullable = true)
    |-- sex: boolean (nullable = true)*/

    sparkSession.close()
  }

}
package mby00.datasource

import org.apache.spark.sql.SparkSession

// Create a DataFrame from a case class
// How to run:
// run main directly
// ok
object CreateDataFrameWithCaseClassTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("CreateDataFrameWithCaseClassTest")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val dataFrame = List(Person("zs",true,"bj"),Person("ls",false,"sh")).toDF()

    dataFrame
        .show()

   /** +----+-----+-------+
    |name|  sex|address|
    +----+-----+-------+
    |  zs| true|     bj|
    |  ls|false|     sh|
    +----+-----+-------+*/

    sparkSession.close()

  }
}

case class Person(name:String,sex:Boolean,address:String)
package mby00.datasource

import org.apache.spark.sql.SparkSession


// Create a DataFrame from a CSV file
// How to run:
// run main directly
// ok
object CreateDataFrameWithCSVTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("CreateDateFrameWithCSVTest")
      .master("local[*]")
      .getOrCreate()

    val dataFrame = sparkSession.read.csv("D:\\IntelliJ IDEA 2018.2.5\\IEDAMBY\\sparksql-day1\\src\\main\\resources\\1.csv")

    dataFrame
        .show()

    dataFrame
        .printSchema()

    /**+---+--------+-----+---+
    |_c0|     _c1|  _c2|_c3|
    +---+--------+-----+---+
    |  1|zhangsan| true| 20|
    |  2|    lisi|false| 21|
    |  3|  wangwu| true| 21|
    +---+--------+-----+---+

    root
    |-- _c0: string (nullable = true)
    |-- _c1: string (nullable = true)
    |-- _c2: string (nullable = true)
    |-- _c3: string (nullable = true)*/
    
    sparkSession.close()
  }
}
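Since the CSV reader above infers every column as string, here is a sketch (not in the original post) of reading the same file with an explicit schema; the column names id/name/sex/age are assumptions based on the rows shown above, and sparkSession is the session from that example:

    import org.apache.spark.sql.types.{BooleanType, IntegerType, StringType, StructType}

    // Assumed column names/types for 1.csv
    val schema = new StructType()
        .add("id", IntegerType)
        .add("name", StringType)
        .add("sex", BooleanType)
        .add("age", IntegerType)

    val typedFrame = sparkSession
        .read
        .schema(schema)
        .csv("D:\\IntelliJ IDEA 2018.2.5\\IEDAMBY\\sparksql-day1\\src\\main\\resources\\1.csv")

    typedFrame.printSchema() // columns now come back as integer/string/boolean/integer instead of all strings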
package mby00.datasource

import org.apache.spark.sql.SparkSession


// Create a DataFrame from tuples
// How to test:
// run main directly
// ok
object CreateDataFrameWithTupleTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val dataFrame1 = List(("zs",1),("ls",2)).toDF()
    val dataFrame2 = List(("zs",1),("ls",2)).toDF("id","name")

    dataFrame1
        .show()
    dataFrame1
        .printSchema()

    dataFrame2
        .show()
    dataFrame2
        .printSchema()

   /** +---+---+
    | _1| _2|
    +---+---+
    | zs|  1|
    | ls|  2|
    +---+---+
    root
    |-- _1: string (nullable = true)
    |-- _2: integer (nullable = false)

    +---+----+
    | id|name|
    +---+----+
    | zs|   1|
    | ls|   2|
    +---+----+
    root
    |-- id: string (nullable = true)
    |-- name: integer (nullable = false)*/

    sparkSession.close()
  }
}
package mby00.datasource

import org.apache.spark.sql.types.{IntegerType, StringType, StructType}
import org.apache.spark.sql.{Row, SparkSession}


// Create a DataFrame from an RDD
// How to test:
// run main directly
// ok
object CreateDataFrameWithRDDTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("")
      .master("local[*]")
      .getOrCreate()

     val rdd = sparkSession.sparkContext.makeRDD(List((1,"zs"),(2,"ls"))).map(t2 => Row(t2._1,t2._2))

    val schema = new StructType()
        .add("id",IntegerType)
        .add("name",StringType)

    val dataFrame = sparkSession.createDataFrame(rdd,schema)

    dataFrame
        .show()
    dataFrame
        .printSchema()

    /**+---+----+
    | id|name|
    +---+----+
    |  1|  zs|
    |  2|  ls|
    +---+----+

    root
    |-- id: integer (nullable = true)
    |-- name: string (nullable = true)*/

    sparkSession.close()
  }
}
package mby00.datasource

import org.apache.spark.sql.SparkSession


// Create a DataFrame from a Java bean
// How to test:
// run main directly
// ok (see the sketch after this example for an assumed Student definition)
object CreateDataFrameWithJavaBeanStudentTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("")
      .master("local[*]")
      .getOrCreate()

    val rdd = sparkSession.sparkContext.makeRDD(List(new Student(1,"zs",true),new Student(2,"ls",false)))

    val dataFrame = sparkSession.createDataFrame(rdd,classOf[Student])

   dataFrame
       .show()
    dataFrame
        .printSchema()

   /** +---+----+-----+
    | id|name|  sex|
    +---+----+-----+
    |  1|  zs| true|
    |  2|  ls|false|
    +---+----+-----+

    root
    |-- id: integer (nullable = true)
    |-- name: string (nullable = true)
    |-- sex: boolean (nullable = true) */

    sparkSession.close()

  }
}
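The Student class itself is not shown in the post. createDataFrame(rdd, classOf[Student]) reflects on JavaBean-style getters, so one plausible Scala definition (an assumption, not the author's code) is:

package mby00.datasource

import scala.beans.BeanProperty

// Assumed JavaBean-style class: @BeanProperty generates the getId/getName/getSex getters
// that createDataFrame(rdd, classOf[Student]) needs; Serializable lets instances travel in an RDD.
class Student(@BeanProperty var id: Int,
              @BeanProperty var name: String,
              @BeanProperty var sex: Boolean) extends Serializable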
package mby00.datasource

import org.apache.spark.sql.SparkSession


// Create a DataFrame by converting a Dataset
// How to test:
// run main directly
// ok
object CreateDataFrameWithDSTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val dataset = List((1,"zs"),(2,"ls")).toDS()

    val dataFrame = dataset.toDF("id","name")

    val dataFrame1 = dataset.toDF()

    dataFrame
        .show()
    dataFrame
        .printSchema()

    dataFrame1
        .show()
    dataFrame1
        .printSchema()

   /** +---+----+
    | id|name|
    +---+----+
    |  1|  zs|
    |  2|  ls|
    +---+----+*/

    /**root
    |-- id: integer (nullable = false)
    |-- name: string (nullable = true)

    +---+---+
    | _1| _2|
    +---+---+
    |  1| zs|
    |  2| ls|
    +---+---+

    root
    |-- _1: integer (nullable = false)
    |-- _2: string (nullable = true)*/
    sparkSession.close()
  }
}
package mby00.datasource

import org.apache.spark.sql.SparkSession


// Create a Dataset by converting a DataFrame
// How to test:
// run main directly
// ok (see the note after this example)
object CreateDatasetWithDFTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("CreateDatasetWithFDTest")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val dataFrame = List((1,"zs"),(2,"ls")).toDF()

    val dataset = dataFrame.as("别名")

    dataset
      .show()

    dataset
        .printSchema()

   /** +---+---+
    | _1| _2|
    +---+---+
    |  1| zs|
    |  2| ls|
    +---+---+
    root
    |-- _1: integer (nullable = false)
    |-- _2: string (nullable = true)*/

    sparkSession.close()

  }
}
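Worth noting (not in the original post): since .as("alias") above only attaches an alias, the result stays an untyped DataFrame. To get a typed Dataset back from the DataFrame, use the encoder-based form; a sketch assuming the same sparkSession.implicits._ import and dataFrame as the example above:

    val typedDataset = dataFrame.as[(Int, String)] // Dataset[(Int, String)], columns _1/_2 map onto the tuple fields
    typedDataset.show()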
package mby00.opt

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.scalalang.typed

/**
  * Strongly typed DataFrame operations
  */

// How to test:
// run main directly
// ok
object StrongTypedDataFrameOpt {

  // main
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("strong typed opt").master("local[*]").getOrCreate()

    import spark.implicits._

    val dataframe = List(
      (1, "zs", true, "A", 2000),
      (2, "ls", false, "A", 3000),
      (3, "ww", true, "B", 4000),
      (4, "zl", false, "B", 5000),
      (5, "tq", true, "A", 6000)
    ).toDF("id", "name", "sex", "dept", "salary")

    // Strongly typed operation: total salary of male employees per department
    dataframe
      .filter(row => row.getBoolean(2) == true) // column index 2 is sex
      .groupByKey(row => row.getString(3)) // group by column index 3 (dept)
      .agg(typed.sum(row => row.getInt(4))) // sum column index 4 (salary)
      .withColumnRenamed("value", "dept")
      .withColumnRenamed("TypedSumDouble(org.apache.spark.sql.Row)", "total")
      .show()

    /**+----+------+
    |dept| total|
    +----+------+
    |   B|4000.0|
    |   A|8000.0|
    +----+------+*/

    spark.close()
  }
}
package mby00.opt

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.scalalang.typed

/**
  * Strongly typed Dataset operations
  */
// How to test:
// run main directly
// ok
object StrongTypedDatasetOpt {

  // main
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder().appName("strong typed opt").master("local[*]").getOrCreate()

    import spark.implicits._

    val dataset = List(
      (1, "zs", true, "A", 2000),
      (2, "ls", false, "A", 3000),
      (3, "ww", true, "B", 4000),
      (4, "zl", false, "B", 5000),
      (5, "tq", true, "A", 6000)
    ).toDS()

    // Strongly typed operation: total salary of male employees per department
    dataset
      .filter(t5 => t5._3 == true)
      .groupByKey(t5 => t5._4) // group by the dept field
      .agg(typed.sum(t5 => t5._5)) // sum the salary field
      .withColumnRenamed("value","dept")
      .withColumnRenamed("TypedSumDouble(scala.Tuple5)","total")
      .show()

    /**+----+------+
    |dept| total|
    +----+------+
    |   B|4000.0|
    |   A|8000.0|
    +----+------+*/

    spark.close()
  }
}
package mby00.opt

import org.apache.spark.sql.SparkSession


// Print the DataFrame's schema
// How to test:
// run main directly
// ok
object UnTypeDataFrameOptPrintSchemaTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val rdd = sparkSession.sparkContext.makeRDD(List((1,"zs",1000.0,true),(2,"ls",2000.0,false),(3,"ww",3000.0,true)))

      val dataFrame = rdd.toDF("id","name","salary","sex")

    dataFrame
        .printSchema()

    /**root
    |-- id: integer (nullable = false)
    |-- name: string (nullable = true)
    |-- salary: double (nullable = false)
    |-- sex: boolean (nullable = false)*/

    sparkSession.close()

  }
}
package mby00.opt

import org.apache.spark.sql.SparkSession

// show() prints the first 20 rows of the DataFrame by default
// How to test:
// run main directly
// ok
object UnTypeDataFrameOptShowTest {
  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val rdd = sparkSession.sparkContext.makeRDD(List((1,"zs"),(2,"ls"),(3,"ww")))

    val dataFrame = rdd.toDF("id","name")

    dataFrame
        .show() // shows the first 20 rows by default
    /**+---+----+
    | id|name|
    +---+----+
    |  1|  zs|
    |  2|  ls|
    |  3|  ww|
    +---+----+*/

    dataFrame
        .show(1) // show only one row
    /**+---+----+
    | id|name|
    +---+----+
    |  1|  zs|
    +---+----+*/
    
    sparkSession.close()

  }
}
package mby00.opt

import org.apache.spark.sql.SparkSession

// Select specific columns
// How to test:
// run main directly
// ok
object UnTypeDataFrameOptSelectTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val dataFrame = List(
      (1,"zs",true,"A",1000.0),
      (2,"ls",false,"A",2000.0),
      (3,"ww",true,"B",3000.0),
      (4,"zl",false,"A",4000.0),
      (5,"tq",true,"B",5000.0)
    ).toDF("id","name","sex","dept","salary")

    dataFrame
        .select() // with no columns specified, the result keeps its rows but has no columns
        .show()
    /**++
    ||
    ++
    ||
    ||
    ||
    ||
    ||
    ++*/

    dataFrame
        .select("id","dept","salary") // 查询指定的字段
        .show()
   /**+---+----+------+
    | id|dept|salary|
    +---+----+------+
    |  1|   A|1000.0|
    |  2|   A|2000.0|
    |  3|   B|3000.0|
    |  4|   A|4000.0|
    |  5|   B|5000.0|
    +---+----+------+*/

    // An alternative style, using the implicit conversion
    dataFrame
        .select($"id",$"name")  // the $ interpolator wraps a column-name string into a Column object
        .show()
    /**+---+----+
    | id|name|
    +---+----+
    |  1|  zs|
    |  2|  ls|
    |  3|  ww|
    |  4|  zl|
    |  5|  tq|
    +---+----+*/

    sparkSession.close()
  }
}
package mby00.opt

import org.apache.spark.sql.SparkSession


// Add a column, or replace an existing column with the same name
// How to test:
// run main directly
// ok
object UnTypeDataFrameOptWithColumnTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val dataFrame = List(
      (1,"zs",true,"A",1000.0),
      (2,"ls",false,"A",2000.0),
      (3,"ww",true,"B",3000.0),
      (4,"zl",false,"A",4000.0),
      (5,"tq",true,"B",5000.0)
    ).toDF("id","name","sex","dept","salary")

    dataFrame
        .select($"id",$"name",$"salary")
        .withColumn("yearSalary",$"salary"*12)  // 添加列
        .show()
    /**+---+----+------+----------+
    | id|name|salary|yearSalary|
    +---+----+------+----------+
    |  1|  zs|1000.0|   12000.0|
    |  2|  ls|2000.0|   24000.0|
    |  3|  ww|3000.0|   36000.0|
    |  4|  zl|4000.0|   48000.0|
    |  5|  tq|5000.0|   60000.0|
    +---+----+------+----------+*/

    dataFrame
        .select($"id",$"name",$"salary")
        .withColumn("salary",$"salary"*12) // 替换已经存在的列
        .show()
    /**+---+----+-------+
    | id|name| salary|
    +---+----+-------+
    |  1|  zs|12000.0|
    |  2|  ls|24000.0|
    |  3|  ww|36000.0|
    |  4|  zl|48000.0|
    |  5|  tq|60000.0|
    +---+----+-------+*/

    sparkSession.close()

  }
}
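As an aside (not in the original post), the same derived column can be expressed with selectExpr, which accepts SQL fragments directly; a sketch reusing the dataFrame from the example above:

    dataFrame
        .selectExpr("id", "name", "salary", "salary * 12 as yearSalary")
        .show()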
package mby00.opt

import org.apache.spark.sql.SparkSession

// Rename columns
// How to test:
// run main directly
// ok
object UnTypeDataFrameOptWithColumnRenamedTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val dataFrame = List(
      (1,"zs",true,"A",1000.0),
      (2,"ls",false,"A",2000.0),
      (3,"ww",true,"B",3000.0),
      (4,"zl",false,"A",4000.0),
      (5,"tq",true,"B",5000.0)
    ).toDF("id","name","sex","dept","salary")

    dataFrame
        .select($"id",$"name",$"sex",$"dept",$"salary")
        .withColumnRenamed("id","uid") // simply renames the column
        .withColumnRenamed("name","uname")
        .withColumnRenamed("sex","usex")
        .withColumnRenamed("dept","udept")
        .withColumnRenamed("salary","usalary")
        .show()

    /**+---+-----+-----+-----+-------+
    |uid|uname| usex|udept|usalary|
    +---+-----+-----+-----+-------+
    |  1|   zs| true|    A| 1000.0|
    |  2|   ls|false|    A| 2000.0|
    |  3|   ww| true|    B| 3000.0|
    |  4|   zl|false|    A| 4000.0|
    |  5|   tq| true|    B| 5000.0|
    +---+-----+-----+-----+-------+*/

    sparkSession.close()

  }
}
package mby00.opt

import org.apache.spark.sql.SparkSession

// Drop columns
// How to test:
// run main directly
// ok
object UnTypeDataFrameOptDropTest {

  // main
  def main(args: Array[String]): Unit = {

    val sparkSession = SparkSession
      .builder()
      .appName("")
      .master("local[*]")
      .getOrCreate()

    import sparkSession.implicits._

    val dataFrame = List(
      (1,"zs",true,"A",1000.0),
      (2,"ls",false,"A",2000.0),
      (3,"ww",true,"B",3000.0),
      (4,"zl",false,"A",4000.0),
      (5,"tq",true,"B",5000.0)
    ).toDF("id","name","sex","dept","salary")

    dataFrame
        .select($"id",$"name",$"dept",$"sex",$"salary") // 隐式转换
        .drop("salary") // 就是删除
        .drop($"sex") // 可以连续的删除
        .show()
    /**+---+----+----+
    | id|name|dept|
    +---+----+----+
    |  1|  zs|   A|
    |  2|  ls|   A|
    |  3|  ww|   B|
    |  4|  zl|   A|
    |  5|  tq|   B|
    +---+----+----+*/

    sparkSession.close()
  }

}