First Experience with Spark SQL

package com.xmgps.sparksql

import java.util.Date
import java.util.concurrent.TimeUnit

import com.xmgps.util.XDate
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.log4j.Logger
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

object SparkSql {

  val log = Logger.getLogger(SparkSql.getClass)

  /**
    * Query HDFS files with Spark SQL.
    *
    * @param lat      latitude (degrees * 1e6)
    * @param lng      longitude (degrees * 1e6)
    * @param ds       search radius in meters
    * @param st       start time (epoch millis)
    * @param et       end time (epoch millis);
    *                 start and end normally fall within the same hour
    * @param dataType data type (e.g. "FREIGHT")
    * @return Array of JSON strings, one per matching (carNo, carColor) pair
    */
  def SparkSqlScan(lat: Int, lng: Int, ds: Double, st: Long, et: Long, dataType: String): Array[String] = {

    // The results of SQL queries are DataFrames and support all the normal RDD operations.
    // Suppress noisy Spark logging in the terminal when testing locally:
//    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)

    val conf = new SparkConf().setAppName("spark sql").setMaster("spark://rm:7077")
    //  val sc = new SparkContext(conf)
    val sc = SparkContext.getOrCreate(conf)

    val sqlContext = new SQLContext(sc)

    sc.addJar("hdfs://mycluster/user/ljk/spark-sql-1.0.0.jar")
    // Build the HDFS path(s) covering [st, et] and load them as an RDD
    val path = this.getFilePath(st, et, dataType)
    val freight = this.getRdd(path, sc)

    if (freight == null) return Array.empty[String]

    // The schema is encoded in a string. Sample record:
    //闽DTD877,1,1761926679093,1475413905093,4096,0,118127408,24477285,0,0,0,0,,,,,,,,,0,,,,,,,,,,,,,,,,,,,,,,
    val schemaString = "carNo,carColor,gpsTime,sysTime,vehicleStatus,alarmStatus,longitude,latitude,height,speed,direction,totalKMS,attacheds,enterPriseId,plateColor,isResend,lineId,updownFlag,preStationOrder,preStationDist,vec2,gnssCenterId,distance"
    // Generate the schema based on the string of schema
    val schema = StructType(
      schemaString.split(",").map(fieldName => StructField(fieldName, StringType, nullable = true)))

    // Convert records of the RDD to Rows. Appending a sentinel ("end") before
    // splitting keeps the trailing empty fields that split(",") would otherwise drop.
    val rowRDD = freight.map(f => f + "end").map(_.split(",")).map(f => Row(f(0), f(1), f(2), f(3), f(4), f(5), f(6), f(7), f(8), f(9), f(10), f(11), f(12), f(13), f(14), f(15), f(16), f(17), f(18), f(19), f(20), f(21), distance(f(7).toInt, f(6).toInt, lat, lng)))

    // Apply the schema to the RDD.
    val freightDataFrame = sqlContext.createDataFrame(rowRDD, schema)

    // Building the filter programmatically is easier here than raw SQL strings
    val results = freightDataFrame.filter("distance < " + ds).filter("gpsTime >= " + st).filter("gpsTime <= " + et).select("carNo", "carColor").distinct().toJSON.collect()

    //< SQL variant of the query above >
    //    // Register the DataFrame as a table.
    //    freightDataFrame.registerTempTable("FREIGHT")
    //
    //    // SQL statements can be run via the sql method provided by sqlContext.
    //    val results = sqlContext.sql("SELECT carNo,carColor FROM FREIGHT WHERE distance < " + ds + " group by carNo,carColor")
    //< end SQL variant >
    sc.stop()
    results
  }
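
  // Each element of the returned array is a JSON string of the form
  //   {"carNo":"闽DTD877","carColor":"1"}
  // (illustrative, based on the sample record above).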

  /**
    * Great-circle distance between two points given as degrees scaled by 1e6.
    *
    * @param lat1 latitude of point 1 (degrees * 1e6)
    * @param lng1 longitude of point 1 (degrees * 1e6)
    * @param lat2 latitude of point 2 (degrees * 1e6)
    * @param lng2 longitude of point 2 (degrees * 1e6)
    * @return the distance in meters, as a String
    */
  private def distance(lat1: Int, lng1: Int, lat2: Int, lng2: Int): String = {

    // WGS-84 equatorial radius in meters, so the result is in meters
    // and comparable to the `ds` radius parameter
    val EARTH_RADIUS = 6378137.0

    val radLat1 = 1.0 * lat1 / 1000000 * Math.PI / 180.0
    val radLat2 = 1.0 * lat2 / 1000000 * Math.PI / 180.0
    val a = radLat1 - radLat2

    val b = (1.0 * lng1 / 1000000 * Math.PI / 180.0) - (1.0 * lng2 / 1000000 * Math.PI / 180.0)

    // Haversine formula
    val s = EARTH_RADIUS * (2 * Math.asin(Math.sqrt(Math.pow(Math.sin(a / 2), 2) +
      Math.cos(radLat1) * Math.cos(radLat2) * Math.pow(Math.sin(b / 2), 2))))
    s.toString
  }
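
  // For reference, the value computed above is the haversine distance
  //   d = 2R * asin( sqrt( sin^2((lat1-lat2)/2) + cos(lat1)*cos(lat2)*sin^2((lng1-lng2)/2) ) )
  // with angles in radians. Quick sanity check (illustrative values):
  // distance(24477285, 118127408, 24481345, 118138825) comes out around 1240 meters,
  // i.e. roughly 1.24 km between the sample record and the query point in main().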

  /**
    * Resolve the HDFS file path(s) for the given time range.
    *
    * @param st       start time (epoch millis)
    * @param et       end time (epoch millis)
    * @param dataType data type (e.g. "FREIGHT")
    * @return one path, or two comma-separated paths when the range spans two days
    */
  private def getFilePath(st: Long, et: Long, dataType: String): String = {

    val fileIndex = "hdfs://mycluster/LINYUN/" + dataType + "DATA/"
    val startDate = XDate.formateDay(new Date(st))
    val endDate = XDate.formateDay(new Date(et))

    val Array(startYear, startMonth, startDay) = startDate.split("-")
    val Array(endYear, endMonth, endDay) = endDate.split("-")

    if (!startDate.equals(endDate)) {
      // Range spans two days: return both day files, comma-separated
      fileIndex + startYear + "/" + startMonth + "/" + startDay + "/" + dataType + startDate.replaceAll("-", "") + "," +
        fileIndex + endYear + "/" + endMonth + "/" + endDay + "/" + dataType + endDate.replaceAll("-", "")
    } else {
      fileIndex + startYear + "/" + startMonth + "/" + startDay + "/" + dataType + startDate.replaceAll("-", "")
    }
  }
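
  // Example (illustrative, assuming XDate.formateDay yields "yyyy-MM-dd"):
  // a single-day range on 2016-09-30 with dataType "FREIGHT" resolves to
  //   hdfs://mycluster/LINYUN/FREIGHTDATA/2016/09/30/FREIGHT20160930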

  private def getRdd(path: String, sc: SparkContext): RDD[String] = {

    val fs = FileSystem.newInstance(new Configuration())
    try {
      val pathArr = path.split(",")

      if (pathArr.length == 2) {
        log.info("Loading data source: " + pathArr(0))
        log.info("Loading data source: " + pathArr(1))
        if (!fs.exists(new Path(pathArr(0))) || !fs.exists(new Path(pathArr(1)))) {
          log.error(path + " does not exist!")
          return null
        }
        sc.textFile(pathArr(0)) ++ sc.textFile(pathArr(1))
      } else {
        log.info("Loading data source: " + path)
        if (!fs.exists(new Path(path))) {
          log.error(path + " does not exist!")
          return null
        }
        sc.textFile(path)
      }
    } finally {
      fs.close() // release the handle opened with FileSystem.newInstance
    }
  }

  def main(args: Array[String]): Unit = {

//    SparkSqlScan(24535500, 118133331, 800, 1455113471000L, 1455114611000L, "FREIGHT").foreach(println)
    this.SparkSqlScan(24481345, 118138825, 800, 1475251278292L, 1475256679093L, "FREIGHT").foreach(println)
//    while (true) {
//      TimeUnit.SECONDS.sleep(5)
//    }
//    this.getFilePath(1475251278292L, 1475252278292L, "FREIGHT")
  }
}
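
The code above targets the Spark 1.x API (SQLContext, registerTempTable). From Spark 2.x on, the same query pattern is expressed through a SparkSession and a temp view. A minimal sketch, assuming Spark 2.x, an illustrative input path, and only the three columns the query actually touches:

import org.apache.spark.sql.SparkSession

object SparkSqlSessionSketch {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("spark sql")
      .master("spark://rm:7077") // same standalone master as above
      .getOrCreate()

    import spark.implicits._

    // Illustrative path; in practice build it with getFilePath(st, et, dataType)
    val lines = spark.sparkContext.textFile(
      "hdfs://mycluster/LINYUN/FREIGHTDATA/2016/09/30/FREIGHT20160930")

    // split(",", -1) keeps trailing empty fields, replacing the "end" sentinel trick
    val df = lines.map(_.split(",", -1))
      .map(f => (f(0), f(1), f(2).toLong))
      .toDF("carNo", "carColor", "gpsTime")

    // createOrReplaceTempView supersedes registerTempTable in Spark 2.x
    df.createOrReplaceTempView("FREIGHT")
    spark.sql(
      "SELECT DISTINCT carNo, carColor FROM FREIGHT " +
        "WHERE gpsTime >= 1475251278292 AND gpsTime <= 1475256679093")
      .toJSON.collect().foreach(println)

    spark.stop()
  }
}

The distance filter is omitted from this sketch; it could be added the same way as above, e.g. as a UDF over the longitude and latitude columns.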

   
   