spark-streaming: writing to HBase in real time

When the real-time computation is an aggregation, the results are small and can be written to MySQL. For non-aggregated (per-record) data, MySQL cannot hold the volume; in that case use a large-scale store that supports overwrite semantics or transactions, such as HBase, Elasticsearch, or ClickHouse.

When HBase writes a row whose row key already exists, the new value overwrites the old one. So if we give every record a unique row key during processing (for example the order id, or device id plus timestamp), then even if a batch is interrupted halfway through, re-running it simply overwrites the values that were already written, which makes the writes idempotent.
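A minimal standalone sketch of that property (the row key "order-0001" and the amount are placeholders; HBase connection settings are assumed to come from hbase-site.xml on the classpath): writing the same row key twice leaves exactly one version of the row, so replaying a half-written batch does not create duplicates.

import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.{ConnectionFactory, Put}
import org.apache.hadoop.hbase.util.Bytes

object IdempotentPutDemo {
  def main(args: Array[String]): Unit = {
    //picks up hbase-site.xml from the classpath
    val connection = ConnectionFactory.createConnection(HBaseConfiguration.create())
    val table = connection.getTable(TableName.valueOf("t_orders"))

    //the unique order id is the row key
    val put = new Put(Bytes.toBytes("order-0001"))
    put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("money"), Bytes.toBytes(100.0))

    table.put(put) //first attempt
    table.put(put) //replay after a failure: same row key, the cell is simply overwritten

    table.close()
    connection.close()
  }
}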

When writing the data we also record the offset. The offset can be stored in the same row as the data, just in a different column family (HBase guarantees row-level atomicity), and only the offset of the last record of each batch needs to be recorded. The trade-off is that reading the offsets back requires a coprocessor or Phoenix to compute, per topic-partition, the maximum offset stored in that column.
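For this layout the HBase table needs both column families created up front. A one-off setup sketch, assuming the HBase 2.x Admin API (on 1.x the HTableDescriptor/HColumnDescriptor equivalents would be used):

import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.{ColumnFamilyDescriptorBuilder, ConnectionFactory, TableDescriptorBuilder}

object CreateOrdersTable {
  def main(args: Array[String]): Unit = {
    val connection = ConnectionFactory.createConnection(HBaseConfiguration.create())
    val admin = connection.getAdmin

    val tableName = TableName.valueOf("t_orders")
    if (!admin.tableExists(tableName)) {
      //"data" holds the business fields, "offset" holds the per-batch Kafka offsets
      val descriptor = TableDescriptorBuilder.newBuilder(tableName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("data"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("offset"))
        .build()
      admin.createTable(descriptor)
    }

    admin.close()
    connection.close()
  }
}

Once the table exists, the Phoenix view shown in the comment header of the job below maps the offset column family, so the latest offset per topic-partition can be queried with a simple group-by max.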

package cn._51doit.sparkstreaming.day02

import java.util

import com.alibaba.fastjson.{JSON, JSONException}
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.{Connection, Put}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Milliseconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext, TaskContext}

/**
  * https://www.jianshu.com/p/f1340eaa3e06
  *
  * Fault-tolerance related settings:
  *   spark.task.maxFailures               -- how many times a task may fail before the job is aborted
  *   yarn.resourcemanager.am.max-attempts -- how many times YARN restarts the ApplicationMaster
  *   spark.speculation                    -- whether slow tasks are re-launched speculatively
  *
  * Phoenix view over the HBase table, used later to query the saved offsets:
  * create view "t_orders" (pk VARCHAR PRIMARY KEY, "offset"."appid_groupid" VARCHAR, "offset"."topic_partition" VARCHAR, "offset"."offset" UNSIGNED_LONG);
  * select max("offset") from "t_orders" where "appid_groupid" = 'g1' group by "topic_partition";
  *
  */
object KafkaToHbase {

  def main(args: Array[String]): Unit = {


    //example arguments: true a1 g1 ta,tb  (isLocal appName groupId topics)
    val Array(isLocal, appName, groupId, allTopics) = args

    val conf = new SparkConf()
      .setAppName(appName)

    if (isLocal.toBoolean) {
      conf.setMaster("local[*]")
    }

    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")

    val ssc: StreamingContext = new StreamingContext(sc, Milliseconds(5000))

    val topics = allTopics.split(",")

    //parameters for the Spark Streaming + Kafka integration
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "node-1.51doit.com:9092,node-2.51doit.com:9092,node-3.51doit.com:9092",
      "key.deserializer" -> classOf[StringDeserializer].getName,
      "value.deserializer" -> classOf[StringDeserializer].getName,
      "group.id" -> groupId,
      "auto.offset.reset" -> "earliest", //no saved offset: read from the beginning; otherwise resume from the saved offset
      "enable.auto.commit" -> (false: java.lang.Boolean) //the consumer does not auto-commit offsets
    )

    //query the historical offsets (the offsets last successfully written to the database)
    val historyOffsets: Map[TopicPartition, Long] = OffsetUtils.queryHistoryOffsetFromHbase(appName, groupId)

    //to integrate with Kafka, add the spark-streaming-kafka-0-10 dependency
    //createDirectStream is more efficient: it uses Kafka's low-level consumer API, and consumers connect directly to the leader of each Kafka partition
    //with the direct approach, RDD partitions and Kafka partitions correspond one to one (same count)
    val kafkaDStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, //distribute partitions evenly across the available executors
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams, historyOffsets) //subscribe to the topics and resume from the historical offsets
    )

    //Note: to capture offsets outside foreachRDD, transform must be used (it runs on the Driver and
    //sees the whole KafkaRDD); mapping over individual ConsumerRecords cannot expose the offsets.
    //This stream is not used below -- the offsets are obtained again inside foreachRDD.
    var offsetRanges: Array[OffsetRange] = null

    val ds2 = kafkaDStream.transform(rdd => {
      //get the offset ranges of the KafkaRDD (on the Driver)
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd.map(_.value())
    })

    kafkaDStream.foreachRDD(rdd => {

      if (!rdd.isEmpty()) {

        //get the offset ranges of the KafkaRDD (on the Driver)
        val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        //extract the message values from the KafkaRDD
        val lines: RDD[String] = rdd.map(_.value())

        val orderRDD: RDD[Order] = lines.map(line => {
          var order: Order = null
          try {
            order = JSON.parseObject(line, classOf[Order])
          } catch {
            case e: JSONException => {
              //TODO: log or side-output the malformed record; order stays null and is filtered out below
            }
          }
          order
        })
        //filter out malformed records
        val filtered: RDD[Order] = orderRDD.filter(_ != null)

        //trigger the action
        filtered.foreachPartition(iter => {
          if(iter.nonEmpty) {
            //save the data of this RDD partition to HBase
            //create an HBase connection (one per partition/task)
            val connection: Connection = HBaseUtil.getConnection("node-1.51doit.com,node-2.51doit.com,node-3.51doit.com", 2181)
            val htable = connection.getTable(TableName.valueOf("t_orders"))

            //an ArrayList with a fixed batch size, used to write Puts in batches
            val puts = new util.ArrayList[Put](3)

            //the current task's partition id is the index into offsetRanges, which yields this Kafka partition's offset range
            val offsetRange: OffsetRange = offsetRanges(TaskContext.get.partitionId()) //obtained on the Executor side

            //iterate over every record in the partition
            iter.foreach(order => {
              //pick out the fields we need
              val oid = order.oid
              val money = order.money
              //wrap them in a Put, keyed by the unique order id
              val put = new Put(Bytes.toBytes(oid))
              //"data" column family holds the payload, "offset" column family holds the offset
              put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("money"), Bytes.toBytes(money))
              //put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("province"), Bytes.toBytes(province))

              //for the last record of the partition, also store the offset in the "offset" column family of the same row
              if(!iter.hasNext) {
                put.addColumn(Bytes.toBytes("offset"), Bytes.toBytes("appid_groupid"), Bytes.toBytes(appName + "_" + groupId))
                put.addColumn(Bytes.toBytes("offset"), Bytes.toBytes("topic_partition"), Bytes.toBytes(offsetRange.topic + "_" + offsetRange.partition))
                put.addColumn(Bytes.toBytes("offset"), Bytes.toBytes("offset"), Bytes.toBytes(offsetRange.untilOffset))
              }
              //add the Put to the batch
              puts.add(put)
              //flush once the batch size is reached
              if(puts.size() == 3) {
                htable.put(puts)
                puts.clear() //empty the batch
              }
            })
            //write any remaining Puts that did not fill a full batch
            htable.put(puts)
            //close the table and the connection
            htable.close()
            connection.close()
          }
        })

      }

    })

    ssc.start()

    ssc.awaitTermination()

  }
}
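The job above references an Order bean and an HBaseUtil helper that are not shown in the post. A minimal sketch of what they might look like (field types are inferred from the usage above, money is assumed to be a Double, and the @BeanProperty approach for fastjson is an assumption):

package cn._51doit.sparkstreaming.day02

import scala.beans.BeanProperty

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory}

//JSON bean parsed by fastjson; getters/setters are generated by @BeanProperty
class Order extends Serializable {
  @BeanProperty var oid: String = _
  @BeanProperty var money: Double = _
}

object HBaseUtil {
  //zkQuorum: comma-separated ZooKeeper hosts, zkPort: ZooKeeper client port
  def getConnection(zkQuorum: String, zkPort: Int): Connection = {
    val conf = HBaseConfiguration.create()
    conf.set("hbase.zookeeper.quorum", zkQuorum)
    conf.set("hbase.zookeeper.property.clientPort", zkPort.toString)
    ConnectionFactory.createConnection(conf)
  }
}

With these in place, the job can be submitted with the four arguments shown in the comment at the top of main (isLocal, appName, groupId, topics).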

package cn._51doit.sparkstreaming.day02

import java.sql.{Connection, DriverManager, PreparedStatement, ResultSet}
import java.util

import org.apache.kafka.common.TopicPartition

import scala.collection.mutable

object OffsetUtils {

  /**
   * Query the offsets via JDBC (Phoenix over HBase).
   * Every time the application starts, the historical offsets are read back from HBase.
   * @param appid
   * @param groupid
   * @return
   */
  def queryHistoryOffsetFromHbase(appid: String, groupid: String): Map[TopicPartition, Long] = {

    val offsets = new mutable.HashMap[TopicPartition, Long]()

    val connection = DriverManager.getConnection("jdbc:phoenix:node-1.51doit.com,node-2.51doit.com,node-3.51doit.com:2181")

    //group by partition and take max, i.e. the latest (largest) offset stored for each partition
    val ps = connection.prepareStatement("select \"topic_partition\", max(\"offset\") from \"t_orders\" where \"appid_groupid\" = ? group by \"topic_partition\"")

    ps.setString(1, appid + "_" + groupid)

    //execute the query and read the result set
    val rs: ResultSet = ps.executeQuery()

    while(rs.next()) {

      val topicAndPartition = rs.getString(1)

      val fields = topicAndPartition.split("_")
      val topic = fields(0)
      val partition = fields(1).toInt

      val offset = rs.getLong(2)

      offsets.put(new TopicPartition(topic, partition), offset)

    }

    //release the JDBC resources
    rs.close()
    ps.close()
    connection.close()

    offsets.toMap
  }



  /**
   * Query the historical offsets from Redis.
   *
   * @param appId
   * @param groupId
   * @return
   */
  def queryHistoryOffsetFromRedis(appId: String, groupId: String): Map[TopicPartition, Long] = {

    val offsetMap = new mutable.HashMap[TopicPartition, Long]

    val jedis = JedisConnectionPool.getConnection
    jedis.select(14)
    val mp: util.Map[String, String] = jedis.hgetAll(appId + "_" + groupId)
    //import the Java-to-Scala collection converters (implicit conversions)
    import scala.collection.JavaConverters._
    for (tp <- mp.asScala) {
      val topic_partition = tp._1
      val offset = tp._2.toLong
      val fields = topic_partition.split("_")
      val topicPartition = new TopicPartition(fields(0), fields(1).toInt)

      offsetMap(topicPartition) = offset
    }

    offsetMap.toMap

  }


  def queryHistoryOffsetFromMySQL(appId: String, groupId: String): Map[TopicPartition, Long] = {

    val offsetMap = new mutable.HashMap[TopicPartition, Long]()

    //query MySQL
    var connection: Connection = null
    var statement: PreparedStatement = null
    var resultSet: ResultSet = null
    try {
      connection = DruidConnectionPool.getConnection
      statement = connection.prepareStatement("SELECT topic_partition, offset FROM t_kafka_offset WHERE app_gid = ?")
      statement.setString(1, appId + "_" + groupId)
      //assign to the outer resultSet so the finally block can close it
      resultSet = statement.executeQuery()
      //iterate over the result set
      while (resultSet.next()) {
        val topic_partition = resultSet.getString(1)
        val offset = resultSet.getLong(2)
        val fields = topic_partition.split("_")
        val topic = fields(0)
        val partition = fields(1).toInt
        val topicPartition = new TopicPartition(topic, partition)
        offsetMap(topicPartition) = offset
      }
    } catch {
      case e: Exception => {
        throw new RuntimeException("Failed to query the historical offsets", e)
      }
    } finally {
      if(resultSet != null) {
        resultSet.close()
      }
      if(statement != null) {
        statement.close()
      }
      if(connection != null) {
        connection.close()
      }
    }
    offsetMap.toMap
  }

}
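queryHistoryOffsetFromRedis and queryHistoryOffsetFromMySQL rely on a JedisConnectionPool and a DruidConnectionPool that are also not shown in the post. A rough sketch under assumed connection settings (host names, ports, credentials, and the MySQL database are placeholders):

package cn._51doit.sparkstreaming.day02

import java.sql.Connection
import java.util.Properties

import com.alibaba.druid.pool.DruidDataSourceFactory
import redis.clients.jedis.{Jedis, JedisPool, JedisPoolConfig}

object JedisConnectionPool {
  private val config = new JedisPoolConfig()
  config.setMaxTotal(20)
  config.setMaxIdle(10)
  //Redis host and port are placeholders
  private val pool = new JedisPool(config, "node-1.51doit.com", 6379)

  def getConnection: Jedis = pool.getResource
}

object DruidConnectionPool {
  private val props = new Properties()
  //JDBC URL, user and password are placeholders
  props.setProperty("driverClassName", "com.mysql.jdbc.Driver")
  props.setProperty("url", "jdbc:mysql://node-1.51doit.com:3306/doit?characterEncoding=UTF-8")
  props.setProperty("username", "root")
  props.setProperty("password", "123456")

  private val dataSource = DruidDataSourceFactory.createDataSource(props)

  def getConnection: Connection = dataSource.getConnection
}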
