Manually maintaining Kafka offsets in Spark (stored in MySQL)

package common
/**
 * Utility class for reading and writing Kafka offsets in MySQL.
 */
import java.sql.{DriverManager, ResultSet}
import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.kafka010.OffsetRange
import scala.collection.mutable

object OffsetUtil {

  /**
    * Read the stored offsets for the given consumer group and topics.
    * @param groupId consumer group id
    * @param topics  Kafka topics to look up
    * @return a map of TopicPartition -> stored offset
    */
  def getOffsetMap(groupId: String, topics: Array[String], mysqlJdbcUrl: String, mysqlUsername: String, mysqlPassword: String): mutable.HashMap[TopicPartition, Long] = {
    // Open the database connection
    val connection = DriverManager.getConnection(mysqlJdbcUrl, mysqlUsername, mysqlPassword)
    val pstmt = connection.prepareStatement("select `topic`,`partition`,`offset` from dataServer_offset where `groupId`=? and `topic`=?")
    val offsetMap = new mutable.HashMap[TopicPartition, Long]()
    topics.foreach(topic => {
      // Bind the query parameters and collect one entry per topic-partition
      pstmt.setString(1, groupId)
      pstmt.setString(2, topic)
      val result: ResultSet = pstmt.executeQuery()
      while (result.next()) {
        offsetMap += new TopicPartition(result.getString("topic"), result.getInt("partition")) -> result.getLong("offset")
      }
      result.close()
    })
    // Release JDBC resources
    pstmt.close()
    connection.close()
    offsetMap
  }

  /**
    * Persist the end offsets of a processed batch.
    * @param groupid     consumer group id
    * @param offsetRange offset ranges of the batch, one per topic-partition
    */
  def saveOffset(groupid: String, offsetRange: Array[OffsetRange], mysqlJdbcUrl:String, mysqlUsername:String, mysqlPassword:String) = {
    // Open the database connection
    val connection = DriverManager.getConnection(mysqlJdbcUrl, mysqlUsername, mysqlPassword)
    val pstmt = connection.prepareStatement("replace into dataServer_offset(`topic`, `partition`, `groupId`, `offset`) values(?,?,?,?)")
    // Bind parameters and upsert one row per topic-partition
    offsetRange.foreach(o =>{
      pstmt.setString(1, o.topic)
      pstmt.setInt(2, o.partition)
      pstmt.setString(3, groupid)
      pstmt.setLong(4, o.untilOffset)
      pstmt.executeUpdate()
    })
    // Release JDBC resources
    pstmt.close()
    connection.close()
  }
}
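
For reference, here is a minimal sketch of the dataServer_offset table that the SELECT and REPLACE INTO statements above assume. The column types and key are assumptions inferred from OffsetUtil's SQL; note that REPLACE INTO only upserts one row per partition if (groupId, topic, partition) forms the primary (or a unique) key.

import java.sql.DriverManager

object OffsetTableInit {
  // One-off helper that creates the offset table with the schema implied by OffsetUtil.
  // Column sizes and the primary key below are assumptions, not taken from the original post.
  def createTable(mysqlJdbcUrl: String, mysqlUsername: String, mysqlPassword: String): Unit = {
    val connection = DriverManager.getConnection(mysqlJdbcUrl, mysqlUsername, mysqlPassword)
    val stmt = connection.createStatement()
    try {
      stmt.executeUpdate(
        """create table if not exists dataServer_offset (
          |  `groupId`   varchar(128) not null,
          |  `topic`     varchar(255) not null,
          |  `partition` int          not null,
          |  `offset`    bigint       not null,
          |  primary key (`groupId`, `topic`, `partition`)
          |)""".stripMargin)
    } finally {
      stmt.close()
      connection.close()
    }
  }
}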
package service.container

import cmb.zh.data.scala.common._
import cmb.zh.data.scala.common.ConfigProperties
import com.alibaba.fastjson.JSON
import common.OffsetUtil
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.security.plain.PlainLoginModule
import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010._

import scala.collection.immutable.HashMap


object MPPRLogService {

  def main(args: Array[String]): Unit = {
    // Argument validation
    //    if (args.length < 1) {
    //      System.err.println( s"""| <configurationFile> is the configuration file """.stripMargin)
    //      System.exit(1)
    //    }


    val conf = new ConfigProperties("D://test.properties") // local test
    val sparkConf = new SparkConf().setAppName("app.name").setMaster("local") // local test


    //    val conf = new ConfigProperties(args(0)) // production
    //    val sparkConf = new SparkConf().setAppName(conf.get("app.name")) // production

    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val scc = new StreamingContext(sparkConf, Seconds(conf.get("streaming.batch.duration").toLong))

    // Kafka initialization
    val topics = conf.get("input.kafka.topics").split(",")
    val username = conf.get("input.kafka.username")
    val password = conf.get("input.kafka.password")
    val saslJaasConfig = classOf[PlainLoginModule].getName + " required username=\"" + username + "\" password=\"" + password + "\";"
    val kafkaServers = conf.get("input.kafka.bootstrap.servers")
    val groupId = conf.get("input.kafka.group.id")

    var kafkaParam: Map[String, Object] = Map("bootstrap.servers" -> kafkaServers,
      "sasl.jaas.config" -> saslJaasConfig,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId,
      "auto.offset.reset" -> "latest",
      "security.protocol" -> "SASL_PLAINTEXT",
      "sasl.mechanism" -> "PLAIN")

    // Optional consumer tuning parameters, added to the map only when configured
    if (conf.get("input.kafka.session.timeout.ms", null) != null) {
      kafkaParam += ("session.timeout.ms" -> conf.get("input.kafka.session.timeout.ms"))
    }
    if (conf.get("input.kafka.max.poll.interval.ms", null) != null) {
      kafkaParam += ("max.poll.interval.ms" -> conf.get("input.kafka.max.poll.interval.ms"))
    }
    if (conf.get("input.kafka.max.poll.records", null) != null) {
      kafkaParam += ("max.poll.records" -> conf.get("input.kafka.max.poll.records"))
    }

    try {
      /**
        * 1. Initialize the MySQL connection settings.
        * 2. Read the stored offsets from the database; two cases:
        *        - offsets exist: resume consumption from them
        *        - nothing stored: start from "latest"
        */
      val mysqlUsername = conf.get("offset.mysql.username")
      val mysqlPassword = conf.get("offset.mysql.password")
      val mysqlJdbcUrl = conf.get("offset.mysql.jdbc.url")
      val offsetMap = OffsetUtil.getOffsetMap(groupId, topics, mysqlJdbcUrl, mysqlUsername, mysqlPassword)
      val kafkaStream = if (offsetMap.size > 0) {
        KafkaUtils.createDirectStream[String, String](
          scc,
          // Location strategy: PreferConsistent is the recommended one; it spreads partitions evenly across executors
          LocationStrategies.PreferConsistent,
          // Consumer strategy: Subscribe, seeded with the offsets read from MySQL
          ConsumerStrategies.Subscribe[String, String](topics, kafkaParam, offsetMap))
      } else {
        KafkaUtils.createDirectStream[String, String](
          scc,
          LocationStrategies.PreferConsistent,
          // Consumer strategy: Subscribe, starting from "latest" since no offsets have been stored yet
          ConsumerStrategies.Subscribe[String, String](topics, kafkaParam))
      }

      // Consume the raw container-cloud logs from Kafka.
      // First filter out malformed logs with a regex, then extract the relevant fields and convert each log to JSON.
      kafkaStream.foreachRDD(rdd => {
        if (rdd.count() > 0) {
          rdd.map(_.value()).map(rawLogStr => {
            LogProcessUtil.getLogContentFromKafka(rawLogStr)
          }).filter(originLogStr => {
            println("原始日志: " + originLogStr)
            LogProcessUtil.platLogCheck(originLogStr)
          }).map(logStr => {
            LogProcessUtil.getPlatformLogJson(logStr)
            // Iterate over the resulting RDD and push each record to Kafka
          }).foreach(record => {
            try {
              println("JSON log: " + record)
              // Route the record to its output topic based on the chlid field
              val recordJson = JSON.parseObject(record)
              if (recordJson.get("chlid").equals("1000")) {
                kafkaProducer.value.send(outputTopicMap("1000"), record)
              } else if (recordJson.get("chlid").equals("1001")) {
                kafkaProducer.value.send(outputTopicMap("1001"), record)
              }
            } catch {
              case e: Throwable => println(e)
            }
          })
          // Save this batch's end offsets once, on the driver, after all records have been sent
          saveOffset(rdd, groupId, mysqlJdbcUrl, mysqlUsername, mysqlPassword)
        }
      })
    } catch {
      case e: Exception => print(e)
    }
    scc.start()
    scc.awaitTermination()
  }

  def saveOffset(rdd:RDD[ConsumerRecord[String, String]], groupId: String, mysqlJdbcUrl: String, mysqlUsername: String, mysqlPassword: String) = {
    // Spark exposes each batch's offset ranges through the HasOffsetRanges interface
    val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
    for (o <- offsetRanges) {
      println(s"topic=${o.topic},partition=${o.partition},fromOffset=${o.fromOffset},untilOffset=${o.untilOffset}")
    }
    // Offsets could instead be committed back to Kafka asynchronously:
    // recordDStream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    // Here they are persisted to MySQL (Redis would work the same way)
    OffsetUtil.saveOffset(groupId, offsetRanges, mysqlJdbcUrl, mysqlUsername, mysqlPassword)
  }

  def selectTopic(topicArray: Array[String]): Map[String, String] = {
    // Key each output topic by its sixth underscore-separated segment
    var resultMap: Map[String, String] = new HashMap[String, String]()
    topicArray.foreach(topic => {
      val segments = topic.split("_")
      resultMap += (segments(5) -> topic)
    })
    resultMap
  }

}
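
The listing above references kafkaProducer (a broadcast producer) and outputTopicMap, which are not defined in this excerpt. Below is a minimal sketch of one way they could be wired up, assuming an output.kafka.* section in the same properties file and a serializable producer wrapper; the class, property keys, and wiring are assumptions for illustration, not the original code.

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.spark.broadcast.Broadcast

// Serializable wrapper so one producer definition can be broadcast to executors;
// the KafkaProducer itself is created lazily on each executor, not on the driver.
class KafkaSink(createProducer: () => KafkaProducer[String, String]) extends Serializable {
  lazy val producer: KafkaProducer[String, String] = createProducer()
  def send(topic: String, value: String): Unit =
    producer.send(new ProducerRecord[String, String](topic, value))
}

object KafkaSink {
  def apply(props: Properties): KafkaSink =
    new KafkaSink(() => new KafkaProducer[String, String](props))
}

// Inside main(), before foreachRDD (conf, scc and selectTopic come from the code above;
// the output.kafka.* keys are assumed, not from the original post):
// val producerProps = new Properties()
// producerProps.put("bootstrap.servers", conf.get("output.kafka.bootstrap.servers"))
// producerProps.put("key.serializer", classOf[StringSerializer].getName)
// producerProps.put("value.serializer", classOf[StringSerializer].getName)
// // add SASL settings here if the output cluster is secured, mirroring the consumer config
// val kafkaProducer: Broadcast[KafkaSink] = scc.sparkContext.broadcast(KafkaSink(producerProps))
// val outputTopicMap: Map[String, String] = selectTopic(conf.get("output.kafka.topics").split(","))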
