package spark.util

import kafka.common.TopicAndPartition
import org.apache.spark.streaming.kafka.OffsetRange
import scalikejdbc.{DB, SQL}
object KafkaMysqlOffsetManager {

  // Read the offsets stored in MySQL for the given consumer group.
  def getOffset(groupId: String): Map[TopicAndPartition, Long] = {
    DB.readOnly { implicit session =>
      // groupId is bound as a parameter rather than spliced into the SQL string.
      SQL("select * from offset where groupId = ?")
        .bind(groupId)
        // Turn each row into a (TopicAndPartition, untilOffset) tuple ...
        .map(m => (TopicAndPartition(m.string("topic"), m.int("partitions")), m.long("untilOffset")))
        .toList()
        .apply()
        .toMap // ... and collect the tuples into the Map the signature promises
    }
  }
  // Persist the end offsets of a processed batch: update the row for a
  // topic/partition that is already tracked, insert a new row otherwise.
  def updateOffset(groupId: String, offsetRanges: Array[OffsetRange]): Unit = {
    val fromOffset = getOffset(groupId)
    DB.localTx { implicit session =>
      for (os <- offsetRanges) {
        // Check this specific topic/partition, so partitions added to the
        // topic later still get an insert instead of a no-op update.
        if (fromOffset.contains(TopicAndPartition(os.topic, os.partition))) {
          SQL("update offset set untilOffset = ? where groupId = ? and topic = ? and partitions = ?")
            .bind(os.untilOffset, groupId, os.topic, os.partition).update().apply()
          println(s"updated: topic:${os.topic} partition:${os.partition} fromOffset:${os.fromOffset} untilOffset:${os.untilOffset}")
        } else {
          SQL("replace into offset(topic, partitions, untilOffset, groupId) values(?, ?, ?, ?)")
            .bind(os.topic, os.partition, os.untilOffset, groupId).update().apply()
          println(s"inserted: topic:${os.topic} partition:${os.partition} fromOffset:${os.fromOffset} untilOffset:${os.untilOffset}")
        }
      }
    }
  }
  // Seed initial offsets for a consumer group, e.g. on the first run of a job.
  def insertOffset(groupId: String, offsetRanges: Array[OffsetRange]): Unit = {
    val fromOffset = getOffset(groupId)
    DB.localTx { implicit session =>
      for (os <- offsetRanges) {
        // Only insert when the group has no stored offsets at all.
        if (fromOffset.isEmpty) {
          SQL("replace into offset(topic, partitions, untilOffset, groupId) values(?, ?, ?, ?)")
            .bind(os.topic, os.partition, os.untilOffset, groupId).update().apply()
          println(s"inserted: topic:${os.topic} partition:${os.partition} fromOffset:${os.fromOffset} untilOffset:${os.untilOffset}")
        }
      }
    }
  }
}
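
ScalikeJDBC's DB blocks borrow connections from a process-wide pool, so the pool has to be initialized once before any of the methods above run. A minimal setup sketch, assuming a local MySQL instance; the driver class, JDBC URL, user, and password are placeholders for your environment:

import scalikejdbc.ConnectionPool

// One-time setup at application start: register the MySQL driver and the
// default connection pool that DB.readOnly / DB.localTx will borrow from.
Class.forName("com.mysql.jdbc.Driver")
ConnectionPool.singleton("jdbc:mysql://localhost:3306/kafka", "user", "password")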
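The queries assume an offset table keyed by topic, partition, and group. The column names come from the SQL above, but the column types and the composite primary key are assumptions; a unique key on (topic, partitions, groupId) is what makes "replace into" behave as an upsert. A one-time creation sketch:

DB.autoCommit { implicit session =>
  SQL("""
    create table if not exists offset (
      topic       varchar(200) not null,
      partitions  int          not null,
      groupId     varchar(200) not null,
      untilOffset bigint       not null,
      primary key (topic, partitions, groupId)
    )
  """).execute().apply()
}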
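Typical wiring into a job built on the spark-streaming-kafka 0.8 direct API that OffsetRange and TopicAndPartition come from: read the stored offsets before creating the stream, then write them back after each batch is processed. This is a sketch, not the author's full job; the broker list, group id, app name, and batch interval are assumptions, and it presumes insertOffset has populated the table once for the group (with an empty fromOffsets map the direct stream would read nothing):

import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils}

object OffsetManagedApp {
  def main(args: Array[String]): Unit = {
    val ssc = new StreamingContext(new SparkConf().setAppName("offset-demo"), Seconds(5))
    val kafkaParams = Map("metadata.broker.list" -> "localhost:9092")
    val groupId = "demo-group"

    // Resume from the offsets stored in MySQL.
    val fromOffsets = KafkaMysqlOffsetManager.getOffset(groupId)

    val stream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](
      ssc, kafkaParams, fromOffsets,
      (mmd: MessageAndMetadata[String, String]) => (mmd.key, mmd.message)
    )

    stream.foreachRDD { rdd =>
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      // ... process rdd here ...
      // Persist the new end offsets only after processing succeeds, so a
      // failed batch is re-read from its old fromOffset on restart.
      KafkaMysqlOffsetManager.updateOffset(groupId, offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()
  }
}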