案例:手动维护偏移量到MySQL
●API
http://spark.apache.org/docs/latest/streaming-kafka-0-10-integration.html
- 手动提交offset,以保证数据不会丢失
具体代码实现
import java.sql.{DriverManager, ResultSet}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{OffsetRange, _}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.mutable
/**
*
* Date 2019/8/8 10:47
* Desc 使用Spark-Kafka-0-10版本整合,并手动提交偏移量,维护到MySQL中
*/
object SparkKafkaDemo2 {
def main(args: Array[String]): Unit = {
//1.创建StreamingContext
//spark.master should be set as local[n], n > 1
val conf = new SparkConf().setAppName("wc").setMaster("local[*]")
v