Required dependencies
The `_2.12` suffix in each artifactId must match the project's Scala binary version, and `3.0.0` should match the Spark version of the target cluster.
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.12</artifactId>
    <version>3.0.0</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming_2.12</artifactId>
    <version>3.0.0</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka-0-10_2.12</artifactId>
    <version>3.0.0</version>
</dependency>
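If the project is built with sbt instead of Maven, the same dependencies can be declared as in the sketch below (a minimal build.sbt fragment, assuming Scala 2.12; the %% operator appends the Scala binary suffix automatically, so the `_2.12` part is dropped from the artifact names):

// build.sbt -- sbt equivalent of the Maven dependencies above (assumed setup, not from the original post)
scalaVersion := "2.12.10"

libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-sql"                  % "3.0.0",
  "org.apache.spark" %% "spark-streaming"            % "3.0.0",
  "org.apache.spark" %% "spark-streaming-kafka-0-10" % "3.0.0"
)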
Code example
The sample below builds a StreamingContext with 5-second batches, subscribes to two topics through the direct-stream API, saves each batch as text files, and prints the key/value pairs.
package scala.spark.test

import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Spark Streaming read-Kafka test.
 *
 * @author sssuperMario
 * @date 2020-08-12
 */
object SparkStreamingReadKafka {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local[*]") // run locally using all available cores
      .setAppName("SparkStreamingReadKafkaTest")
    // Build the StreamingContext directly from the SparkConf; 5-second micro-batches
    val ssc = new StreamingContext(conf, Seconds(5))

    val topics = Array("topic_1", "topic_2")
    val kafkaOptions = Map[String, Object](
      "bootstrap.servers" -> "127.0.0.1:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "SparkStreamingKafkaTest",
      "auto.offset.reset" -> "earliest", // start from the beginning when no committed offset exists
      "enable.auto.commit" -> (false: java.lang.Boolean) // offsets are not committed automatically
    )

    val kafkaDS = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaOptions))

    // Guard against null values (e.g. tombstone records) before checking the length
    val kafkaPairStream = kafkaDS
      .filter(record => record.value() != null && record.value().nonEmpty)
      .map(record => (record.key(), record.value()))

    // saveAsTextFiles takes a path prefix; one directory per batch is created, e.g. /read_data/-<batch time in ms>
    kafkaPairStream.saveAsTextFiles("/read_data/")

    kafkaPairStream.foreachRDD((rdd, time) => {
      println(time)
      rdd.foreach { case (key, value) =>
        println(key + " => " + value)
      }
    })

    ssc.start()
    ssc.awaitTermination() // blocks until the streaming context is stopped
  }
}
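With enable.auto.commit set to false, the sample never commits offsets back to Kafka, so each restart under the same group.id replays from the earliest offset. A minimal sketch of manual commits using the kafka010 CanCommitOffsets API is shown below (the print logic stands in for the processing above; commitAsync must be called on the original direct stream, since only its RDDs carry offset ranges):

import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges}

kafkaDS.foreachRDD { rdd =>
  // Offset ranges are only exposed by the RDDs of the untransformed direct stream
  val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

  rdd.foreach(record => println(record.key() + " => " + record.value()))

  // Commit the just-processed offsets back to Kafka (asynchronous)
  kafkaDS.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
}

Committing after the batch's work completes gives at-least-once semantics: on failure the batch may be reprocessed, but no data is skipped.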