Real-time WordCount Based on the Kafka Receiver Approach

import kafka.serializer.StringDecoder
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
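// Dependency note: these imports come from the legacy Kafka 0.8 receiver connector,
// e.g. in sbt (the version is an assumption; match it to your Spark build):
//   libraryDependencies += "org.apache.spark" %% "spark-streaming-kafka" % "1.6.3"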
object KafkaReceiverDemo {
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.WARN)
    System.setProperty("HADOOP_USER_NAME", "tg")
    val conf = new SparkConf().setAppName(this.getClass.getSimpleName).setMaster("local[*]")
    // Cap the receiver's intake rate: spark.streaming.receiver.maxRate sets how many
    // records per second each receiver may accept.
    // conf.set("spark.streaming.receiver.maxRate", "2000")
    // Enable back-pressure, which dynamically adapts the receive rate to the cluster's
    // processing capacity.
    conf.set("spark.streaming.backpressure.enabled", "true")
    // To guarantee zero data loss, enable the receiver write-ahead log (WAL).
    conf.set("spark.streaming.receiver.writeAheadLog.enable", "true")
    val ssc = new StreamingContext(conf, Seconds(2))
    // Set the checkpoint directory; with the WAL enabled, received data is logged under it
    ssc.checkpoint("hdfs://master:9000/checkpoint/")
    // Build the Kafka-backed DStream
    val kafkaParams = Map[String, String](
      // ZooKeeper address
      "zookeeper.connect" -> "master:2181",
      // Commit consumed offsets to ZooKeeper every 2 seconds
      "auto.commit.interval.ms" -> "2000",
      // Consumer group
      "group.id" -> "spark1001",
      // Where to start consuming: "largest" starts from the latest offset of each partition
      "auto.offset.reset" -> "largest"
    )
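    // Note: with the receiver-based API, offsets are auto-committed to ZooKeeper, so even
    // with the WAL the pipeline is at-least-once: records may be replayed after a failure,
    // but not lost.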
    // The type parameters String, String, StringDecoder, StringDecoder are the key and
    // value types and their decoder classes.
    // val kafkaInputDStream: ReceiverInputDStream[(String, String)] = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, Map("test" -> 3), StorageLevel.MEMORY_ONLY_SER)
    // _._2 extracts the message value
    // val resultDStream: DStream[(String, Int)] = kafkaInputDStream.map(_._2).flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)
    // resultDStream.print()
    // _._1 extracts the message key, which turns out to be null here
    // kafkaInputDStream.map(x => x._1).print()
    // With the write-ahead log enabled, each receiver's intake rate drops. To keep zero
    // data loss while still raising throughput, create several input DStreams so that
    // multiple receivers ingest data in parallel.
    // Note: two input DStreams are created here, i.e. two receivers. Each receiver
    // occupies one CPU core, so the master must be local[N] with N > 2.
    val kafkaInputDStreams = (0 until 2).map { _ =>
      // Map("test" -> 3): consume topic "test" with 3 threads per receiver
      KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
        ssc, kafkaParams, Map("test" -> 3), StorageLevel.MEMORY_ONLY_SER)
    }
    // Use StreamingContext.union to merge the two input DStreams into one
    val bigInputDStream = ssc.union(kafkaInputDStreams)
    // Per-batch word count: each 2-second batch is counted independently
    bigInputDStream.map(_._2).flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).print()
    ssc.start()
    ssc.awaitTermination()
  }
}
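To give the job something to count, below is a minimal producer sketch using the legacy Kafka 0.8 Scala producer API. The broker address master:9092, the object name TestProducer, and the message text are assumptions; only the topic name "test" comes from the job above.

import java.util.Properties

import kafka.producer.{KeyedMessage, Producer, ProducerConfig}

object TestProducer {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    // Assumed broker address; adjust to your cluster
    props.put("metadata.broker.list", "master:9092")
    props.put("serializer.class", "kafka.serializer.StringEncoder")
    val producer = new Producer[String, String](new ProducerConfig(props))
    // Emit one line every half second so each 2-second batch has data
    for (_ <- 1 to 100) {
      producer.send(new KeyedMessage[String, String]("test", "hello spark hello kafka"))
      Thread.sleep(500)
    }
    producer.close()
  }
}

With the streaming job running, starting this producer should make the print() output show growing word counts every 2 seconds.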