Version 0.8
1. Receiver mode
The 0.8 integration provides this mode; it was removed in later versions.
- A dedicated receiver Executor reads the data, so the read rate and the processing rate are not matched
- Data is shipped across machines and protected by a WAL (write-ahead log)
- The receiver reads data with multiple threads; to increase parallelism you have to create several streams and union them (see the sketch below)
- Offsets are stored in ZooKeeper
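Before the full receiver example, a minimal sketch of the last two bullets: several receivers reading the same topic unioned into one stream, with the receiver WAL switched on. This is not from the original notes; the object name is made up, the topic/group/ZooKeeper values mirror the example below, and enabling the WAL is an assumption about the cluster configuration.
package day10

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

// Hypothetical sketch, not part of the original notes
object spark01_receive_union {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("receiverUnion").setMaster("local[4]")
      // Receiver WAL: a replayable copy of the received data, written under the checkpoint directory
      .set("spark.streaming.receiver.writeAheadLog.enable", "true")
    val ssc = new StreamingContext(conf, Seconds(3))
    ssc.checkpoint("cp")
    // Each createStream call starts one receiver; running several receivers and unioning
    // their output is how consumption parallelism is raised in receiver mode
    val streams = (1 to 3).map(_ =>
      KafkaUtils.createStream(ssc, "hadoop100:2181", "yk_test", Map("a" -> 1))
    )
    val unioned: DStream[(String, String)] = ssc.union(streams)
    unioned.map(_._2).flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).print()
    ssc.start()
    ssc.awaitTermination()
  }
}
Note that with three receivers the job needs at least four cores (locally or on the cluster): one per receiver plus at least one for processing.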
package day10

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * @author yangkun
 * @date 2020/11/6 16:55
 * @version 1.0
 */
object spark01_receive {
  def main(args: Array[String]): Unit = {
    // Create the configuration object
    val conf: SparkConf = new SparkConf().setAppName("spark01_receive").setMaster("local[*]")
    // Create the SparkStreaming context
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(3))
    // Connect to Kafka and create the DStream (ZooKeeper quorum, consumer group, topic -> thread count)
    val kafkaDstream: ReceiverInputDStream[(String, String)] = KafkaUtils.createStream(
      ssc,
      "hadoop100:2181",
      "yk_test",
      Map("a" -> 1)
    )
    // Of the Kafka (key, value) pairs we only need the value
    val lineDS: DStream[String] = kafkaDstream.map(_._2)
    // Flatten into words
    val flatMapDS: DStream[String] = lineDS.flatMap(_.split(" "))
    // Map each word to a (word, 1) pair
    val mapDS: DStream[(String, Int)] = flatMapDS.map((_, 1))
    // Aggregate the counts per word
    val reduceDS: DStream[(String, Int)] = mapDS.reduceByKey(_ + _)
    // Print the result
    reduceDS.print()
    // Start the job
    ssc.start()
    ssc.awaitTermination()
  }
}
2. Direct mode
- The Executors read the data and do the computation themselves (no dedicated receiver)
- Increase the number of Executors to increase consumption parallelism
- Offset storage:
a) Checkpoint (create the StreamingContext with getActiveOrCreate)
b) Manual maintenance (in a transactional storage system)
c) The offsets must be read in the first operator applied to the stream: offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
2.1 Automatic offset maintenance
Offsets are maintained automatically in the checkpoint. In this version of the code we only set the checkpoint directory: offsets are written into the checkpoint but never read back from it, so messages can be lost.
package day10

import kafka.serializer.StringDecoder
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * @author yangkun
 * @date 2020/11/6 20:50
 * @version 1.0
 * Offsets are maintained automatically in the checkpoint.
 * This version only sets the checkpoint directory: offsets are written into the checkpoint
 * but never read back from it, so messages can be lost.
 */
object spark02_direct_auto1 {
  def main(args: Array[String]): Unit = {
    // Create the configuration object
    val conf: SparkConf = new SparkConf().setAppName("spark02_direct_auto1").setMaster("local[*]")
    // Create the SparkStreaming context
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(3))
    // Set the checkpoint directory
    ssc.checkpoint("cp")
    // Kafka parameters
    val kafkaParams = Map(
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hdp:6667",
      ConsumerConfig.GROUP_ID_CONFIG -> "yk_test"
    )
    // Connect to Kafka with the direct API and subscribe to topic "a"
    val kafkaRDD: InputDStream[(String, String)] = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, Set("a"))
    // Of the Kafka (key, value) pairs we only need the value
    val lineDS: DStream[String] = kafkaRDD.map(_._2)
    // Flatten into words
    val flatMapDS: DStream[String] = lineDS.flatMap(_.split(" "))
    // Map each word to a (word, 1) pair
    val mapDS: DStream[(String, Int)] = flatMapDS.map((_, 1))
    // Aggregate the counts per word
    val reduceDS: DStream[(String, Int)] = mapDS.reduceByKey(_ + _)
    // Print the result
    reduceDS.print()
    ssc.start()
    ssc.awaitTermination()
  }
}
2.2 Automatic offset maintenance with recovery from the checkpoint
- Offsets are maintained automatically in the checkpoint
- Change how the StreamingContext is obtained: first try to recover it from the checkpoint, and only create it with the supplied function if no checkpoint exists. This guarantees that no data is lost.
- Drawbacks:
- 1. Too many small files are produced
- 2. The checkpoint records the timestamp of the last batch, so on restart every batch interval between that timestamp and the current time is executed again
package day10

import kafka.serializer.StringDecoder
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * @author yangkun
 * @date 2020/11/6 20:50
 * @version 1.0
 * Desc: connect to the Kafka data source with the direct API and read the data.
 * Offsets are maintained automatically in the checkpoint.
 * The StreamingContext is first recovered from the checkpoint; if there is none, it is created
 * by the supplied function. This guarantees that no data is lost.
 * Drawbacks:
 * 1. Too many small files
 * 2. The checkpoint records the timestamp of the last batch, so on restart every batch interval
 *    between that timestamp and the current time is executed again
 */
object spark02_direct_auto2 {
  def main(args: Array[String]): Unit = {
    // Recover the StreamingContext from the checkpoint, or create it if no checkpoint exists
    val ssc: StreamingContext = StreamingContext.getActiveOrCreate("cp", () => getStreamingContext())
    // Set the checkpoint directory
    ssc.checkpoint("cp")
    ssc.start()
    ssc.awaitTermination()
  }

  def getStreamingContext(): StreamingContext = {
    val conf: SparkConf = new SparkConf().setAppName("direct2").setMaster("local[*]")
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(3))
    // Kafka parameters
    val kafkaParams = Map(
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hdp:6667",
      ConsumerConfig.GROUP_ID_CONFIG -> "yk_test"
    )
    val kafkaRDD: InputDStream[(String, String)] = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, Set("a"))
    val lineDS: DStream[String] = kafkaRDD.map(_._2)
    // Flatten into words
    val flatMapDS: DStream[String] = lineDS.flatMap(_.split(" "))
    // Map each word to a (word, 1) pair
    val mapDS: DStream[(String, Int)] = flatMapDS.map((_, 1))
    // Aggregate the counts per word
    val reduceDS: DStream[(String, Int)] = mapDS.reduceByKey(_ + _)
    // Print the result
    reduceDS.print()
    ssc
  }
}
2.3 Manual offset maintenance
package day10

import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils, OffsetRange}

/**
 * @author yangkun
 * @date 2020/11/6 21:20
 * @version 1.0
 */
object spark03_direct_handle {
  def main(args: Array[String]): Unit = {
    // Create the configuration object
    val conf: SparkConf = new SparkConf().setAppName("direct_handle").setMaster("local[*]")
    // Create the SparkStreaming context
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(3))
    // Kafka parameters
    val kafkaParams = Map(
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hdp:6667",
      ConsumerConfig.GROUP_ID_CONFIG -> "yk_test"
    )
    // Read the position (offset) at which the last run stopped consuming.
    // In a real project, to guarantee exactly-once processing, the offsets are stored together with
    // the processed data in a transactional store such as MySQL (a sketch follows this example);
    // here they are simply hard-coded.
    val offsets = Map(TopicAndPartition("a", 0) -> 10L)
    val kafkaRDD: InputDStream[String] = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, String](
      ssc,
      kafkaParams,
      offsets,
      (m: MessageAndMetadata[String, String]) => m.message()
    )
    // After each batch is consumed, update the offsets
    var offsetRanges = Array.empty[OffsetRange]
    kafkaRDD.transform(
      // The rdd here is a KafkaRDD; that class is private but implements the HasOffsetRanges trait,
      // so cast the rdd to HasOffsetRanges to obtain its offsetRanges
      rdd => {
        offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        rdd
      }
    ).foreachRDD {
      rdd => {
        for (o <- offsetRanges) {
          println(o.topic, o.partition, o.fromOffset, o.untilOffset)
        }
      }
    }
    // val lineDS: DStream[String] = kafkaRDD.map(_._1)
    // Flatten into words (the message handler above already returns only the value)
    val flatMapDS: DStream[String] = kafkaRDD.flatMap(_.split(" "))
    // Map each word to a (word, 1) pair
    val mapDS: DStream[(String, Int)] = flatMapDS.map((_, 1))
    // Aggregate the counts per word
    val reduceDS: DStream[(String, Int)] = mapDS.reduceByKey(_ + _)
    // Print the result
    reduceDS.print()
    ssc.start()
    ssc.awaitTermination()
  }
}
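The comment above says that in a real project the offsets would be kept in a transactional store such as MySQL. A minimal sketch of that idea follows; it is not part of the original notes, and the JDBC URL, credentials and the kafka_offsets table are hypothetical.
import java.sql.DriverManager
import org.apache.spark.streaming.kafka.OffsetRange

object OffsetStore {
  // Sketch: write the batch's offsets inside a MySQL transaction.
  // The JDBC URL, credentials and the table kafka_offsets(topic, partition, offset) are assumptions.
  def saveOffsets(ranges: Array[OffsetRange]): Unit = {
    val conn = DriverManager.getConnection("jdbc:mysql://hdp:3306/test", "root", "123456")
    try {
      conn.setAutoCommit(false)
      val stmt = conn.prepareStatement(
        "REPLACE INTO kafka_offsets(topic, `partition`, offset) VALUES (?, ?, ?)")
      for (o <- ranges) {
        stmt.setString(1, o.topic)
        stmt.setInt(2, o.partition)
        stmt.setLong(3, o.untilOffset) // resume from untilOffset on the next start
        stmt.executeUpdate()
      }
      // In a real job the batch's results would be written here, in the same transaction,
      // so that data and offsets commit (or roll back) together
      conn.commit()
    } finally {
      conn.close()
    }
  }
}
It would be called from inside the foreachRDD above, after the batch's results have been written, and the hard-coded offsets map at startup would be replaced by a SELECT against the same table.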
pom file
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>blibliSpark</artifactId>
        <groupId>com.bupt</groupId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.bupt</groupId>
    <artifactId>sparkStreaming</artifactId>

    <dependencies>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>2.1.1</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.26</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>2.1.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-hive_2.11</artifactId>
            <version>2.1.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-exec</artifactId>
            <version>1.2.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_2.11</artifactId>
            <version>2.1.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
            <version>2.1.1</version>
        </dependency>
    </dependencies>
</project>
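The pom above only pulls in the 0.8 integration. The 0.10 example in the next section would additionally need the corresponding 0.10 artifact; a sketch, assuming the same Spark and Scala versions as the other dependencies (verify against the cluster, and note that the 0.8 and 0.10 integrations pull in different Kafka client versions):
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
    <version>2.1.1</version>
</dependency>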
Version 0.10
- The Executors read the data and do the computation themselves
- Increase the number of Executors to increase consumption parallelism
- Offset storage:
a) In the __consumer_offsets internal topic (committed back to Kafka; see the sketch after the example below)
b) Manual maintenance (in a transactional storage system)
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * @author yangkun
 * @date 2020/11/7 15:04
 * @version 1.0
 */
object spark_direct010 {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("kafka010")
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(3))
    // Kafka parameters: brokers, consumer group and the key/value deserializers
    val kafkaParams = Map(
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hdp:6667",
      ConsumerConfig.GROUP_ID_CONFIG -> "yk",
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer]
    )
    // Direct stream against the 0.10 consumer API; the executors consume the partitions assigned to them
    val kafkaDS: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Set("a"), kafkaParams)
    )
    // Count occurrences of each message value
    kafkaDS.map(_.value()).map((_, 1)).reduceByKey(_ + _).print()
    ssc.start()
    ssc.awaitTermination()
  }
}
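The example above only prints the counts and never stores offsets. For option a) above, the offsets would be committed back to Kafka and kept in the __consumer_offsets internal topic. A minimal sketch, not from the original notes: it reuses kafkaDS from the example, would sit before ssc.start(), and assumes enable.auto.commit is set to false in kafkaParams.
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges}

// Sketch: read the offset ranges on the driver (first call on the RDD), process the batch,
// then commit the offsets back to Kafka, where they land in __consumer_offsets.
kafkaDS.foreachRDD { rdd =>
  val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
  // ... process rdd here ...
  kafkaDS.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
}
commitAsync is asynchronous, so the offsets for a batch may only be committed after a later batch has started; manual maintenance in a transactional store (option b) is still the way to get exactly-once output.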