Consuming Kafka Data

Consuming Kafka data with Spark

1. Import the Maven dependencies
<properties>
	<spark.version>2.1.1</spark.version>
</properties>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-core_2.11</artifactId>
    <version>${spark.version}</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming_2.11</artifactId>
    <version>${spark.version}</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
    <version>${spark.version}</version>
</dependency>
2. Create kafkaconsumer.properties
bootstrap.servers=hadoop102:9092,hadoop103:9092,hadoop104:9092
group.id=spark_0615
key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
auto.offset.reset=earliest
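The example in step 3 hard-codes these settings in a Scala Map; if you prefer to reuse kafkaconsumer.properties instead, a minimal sketch of loading it into a parameter map (assuming the file is on the classpath) could look like this:
import java.util.Properties
import scala.collection.JavaConverters._

// Sketch only: load kafkaconsumer.properties from the classpath into a Scala Map
val props = new Properties()
props.load(Thread.currentThread().getContextClassLoader.getResourceAsStream("kafkaconsumer.properties"))
val kafkaParamsFromFile: Map[String, String] = props.asScala.toMap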
3. Consume the Kafka data
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

// Create the SparkConf
val conf: SparkConf = new SparkConf().setAppName("KafkaTest").setMaster("local[*]")
// Limit the per-partition consumption rate, enable back pressure, and stop gracefully on shutdown
conf.set("spark.streaming.kafka.maxRatePerPartition", "100")
conf.set("spark.streaming.backpressure.enabled", "true")
conf.set("spark.streaming.stopGracefullyOnShutdown", "true")

// Create the StreamingContext with a 5-second batch interval
val ssc = new StreamingContext(conf, Seconds(5))

// Kafka consumer parameters
val kafkaPara: Map[String, String] = Map(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hadoop102:9092,hadoop103:9092,hadoop104:9092",
  ConsumerConfig.GROUP_ID_CONFIG -> "bigdata",
  ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
  ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer")

// Specify custom starting offsets for each TopicPartition
val offsets = Map(new TopicPartition("first", 0) -> 1L)

val consumerStrategy: ConsumerStrategy[String, String] = ConsumerStrategies.Subscribe[String, String](List("first"), kafkaPara, offsets)

val inputDStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](ssc, LocationStrategies.PreferConsistent, consumerStrategy)

var offsetRanges: Array[OffsetRange] = Array.empty[OffsetRange]

val lineDStream: DStream[String] = inputDStream.transform(rdd => {
  offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
  rdd.map(x => x.value())
})

lineDStream.print()

// offsetRanges is only populated as batches run; in a real job the offsets would be
// committed inside foreachRDD (see the sketch after this block)
offsetRanges.foreach(x => x.untilOffset)

ssc.start()
ssc.awaitTermination()
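The offsetRanges captured above are read but never persisted anywhere. A minimal sketch of committing each batch's offsets back to Kafka through the connector's CanCommitOffsets interface (this would take the place of the transform/print section above) could look like this:
// Sketch only: commit offsets after each batch has been processed
inputDStream.foreachRDD { rdd =>
  val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
  rdd.map(_.value()).collect().foreach(println)                    // process the batch
  inputDStream.asInstanceOf[CanCommitOffsets].commitAsync(ranges)  // commit once processing succeeds
}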

Consuming Kafka data with Flink

1. Import the Maven dependencies
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-scala_2.11</artifactId>
    <version>1.7.2</version>
</dependency>
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-streaming-scala_2.11</artifactId>
    <version>1.7.2</version>
</dependency>
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-connector-kafka-0.11_2.11</artifactId>
    <version>1.7.2</version>
</dependency>
2. Create kafkaconsumer.properties
bootstrap.servers=hadoop102:9092,hadoop103:9092,hadoop104:9092
group.id=flink_0615
key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
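Both Flink examples below load this file through ProPertiesUtil.getProperties, a custom helper whose implementation is not shown in this post. A minimal sketch of such a utility (the class name and behavior are assumptions inferred from how it is called) could be:
import java.util.Properties

object ProPertiesUtil {
    // Sketch only: load a .properties file from the classpath (assumed behavior)
    def getProperties(fileName: String): Properties = {
        val props = new Properties()
        val in = Thread.currentThread().getContextClassLoader.getResourceAsStream(fileName)
        try props.load(in) finally in.close()
        props
    }
}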
3. Consume the value data from Kafka
import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011

object FromKafkaSource {
    def main(args: Array[String]): Unit = {
        // Create the streaming execution environment
        val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
        // Load the consumer configuration
        val properties: Properties = ProPertiesUtil.getProperties("kafkaconsumer.properties")
        // Add the Kafka source to create a DataStream
        val lineDataStream: DataStream[String] = env.addSource(new FlinkKafkaConsumer011[String]("flink", new SimpleStringSchema(), properties))
        // Compute the word count
        val wordToCountDataStream: DataStream[(String, Int)] = lineDataStream.flatMap(_.split(" ")).map((_, 1)).keyBy(0).sum(1)
        // Print the result
        wordToCountDataStream.print()
        // Execute the job
        env.execute()
    }
}
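Note that FlinkKafkaConsumer011 only commits offsets back to Kafka when a checkpoint completes if checkpointing is enabled; without it, the Kafka client's periodic auto-commit is used. A minimal sketch of enabling checkpointing for the job above (the 5-second interval is just an example value):
// Sketch only: enable checkpointing so offsets are committed on checkpoint completion
env.enableCheckpointing(5000)

val consumer = new FlinkKafkaConsumer011[String]("flink", new SimpleStringSchema(), properties)
consumer.setCommitOffsetsOnCheckpoints(true) // this is the default; shown here for clarity
val lines: DataStream[String] = env.addSource(consumer)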
4. Consume key-value data from Kafka
import java.util.Properties

import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011
import org.apache.flink.streaming.util.serialization.KeyedDeserializationSchema

object FromKafkaSource {
    def main(args: Array[String]): Unit = {
        // Create the streaming execution environment
        val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
        // Load the consumer configuration
        val properties: Properties = ProPertiesUtil.getProperties("kafkaconsumer.properties")
        // Add the Kafka source with a KeyedDeserializationSchema that keeps both key and value
        val keyToValueDataStream: DataStream[(String, String)] = env.addSource(new FlinkKafkaConsumer011[(String, String)]("flink", new KeyedDeserializationSchema[(String, String)] {
            override def isEndOfStream(nextElement: (String, String)): Boolean = {
                false
            }

            override def deserialize(messageKey: Array[Byte], message: Array[Byte], topic: String, partition: Int, offset: Long): (String, String) = {
                if (messageKey != null && message != null) {
                    val key = new String(messageKey, "UTF-8")
                    val value = new String(message, "UTF-8")
                    (key, value)
                } else {
                    // If the key or value in Kafka is null, return a fixed placeholder tuple
                    ("null", "null")
                }
            }

            override def getProducedType: TypeInformation[(String, String)] = {
                createTuple2TypeInformation(createTypeInformation[String], createTypeInformation[String])
            }
        }, properties))
        // Print the result
        keyToValueDataStream.print()
        // Execute the job
        env.execute()
    }
}