1. Kafka version installed with Docker
KAFKA_VERSION 2.5.0
SCALA_VERSION 2.12
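For reference, a minimal sketch of starting a matching broker with Docker. The image, container name, and hostnames are assumptions for illustration only; the original records just the two version numbers above.
# sketch only: wurstmeister/kafka tags follow SCALA_VERSION-KAFKA_VERSION; adjust image, name, and hosts to your setup
docker run -d --name kafka001 -p 9092:9092 \
  -e KAFKA_ADVERTISED_HOST_NAME=kafka001 \
  -e KAFKA_ZOOKEEPER_CONNECT=zookeeper001:2181 \
  wurstmeister/kafka:2.12-2.5.0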
2. flink-connector Maven configuration; note the artifactId: pick either the universal connector or the 0.11-specific one, not both
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-connector-kafka_${scala.binary.version}</artifactId>
    <!--<artifactId>flink-connector-kafka-0.11_${scala.binary.version}</artifactId>-->
    <version>${flink.version}</version>
</dependency>
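The two placeholders above must resolve to values defined in the POM, typically under <properties>. A minimal sketch; the Flink version shown is an assumption (the original does not state it), while the Scala binary version matches the setup in section 1:
<properties>
    <!-- flink.version is an example value; use the Flink release your cluster actually runs -->
    <flink.version>1.10.1</flink.version>
    <scala.binary.version>2.12</scala.binary.version>
</properties>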
3. Code
With flink-connector-kafka_${scala.binary.version}, use:
FlinkKafkaConsumer<String> consumer = new FlinkKafkaConsumer<String>("test",
        new SimpleStringSchema(), props);
With flink-connector-kafka-0.11_${scala.binary.version}, use:
FlinkKafkaConsumer011<String> consumer = new FlinkKafkaConsumer011<String>("test",
        new SimpleStringSchema(), props);
package com.test.flink;

import java.util.Properties;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

/**
 * @author
 * @className com.test.flink.KafkaEventTimeFlink
 * @date 2020/07/13 14:44
 * @description Minimal Flink job that reads strings from a Kafka topic and prints them.
 */
public class KafkaEventTimeFlink {

    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        env.setParallelism(1);
        System.out.println("environment.getParallelism >>>> " + env.getParallelism());

        Properties props = new Properties();
        // hostname:port or ip:port
        props.put("bootstrap.servers", "kafka001:9092");
        // only needed by the legacy 0.8 connector; newer clients ignore it (with a warning)
        props.put("zookeeper.connect", "zookeeper001:2181");
        props.put("group.id", "test");
        // the Flink connector deserializes records via SimpleStringSchema, so these two settings are effectively unused
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("auto.offset.reset", "latest"); // earliest / latest / none

        // universal connector (flink-connector-kafka_${scala.binary.version})
        FlinkKafkaConsumer<String> consumer = new FlinkKafkaConsumer<String>("test",
                new SimpleStringSchema(), props);
        // 0.11 connector (flink-connector-kafka-0.11_${scala.binary.version})
        // FlinkKafkaConsumer011<String> consumer = new FlinkKafkaConsumer011<String>("test",
        //         new SimpleStringSchema(), props);

        DataStream<String> dataStream = env.addSource(consumer);
        dataStream.print();

        env.execute("KafkaEventTimeFlink");
    }
}
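To verify the job end to end, start it and push a few messages into the test topic with Kafka's console producer; the broker address matches the bootstrap.servers value used above:
# run from the bin/ directory of a Kafka installation (or inside the broker container)
kafka-console-producer.sh --broker-list kafka001:9092 --topic test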
5. Troubleshooting
5.1 No messages received / errors thrown
With flink-connector-kafka-0.11_${scala.binary.version}, no messages are received and the log shows:
org.apache.kafka.clients.consumer.internals.AbstractCoordinator - Marking the coordinator kafka001:9092 (id: 2147483647 rack: null) dead for group test
With flink-connector-kafka_${scala.binary.version}, the job fails with:
11:23:50,775 INFO org.apache.flink.runtime.taskmanager.Task - Source: Custom Source -> Sink: Print to Std. Out (1/1) (51b41ed515565440d8fb13a26e9315e2) switched from RUNNING to FAILED.
org.apache.kafka.common.KafkaException: Failed to construct kafka consumer
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:799)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:650)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:630)
at org.apache.flink.streaming.connectors.kafka.internal.KafkaPartitionDiscoverer.initializeConnections(KafkaPartitionDiscoverer.java:58)
at org.apache.flink.streaming.connectors.kafka.internals.AbstractPartitionDiscoverer.open(AbstractPartitionDiscoverer.java:94)
at org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase.open(FlinkKafkaConsumerBase.java:469)
at org.apache.flink.api.common.functions.util.FunctionUtils.openFunction(FunctionUtils.java:36)
at org.apache.flink.streaming.api.operators.AbstractUdfStreamOperator.open(AbstractUdfStreamOperator.java:102)
at org.apache.flink.streaming.runtime.tasks.StreamTask.openAllOperators(StreamTask.java:424)
at org.apache.flink.streaming.runtime.tasks.StreamTask.invoke(StreamTask.java:290)
at org.apache.flink.runtime.taskmanager.Task.run(Task.java:704)
at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.kafka.common.config.ConfigException: No resolvable bootstrap urls given in bootstrap.servers
at org.apache.kafka.clients.ClientUtils.parseAndValidateAddresses(ClientUtils.java:66)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:709)
... 11 more
Solution: add an "IP hostname" mapping for the broker to the hosts file of the machine running the Flink job (see the example below). In both cases the root cause is typically that the hostname the client is given (kafka001, either via bootstrap.servers or via the broker's advertised address) cannot be resolved on that machine.
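On Linux/macOS the file is /etc/hosts (on Windows: C:\Windows\System32\drivers\etc\hosts). Example entries matching the hostnames used in the code; the IPs are placeholders for the real broker/ZooKeeper addresses:
127.0.0.1    kafka001
127.0.0.1    zookeeper001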
6. For the mapping between Flink connector versions and Kafka versions, see the following blog: