Flink Custom Kafka Source

Add the dependency

<dependency>
	<groupId>org.apache.flink</groupId>
	<artifactId>flink-connector-kafka_2.12</artifactId>
	<version>1.13.2</version>
	<scope>provided</scope>
</dependency>

Dependencies required when submitting and running the job on a Flink cluster

Before submitting the job to the Flink server, upload the dependency jars to Flink's lib directory and then restart the Flink service so the jars get loaded; otherwise a ClassNotFoundException will be thrown.

  • flink-connector-kafka_2.12-1.13.2.jar
  • kafka-clients-2.4.1.jar

Notes before starting

Make sure the topic actually exists in Kafka; otherwise the following execution exception will occur:

  • Execution logic: the job first fetches the full topic list from Kafka and then applies the regex to obtain the matching topics. Debugging showed that fetching the full topic list returned null, which then produces the exception below. Create the corresponding topic and the job will run normally after the next restart (see the AdminClient sketch after the stack trace).
java.lang.RuntimeException: Unable to retrieve any partitions with KafkaTopicsDescriptor: Topic Regex Pattern (WYSXT_47_(.+)_47_other_47_property_47_post)
	at org.apache.flink.streaming.connectors.kafka.internals.AbstractPartitionDiscoverer.discoverPartitions(AbstractPartitionDiscoverer.java:156)
	at org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase.open(FlinkKafkaConsumerBase.java:577)
	at org.apache.flink.api.common.functions.util.FunctionUtils.openFunction(FunctionUtils.java:34)
	at org.apache.flink.streaming.api.operators.AbstractUdfStreamOperator.open(AbstractUdfStreamOperator.java:102)
	at org.apache.flink.streaming.runtime.tasks.OperatorChain.initializeStateAndOpenOperators(OperatorChain.java:442)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.restoreGates(StreamTask.java:582)
	at org.apache.flink.streaming.runtime.tasks.StreamTaskActionExecutor$SynchronizedStreamTaskActionExecutor.call(StreamTaskActionExecutor.java:100)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.executeRestore(StreamTask.java:562)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.runWithCleanUpOnFail(StreamTask.java:647)
	at org.apache.flink.streaming.runtime.tasks.StreamTask.restore(StreamTask.java:537)
	at org.apache.flink.runtime.taskmanager.Task.doRun(Task.java:759)
	at org.apache.flink.runtime.taskmanager.Task.run(Task.java:566)
	at java.lang.Thread.run(Thread.java:748)
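
To avoid the exception above, you can pre-create the topic before submitting the job. The following is a minimal sketch using the Kafka AdminClient from the kafka-clients dependency listed earlier; the broker address and the concrete topic name are placeholders assumed to match the consumer's regex.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

public class TopicPrecheck {

	public static void main(String[] args) throws Exception {
		final Properties props = new Properties();
		// Placeholder broker address; replace with your cluster's bootstrap servers.
		props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

		try (AdminClient admin = AdminClient.create(props)) {
			// Hypothetical topic name matching the regex used by the consumer.
			final String topic = "WYSXT_demo_47_other_47_property_47_post";
			// Only create the topic when it is missing, so reruns stay idempotent.
			if (!admin.listTopics().names().get().contains(topic)) {
				admin.createTopics(Collections.singletonList(new NewTopic(topic, 3, (short) 1))).all().get();
			}
		}
	}
}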

Building the KafkaSource parameter class

import java.io.Serializable;

import com.alibaba.fastjson.JSONObject; // assumed: fastjson provides the JSONObject used below

/**
 * Kafka source parameter holder.
 * @author yinlilan
 *
 */
public class KafkaSource implements Serializable {

	private static final long serialVersionUID = 6060562931782343343L;

	private String bootStrapServers;
	
	private String groupId;
	
	private String topic;
	
	public String getBootStrapServers() {
		return bootStrapServers;
	}

	public String getGroupId() {
		return groupId;
	}
	
	public String getTopic() {
		return topic;
	}

	public KafkaSource(Object obj) {
		final JSONObject json = JSONObject.parseObject(obj.toString());
		this.bootStrapServers = json.getString("bootStrapServers");
		this.groupId = json.getString("groupId");
		this.topic = json.getString("topic");
	}
	
}
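
A minimal usage sketch of the parameter holder; the JSON values below are placeholders and the example assumes the fastjson JSONObject shown above.

// Placeholder configuration; replace with real broker, group and topic values.
final String json = "{\"bootStrapServers\":\"localhost:9092\",\"groupId\":\"demo-group\",\"topic\":\"demo-topic\"}";
final KafkaSource kafkaSource = new KafkaSource(json);
// The getters expose the parsed values for building the consumer later on.
System.out.println(kafkaSource.getBootStrapServers()); // localhost:9092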

Building the custom KafkaMQSource

The Kafka source is implemented on top of the FlinkKafkaConsumer<T> class. KafkaDeserializationSchema<T> handles data deserialization, so you can assemble each record into whatever shape you want before passing it downstream.

import java.io.Serializable;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Pattern;

import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.KafkaDeserializationSchema;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerRecord;

/**
 * Kafka source initialization.
 * @author yinlilan
 *
 */
public class KafkaMessageSource implements Serializable {
	
	private static final long serialVersionUID = -1128615689349479275L;
	
	private FlinkKafkaConsumer<Map<String, String>> consumer;
	
	public KafkaMessageSource(final String bootStrapServers, final String groupId, final String topic){
    	Properties properties = new Properties();
    	properties.setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootStrapServers);
    	// The Flink Kafka consumer can discover dynamically created Kafka partitions and consume them with exactly-once guarantees.
    	properties.setProperty("flink.partition-discovery.interval-millis", "10000");
    	properties.setProperty("group.id", groupId);
    	
    	
		// Custom deserialization schema: assemble each record into a Map carrying the topic and the message value.
		final KafkaDeserializationSchema<Map<String, String>> deserializer = new KafkaDeserializationSchema<Map<String, String>>(){
			
			private static final long serialVersionUID = 1574406844851249992L;
			
			private String encoding = "UTF-8";
    		
			@Override
			public TypeInformation<Map<String, String>> getProducedType() {
				return TypeInformation.of(new TypeHint<Map<String, String>>(){});
			}

			@Override
			public boolean isEndOfStream(Map<String, String> nextElement) {
				return false;
			}

			@Override
			public Map<String, String> deserialize(ConsumerRecord<byte[], byte[]> record) throws Exception {
				final Map<String, String> result = new ConcurrentHashMap<>();
				result.put("topic", record.topic());
				result.put("value", new String(record.value(), encoding));
				return result;
			}
    	};
    			
    	// Build the source; the topic parameter is compiled as a regex pattern.
    	Pattern pattern = Pattern.compile(topic);
    	consumer = new FlinkKafkaConsumer<>(pattern, deserializer, properties);
	}

	public FlinkKafkaConsumer<Map<String, String>> getConsumer() {
		return consumer;
	}
}
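
To wire the source into a job, pass the consumer to StreamExecutionEnvironment.addSource. The sketch below is a hedged example; the broker address, group id, and topic regex are placeholders.

import java.util.Map;

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class KafkaSourceJob {

	public static void main(String[] args) throws Exception {
		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Placeholder connection settings; the topic is treated as a regex by KafkaMessageSource.
		final KafkaMessageSource source = new KafkaMessageSource(
				"localhost:9092", "demo-group", "WYSXT_47_(.+)_47_other_47_property_47_post");

		// Each element is the Map<topic, value> assembled by the custom deserialization schema.
		final DataStream<Map<String, String>> stream = env.addSource(source.getConsumer());
		stream.print();

		env.execute("kafka-source-demo");
	}
}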