Five ways for Flink to consume a Kafka topic from a fixed offset

This article explores five methods for starting Flink's Kafka consumption from a specific offset: configuring the initial offset explicitly, using a checkpoint, using a savepoint, manually setting the consumer group, and setting the start position through connector properties, helping readers control the data-processing start point flexibly in different scenarios (sketches of these options follow the code sample below).

package flinkdemo.sinkDemo;
import flinkdemo.sourceDemo.deserialization.ConsumerRecordKafkaDeserializationSchema;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;
import java.nio.charset.StandardCharsets;
import java.util.*;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
* @author zhangkai
* @create 2019/12/11
*/
public class SinkToKafka {
    public static void main(String[] args) throws Exception {
        String topicList = "otter_sms_0";
        System.out.println(topicList);
        StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.setProperty("group.id", "flink-demo-group");        // placeholder group id

        // The custom schema is assumed to emit the raw ConsumerRecord, keeping
        // key, value, partition and offset available downstream
        FlinkKafkaConsumer011<ConsumerRecord<byte[], byte[]>> consumer = new FlinkKafkaConsumer011<>(
                topicList, new ConsumerRecordKafkaDeserializationSchema(), props);

        // Start partition 0 from a fixed offset instead of the committed group offsets
        Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
        specificOffsets.put(new KafkaTopicPartition(topicList, 0), 0L);
        consumer.setStartFromSpecificOffsets(specificOffsets);

        DataStreamSource<ConsumerRecord<byte[], byte[]>> source = see.addSource(consumer);
        SingleOutputStreamOperator<String> values = source.map(
                (MapFunction<ConsumerRecord<byte[], byte[]>, String>) record ->
                        new String(record.value(), StandardCharsets.UTF_8));
        values.print();

        see.execute("SinkToKafka");
    }
}
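The setStartFromSpecificOffsets call above is one of several start-position switches on FlinkKafkaConsumerBase, which between them cover the explicit-offset, consumer-group, and timestamp options. A minimal sketch, with placeholder broker, group id, timestamp, and offset values (only the last setStartFrom* call made before addSource takes effect):

// Needs org.apache.flink.api.common.serialization.SimpleStringSchema in addition
// to the imports above; all concrete values here are placeholders.
Properties props = new Properties();
props.setProperty("bootstrap.servers", "localhost:9092");
props.setProperty("group.id", "flink-demo-group");
FlinkKafkaConsumer011<String> c =
        new FlinkKafkaConsumer011<>("otter_sms_0", new SimpleStringSchema(), props);

c.setStartFromGroupOffsets();            // default: the group's committed offsets
c.setStartFromEarliest();                // ignore committed offsets, read from the start
c.setStartFromLatest();                  // ignore committed offsets, read only new data
c.setStartFromTimestamp(1576022400000L); // first record at or after this epoch-millis
Map<KafkaTopicPartition, Long> offsets = new HashMap<>();
offsets.put(new KafkaTopicPartition("otter_sms_0", 0), 23L);
c.setStartFromSpecificOffsets(offsets);  // fixed offset per partition

All of these only apply when the job starts without prior state: on recovery from a checkpoint or restore from a savepoint, the offsets stored in Flink state take precedence, which is what makes the next two approaches work.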
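The checkpoint and savepoint methods rely on that precedence: the Kafka source snapshots its partition offsets into Flink state, and a recovered or restored job resumes exactly where the snapshot left off. A minimal sketch of enabling checkpointing (the interval is illustrative):

// Needs org.apache.flink.streaming.api.CheckpointingMode.
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Snapshot operator state, including the Kafka source's partition offsets,
// every 60 seconds; on failure the job resumes from the last completed checkpoint.
env.enableCheckpointing(60_000L, CheckpointingMode.EXACTLY_ONCE);

A savepoint is the manually triggered variant of the same mechanism: flink savepoint <jobId> writes one, and flink run -s <savepointPath> <jar> restarts the job from the offsets it contains.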