package my.test.consumer;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
/**
 * Simple Kafka consumer demo.
 *
 * <p>Shows automatic offset commits (enabled below) and, in the commented-out
 * section at the bottom, how a manual asynchronous commit with a callback
 * would look. For manual commits, disable auto-commit and call
 * {@code consumer.commitSync()} after the record-processing loop.
 *
 * @author WGY
 */
public class MyConsumer1 {
    public static void main(String[] args) {
        // 1. Create the consumer configuration.
        Properties prop = new Properties();
        // 2. Populate the configuration (prefer ConsumerConfig constants over raw strings).
        // prop.put("bootstrap.servers","hadoop1:9092");
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop1:9092");
        // Enable automatic offset commits.
        // For manual commits: set this to "false" and call consumer.commitSync()
        // (synchronous) after the for-loop instead.
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        // Interval between automatic offset commits, in milliseconds.
        prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Key/value deserializers.
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Consumer group id.
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "aaa");
        // Offset reset policy. Only takes effect when no valid committed offset
        // exists: e.g. a brand-new group id, or the committed offset has expired
        // (log retention passed).
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Create the consumer. try-with-resources guarantees close() even on an
        // exception, which commits final offsets (auto-commit) and leaves the
        // group cleanly instead of waiting for a session timeout.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop)) {
            // Subscribe to the topics.
            consumer.subscribe(Arrays.asList("first1", "second"));
            // Poll forever; each poll may return a whole batch of records.
            while (true) {
                // poll(Duration) replaces the deprecated poll(long) overload;
                // the duration is the maximum time to block waiting for records.
                ConsumerRecords<String, String> consumerRecords =
                        consumer.poll(Duration.ofMillis(100));
                // Parse and print each ConsumerRecord.
                for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                    System.out.println(consumerRecord.key() + "--" + consumerRecord.value());
                }
            }
        }
        // Asynchronous commit example: the commit request is sent in the
        // background and the callback reports failures.
        // consumer.commitAsync(new OffsetCommitCallback() {
        //     @Override
        //     public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        //         if (exception != null) {
        //             System.out.println("Commit failed for" + offsets);
        //         }
        //     }
        // });
    }
}
// Blog-page residue (not code), commented out so the file compiles:
// kafka学习--Consumer API--代码演示
// 最新推荐文章于 2023-12-20 23:47:40 发布