Spring Boot Kafka Native Development

This article shows how to use Kafka in a Spring Boot application: adding the Maven dependency, then creating a producer and a consumer. The producer is configured with retries and an interceptor; the consumer commits offsets manually and supports seeking back through messages. Implementations of a producer interceptor and a consumer interceptor are also shown, allowing pre- and post-processing when messages are sent and received.

1. Add the Maven dependency

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.5.1</version>
</dependency>
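Note that this pulls in the plain Apache Kafka Java client (kafka-clients) rather than the spring-kafka integration; the "native" development in the title means calling this client API directly from a Spring Boot application.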

2. Producer

package com.example.kafkaproject;

import com.example.config.CompanySerializer;
import com.example.entity.Company;
import com.example.handler.ProducterInterceptor;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
import java.util.concurrent.ExecutionException;


public class KafkaProducterAnalysis {
    public static final  String brokerList="localhost:9092";
    public static final String topic="topic-demo";
    public static Properties initConfig(){
        Properties props=new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,brokerList);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
//        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, CompanySerializer.class.getName());
        props.put(ProducerConfig.CLIENT_ID_CONFIG,"demo");
        props.put(ProducerConfig.RETRIES_CONFIG, 10);  // number of retries on transient send failures
        props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, ProducterInterceptor.class.getName());

        return props;
    }
    public static void main(String[] args) {
        Properties props=initConfig();
        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        ProducerRecord<String, String> record = new ProducerRecord<>(topic, 2, "kafka2052", "partition test data 13");
        //producer.send(record);  // fire-and-forget send
//        try {
//            // synchronous send
//            //producer.send(record).get();
//            // fetch the record metadata
//            RecordMetadata metadata = producer.send(record).get();
//            System.out.println("topic");
//            System.out.println(metadata.topic());
//            System.out.println("offset");
//            System.out.println(metadata.offset());
//            System.out.println("partition");
//            System.out.println(metadata.partition());
//
//        } catch (ExecutionException | InterruptedException e) {
//            e.printStackTrace();
//        }

//        KafkaProducer<String, Company> producer = new KafkaProducer<>(props);
//        Company company = new Company();
//        company.setName("test name");
//        company.setAddress("test address");
//        ProducerRecord<String, Company> record = new ProducerRecord<>(topic, "kafka1024", company);
//        producer.send(record);
        try {
            // asynchronous send with a callback
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if(e!=null){
                        e.printStackTrace();
                    }
                    else {
                        System.out.println("回调函数");
                        System.out.println(recordMetadata.topic()+'-'+recordMetadata.partition());
                    }
                }
            });
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            producer.close();  // also runs if send() throws
        }
    }
}
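The commented-out lines above send a Company object with a custom CompanySerializer, neither of which is shown in the article. Below is a minimal sketch of what they might look like; the two string fields are inferred from the setters used above, and the length-prefixed binary layout is an assumption.

package com.example.entity;

// Hypothetical entity, inferred from the setters used in the commented-out producer code.
public class Company {
    private String name;
    private String address;

    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
    public String getAddress() { return address; }
    public void setAddress(String address) { this.address = address; }
}

And the serializer:

package com.example.config;

import com.example.entity.Company;
import org.apache.kafka.common.serialization.Serializer;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Sketch of a custom serializer: each field is written as a 4-byte length followed by its UTF-8 bytes.
public class CompanySerializer implements Serializer<Company> {

    @Override
    public byte[] serialize(String topic, Company data) {
        if (data == null) {
            return null;
        }
        byte[] name = data.getName() == null ? new byte[0]
                : data.getName().getBytes(StandardCharsets.UTF_8);
        byte[] address = data.getAddress() == null ? new byte[0]
                : data.getAddress().getBytes(StandardCharsets.UTF_8);
        ByteBuffer buffer = ByteBuffer.allocate(4 + name.length + 4 + address.length);
        buffer.putInt(name.length);
        buffer.put(name);
        buffer.putInt(address.length);
        buffer.put(address);
        return buffer.array();
    }
}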

3. Consumer

package com.example.kafkaproject;

import com.example.handler.MyConsumerInterceptor;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Pattern;

public class KafkaConsumerAnalysis {
    public static final  String brokerList="localhost:9092";
    public static final String topic="topic-demo";
    public static final String groupId="group.demo";
    public static final AtomicBoolean isRunning=new AtomicBoolean(true);

    public static Properties initConfig(){
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,brokerList);
        props.put(ConsumerConfig.GROUP_ID_CONFIG,groupId);
        props.put(ConsumerConfig.CLIENT_ID_CONFIG,"client.id.demo");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class.getName());
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);  // disable auto-commit; offsets are committed manually
        props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MyConsumerInterceptor.class.getName());  // consumer interceptor
        return props;
    }

    public static void main(String[] args) {
        Properties props=initConfig();
        TopicPartition tp = new TopicPartition(topic, 2);  // the partition to consume from
        KafkaConsumer<String,String> consumer = new KafkaConsumer<>(props);
//        consumer.subscribe(Arrays.asList(topic));  // subscribe to the topic
//        consumer.pause(Arrays.asList(tp));  // pause consumption of the given partition
//        consumer.resume(Arrays.asList(tp));  // resume consumption of the given partition

        //consumer.subscribe(Pattern.compile("topic-.*"));  // subscribe to topics matching a regular expression
//        consumer.subscribe(Arrays.asList(topic), new ConsumerRebalanceListener() {
//            // rebalance listener
//            @Override
//            public void onPartitionsRevoked(Collection<TopicPartition> collection) {
//                consumer.commitSync();  // commit offsets before the partitions are revoked
//            }
//            }
//
//            @Override
//            public void onPartitionsAssigned(Collection<TopicPartition> collection) {
//
//            }
//        });
        consumer.assign(Arrays.asList(tp));  // assign a specific partition of the topic directly
        Set<TopicPartition> assignment = new HashSet<>();
        while (assignment.size() == 0) {
            consumer.poll(Duration.ofMillis(100));
            assignment = consumer.assignment();  // fetch the currently assigned partitions
        }
//        // consume from the end of the log
//        Map<TopicPartition, Long> offsets = consumer.endOffsets(assignment);
//        consumer.seek(tp, offsets.get(tp));
//        consumer.seekToEnd(Arrays.asList(tp));  // shorthand
//        // consume from the beginning of the log
//        Map<TopicPartition, Long> offsets = consumer.beginningOffsets(assignment);
//        consumer.seek(tp, offsets.get(tp));
//        consumer.seekToBeginning(Arrays.asList(tp));  // shorthand
        //consumer.seek(tp, 4);  // consume from a specific offset
        // seek by timestamp: start from the messages written one day ago
        Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
        timestampToSearch.put(tp, System.currentTimeMillis() - 24 * 3600 * 1000L);
        Map<TopicPartition, OffsetAndTimestamp> offsets = consumer.offsetsForTimes(timestampToSearch);
        OffsetAndTimestamp offsetAndTimestamp = offsets.get(tp);
        if (offsetAndTimestamp != null) {  // null when no message has a timestamp at or after the searched time
            consumer.seek(tp, offsetAndTimestamp.offset());
        }


        // look up the partition metadata for the topic
//        List<TopicPartition> topicPartitions = new ArrayList<>();
//        List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
//        if (partitionInfos != null) {
//            for (PartitionInfo tpInfo : partitionInfos) {
//                topicPartitions.add(new TopicPartition(tpInfo.topic(), tpInfo.partition()));
//            }
//        }
//        consumer.assign(topicPartitions);
        //consumer.unsubscribe();  // cancel the subscription

        System.out.println("start consuming");
        try {
            while (isRunning.get()){
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));  // pull messages
                for (ConsumerRecord<String, String> record : records) {  // consumption logic
                    System.out.println("consumed message");
                    System.out.println(record.value());
                    System.out.println(record.topic());
                    // commit the offset after each consumed message
                    long offset = record.offset();
                    TopicPartition partition = new TopicPartition(record.topic(), record.partition());
                    consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(offset + 1)));
                }
                //consumer.commitAsync();  // asynchronous offset commit

//                for (TopicPartition partition : records.partitions()) {
//                    // finer-grained synchronous commit, once per partition
//                    List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
//                    for (ConsumerRecord<String, String> record : partitionRecords) {
//                        // consumption logic
//                    }
//                    long lastConsumedOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
//                    consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastConsumedOffset + 1)));
//                }
            }
        }catch (Exception e){
            System.out.println(e);
        }finally {
            consumer.close();
        }
    }
}
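The loop above runs until isRunning becomes false, but nothing in this example ever clears the flag. A common shutdown pattern (a sketch, not part of the original code) is a JVM shutdown hook that flips the flag and calls consumer.wakeup(), which makes a blocked poll() throw WakeupException so the loop exits and the consumer closes cleanly:

// sketch: register before entering the poll loop
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    isRunning.set(false);
    consumer.wakeup();  // interrupts a blocked poll() with WakeupException
}));

try {
    while (isRunning.get()) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        // ... consumption logic and offset commits as above ...
    }
} catch (org.apache.kafka.common.errors.WakeupException e) {
    // expected during shutdown; safe to ignore
} finally {
    consumer.close();
}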

4. Producer interceptor

package com.example.handler;

import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Map;

public class ProducterInterceptor implements ProducerInterceptor<String,String> {

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        // prepend a marker to the message value before it is serialized and sent
        String modifiedValue = "preInter-" + record.value();
        return new ProducerRecord<>(record.topic(), record.partition(), record.timestamp(),
                record.key(), modifiedValue, record.headers());
    }

    @Override
    public void onAcknowledgement(RecordMetadata recordMetadata, Exception e) {
        // called when the broker acknowledges the record or the send fails; no-op here
    }

    @Override
    public void close() {
        // no resources to release
    }

    @Override
    public void configure(Map<String, ?> map) {
        // no extra configuration needed
    }
}
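If more than one producer interceptor is needed, INTERCEPTOR_CLASSES_CONFIG accepts a comma-separated list, and the interceptors are invoked in the listed order. For example (AnotherInterceptor is a hypothetical second implementation):

props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG,
        ProducterInterceptor.class.getName() + "," + AnotherInterceptor.class.getName());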

5. Consumer interceptor

package com.example.handler;

import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;

public class MyConsumerInterceptor implements ConsumerInterceptor<String,String> {

    @Override
    public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> consumerRecords) {
        // invoked just before poll() returns; records can be inspected or modified here
        System.out.println("consumer interceptor");
        return consumerRecords;
    }

    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> map) {
        // invoked after offsets have been committed; no-op here
    }

    @Override
    public void close() {
        // no resources to release
    }

    @Override
    public void configure(Map<String, ?> map) {
        // no extra configuration needed
    }
}
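The onConsume above only logs. As a more substantial example, the sketch below (not from the original article; the class name and the 10-second EXPIRE_INTERVAL are assumptions) drops records older than a fixed TTL before they reach the application:

package com.example.handler;

import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch: filter out records older than an assumed 10-second TTL.
public class TtlConsumerInterceptor implements ConsumerInterceptor<String, String> {
    private static final long EXPIRE_INTERVAL = 10 * 1000;  // assumed TTL in milliseconds

    @Override
    public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
        long now = System.currentTimeMillis();
        Map<TopicPartition, List<ConsumerRecord<String, String>>> newRecords = new HashMap<>();
        for (TopicPartition tp : records.partitions()) {
            List<ConsumerRecord<String, String>> kept = new ArrayList<>();
            for (ConsumerRecord<String, String> record : records.records(tp)) {
                if (now - record.timestamp() < EXPIRE_INTERVAL) {
                    kept.add(record);  // keep records that are still fresh
                }
            }
            if (!kept.isEmpty()) {
                newRecords.put(tp, kept);
            }
        }
        return new ConsumerRecords<>(newRecords);
    }

    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> map) { }

    @Override
    public void close() { }

    @Override
    public void configure(Map<String, ?> map) { }
}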

6. In Spring Boot, consumption can be started on a new thread, as in the code below.

The enclosing class needs to implement the ApplicationRunner interface (a complete sketch follows the snippet).

@Override
public void run(ApplicationArguments args) throws Exception {
    new Thread(() -> {
        //this.consumerFunc();  // run the consumption loop
    }).start();
}
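Putting it together, a minimal runner class might look like the sketch below; the class name and the consumerFunc() it delegates to are assumptions, with consumerFunc() holding the poll loop from section 3:

package com.example.kafkaproject;

import org.springframework.boot.ApplicationArguments;
import org.springframework.boot.ApplicationRunner;
import org.springframework.stereotype.Component;

// Hypothetical runner: starts the Kafka poll loop on its own thread at application startup.
@Component
public class KafkaConsumerRunner implements ApplicationRunner {

    @Override
    public void run(ApplicationArguments args) throws Exception {
        Thread consumerThread = new Thread(() -> {
            // this.consumerFunc();  // the poll loop from section 3 would live here
        });
        consumerThread.setName("kafka-consumer");  // easier to identify in thread dumps
        consumerThread.start();
    }
}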