spring kafka自定义设置异常提交和停止监听消息

版本信息:

        <org.springframework.boot.version>2.2.2.RELEASE</org.springframework.boot.version>
        <org.springframework.cloud.version>Hoxton.SR1</org.springframework.cloud.version>

代码

自定义异常
package com.kittlen.cloud.kafka.handler;

import org.apache.kafka.clients.consumer.ConsumerRecord;

/**
 * @author kittlen
 * @version 1.0
 * @date 2022/1/10 0010
 */
/**
 * Special runtime exception thrown from a Kafka batch listener to tell the
 * custom listener error handler that consumption of the topic should stop
 * (the handler pauses the partition and optionally adjusts the offset).
 */
public class ProcessingKafkaSpecialException extends RuntimeException {

    /**
     * The record that was being processed when the failure occurred.
     * When non-null, the error handler uses it to adjust the committed
     * offset; when null, the offset is left untouched and the whole batch
     * is redelivered later.
     */
    private ConsumerRecord<?, ?> errorRecord;

    public ProcessingKafkaSpecialException() {
        super();
    }

    public ProcessingKafkaSpecialException(String message) {
        super(message);
    }

    public ProcessingKafkaSpecialException(Throwable cause) {
        super(cause);
    }

    public ProcessingKafkaSpecialException(ConsumerRecord<?, ?> errorRecord) {
        this.errorRecord = errorRecord;
    }

    public ProcessingKafkaSpecialException(String message, ConsumerRecord<?, ?> errorRecord) {
        super(message);
        this.errorRecord = errorRecord;
    }

    public ProcessingKafkaSpecialException(Throwable cause, ConsumerRecord<?, ?> errorRecord) {
        super(cause);
        this.errorRecord = errorRecord;
    }

    public ConsumerRecord<?, ?> getErrorRecord() {
        return errorRecord;
    }

    public void setErrorRecord(ConsumerRecord<?, ?> errorRecord) {
        this.errorRecord = errorRecord;
    }

}

自定义异常处理类
package com.kittlen.cloud.kafka.handler;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.springframework.kafka.listener.KafkaListenerErrorHandler;
import org.springframework.kafka.listener.ListenerExecutionFailedException;
import org.springframework.messaging.Message;
import org.springframework.stereotype.Component;

import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * {@link org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter#invoke(java.lang.Object, org.springframework.kafka.support.Acknowledgment, org.apache.kafka.clients.consumer.Consumer, org.springframework.messaging.Message)}
 * {@link org.springframework.kafka.listener.KafkaMessageListenerContainer.ListenerConsumer#doInvokeBatchListener(org.apache.kafka.clients.consumer.ConsumerRecords, java.util.List, org.apache.kafka.clients.producer.Producer)}
 * {@link org.springframework.kafka.listener.KafkaMessageListenerContainer.ListenerConsumer#invokeBatchListenerInTx(org.apache.kafka.clients.consumer.ConsumerRecords, java.util.List)}
 * 必设参数
 * spring.kafka.consumer.enable-auto-commit : false
 * spring.kafka.listener.type : batch
 * spring.kafka.listener.ack-mode : manual
 *
 * @author kittlen
 * @version 1.0
 * @date 2022/07/04 0010
 */
@Component("myBaseErrorHandler")
@Slf4j
public class MyBaseErrorHandler implements KafkaListenerErrorHandler {

    /**
     * Single-record variant. This handler is intended for batch listeners,
     * so this overload simply swallows the error (returning null marks it handled).
     */
    @Override
    public Object handleError(Message<?> message, ListenerExecutionFailedException exception) {
        return null;
    }

    /**
     * Handles a {@link ProcessingKafkaSpecialException} thrown by a batch listener.
     * <p>
     * If the exception carries the record that failed, the offset of that record is
     * committed (so earlier records in the batch are not redelivered, while the failed
     * record itself will be redelivered once the partition is resumed) and the
     * partition is paused. If no record is attached, the offset is left untouched —
     * the whole batch will be redelivered — and the partition of the first record in
     * the batch is paused. In both cases the exception is rethrown so the container
     * sees the failure. Any other exception type is treated as handled (returns null).
     *
     * @param message   the failed Spring message (batch payload or single record)
     * @param exception wrapper around the listener's thrown exception
     * @param consumer  the underlying Kafka consumer, used to commit and pause
     */
    @Override
    public Object handleError(Message<?> message, ListenerExecutionFailedException exception, Consumer<?, ?> consumer) {
        Throwable cause = exception.getCause();
        if (!(cause instanceof ProcessingKafkaSpecialException)) {
            return null;
        }
        ProcessingKafkaSpecialException pse = (ProcessingKafkaSpecialException) cause;
        ConsumerRecord<?, ?> errorRecord = pse.getErrorRecord();
        // If errorRecord is present, consumption reached that record: commit its offset
        // so it is the next record redelivered after resume, then pause the partition.
        // If absent, leave the offset unchanged so the whole batch is re-consumed later.
        if (errorRecord != null) {
            log.error("topic为: {} 消费数据:{} 时出现特殊异常,停止监听该topic", errorRecord.topic(), errorRecord.value());
            TopicPartition tp = new TopicPartition(errorRecord.topic(), errorRecord.partition());
            consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(errorRecord.offset())));
            consumer.pause(Collections.singletonList(tp));
        } else {
            Record record = createRecord(message);
            if (record == null) {
                // Empty batch payload: nothing to pause, but still propagate the failure.
                throw exception;
            }
            log.error("topic为: {} 消费数据:{}时出现特殊异常,停止监听该topic", record.getTopic(), record.getValue());
            consumer.pause(Collections.singletonList(new TopicPartition(record.getTopic(), record.getPartition())));
        }
        throw exception;
    }

    /**
     * Extracts topic / partition / payload information from the failed message,
     * for logging and for choosing which partition to pause.
     *
     * @param message the failed message; a batch payload is a {@code List<ConsumerRecord>}
     * @return the extracted info, or {@code null} when the batch payload is empty
     */
    public Record createRecord(Message<?> message) {
        String topic;
        int partition;
        Object value;
        Object payload = message.getPayload();
        if (payload instanceof List) {
            List<?> list = (List<?>) payload;
            if (list.isEmpty()) {
                return null;
            }
            ConsumerRecord<?, ?> consumerRecord = (ConsumerRecord<?, ?>) list.get(0);
            topic = consumerRecord.topic();
            partition = consumerRecord.partition();
            value = consumerRecord.value();
        } else {
            // Single-record payload: recover topic/partition from the standard
            // spring-kafka message headers.
            topic = (String) message.getHeaders().get("kafka_receivedTopic");
            Object partitionId = message.getHeaders().get("kafka_receivedPartitionId");
            if (partitionId instanceof Integer) {
                partition = (Integer) partitionId;
            } else {
                // Header missing or of another type; default to partition 0.
                partition = partitionId == null ? 0 : Integer.parseInt(String.valueOf(partitionId));
            }
            value = message.getPayload();
        }
        return new Record(topic, partition, value);
    }

    /**
     * Simple value holder for topic / partition / payload.
     * Static: it never needs a reference to the enclosing handler.
     */
    static class Record {
        private String topic;
        private int partition;
        private Object value;

        public Record(String topic, int partition, Object value) {
            this.topic = topic;
            this.partition = partition;
            this.value = value;
        }

        public String getTopic() {
            return topic;
        }

        public void setTopic(String topic) {
            this.topic = topic;
        }

        public int getPartition() {
            return partition;
        }

        public void setPartition(int partition) {
            this.partition = partition;
        }

        public Object getValue() {
            return value;
        }

        public void setValue(Object value) {
            this.value = value;
        }
    }
}


使用方法
package com.kittlen.cloud.kafka.listener;

import com.kittlen.cloud.kafka.handler.ProcessingKafkaSpecialException;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

import java.util.List;

/**
 * @author kittlen
 * @version 1.0
 * @date 2022/07/04 0012
 */
@Slf4j
@Component
public class TestListener {

    /**
     * Demo batch listener: logs the first record of the batch and then throws
     * {@link ProcessingKafkaSpecialException} carrying that record, so the
     * custom error handler commits up to it and pauses the partition.
     * An empty batch is simply acknowledged.
     */
    @KafkaListener(topics = "test", errorHandler = "myBaseErrorHandler")
    public void test(List<ConsumerRecord<?, ?>> records, Acknowledgment acknowledgment) {
        if (!records.isEmpty()) {
            ConsumerRecord<?, ?> first = records.get(0);
            log.info(first.value().toString());
            throw new ProcessingKafkaSpecialException(first);
        }
        acknowledgment.acknowledge();
    }
}

spring:
  application:
    name: kafka
  kafka:
    bootstrap-servers: 127.0.0.1:9092 #指定链接的集群名
    consumer: #消费者配置
      enable-auto-commit: false #是否采用自动提交的机制
      #      group-id: test-consumer-group #消费组 同一个组的会被轮训消费 不设置入驻时会生成一个不同的主题
      auto-offset-reset: earliest #消费方式 latest 最新的开始消费 earliest 从头开始(换组或者offset过期才会从头开始)
      max-poll-records: 500 #一次调用poll()操作时返回的最大记录数,默认值为500
      #key,value 反序列化
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      group-id: test
    listener:
      poll-timeout: 3000 #poll函数的超时时间,单位是毫秒
      missing-topics-fatal: true #topic不存在时启动报错;设为false才会忽略不存在的topic
      ack-mode: manual
      type: batch
补充
异常
  1. 转换异常
No converter found capable of converting from type [java.lang.String] to type [org.apache.kafka.clients.consumer.ConsumerRecord<?, ?>]

使用ConsumerRecords代替List<ConsumerRecord<?, ?>> records

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值