Integrating Kafka with Spring, and Fixing Common Errors

I. Configuring Kafka
1. Add the dependency
<!-- Support library for integrating Kafka with Spring: Spring/Kafka messaging and listeners -->
<!-- https://mvnrepository.com/artifact/org.springframework.integration/spring-integration-kafka -->
<dependency>
    <groupId>org.springframework.integration</groupId>
    <artifactId>spring-integration-kafka</artifactId>
    <version>3.2.1.RELEASE</version>
</dependency>
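Note: spring-integration-kafka 3.2.1.RELEASE pulls in spring-kafka (2.3.x) and kafka-clients transitively; those provide the KafkaTemplate, ConcurrentKafkaListenerContainerFactory and AdminClient classes used below. If you prefer to pin those versions explicitly, a sketch of the extra coordinates might look like this (the version numbers are assumptions; align them with whatever spring-integration-kafka resolves to):

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <!-- assumed version; keep it consistent with the transitive dependency -->
    <version>2.3.1.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <!-- assumed version -->
    <version>2.3.1</version>
</dependency>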
2. Configure KafkaConfiguration for the producer and consumer
package com.frame.kafka.config;


import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.requests.IsolationLevel;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.transaction.KafkaTransactionManager;

import java.util.*;

@Configuration
@EnableKafka
public class KafkaConfiguration {

    /**
     * Kafka broker address (hard-coded here; in a real project inject it, e.g. via @Value)
     */
    private String BOOTSTRAP_SERVERS_CONFIG = "localhost:9092";

    /**
     * Whether the listener containers should start automatically
     */
    private Boolean IS_OPEN_LISTENER = true;


    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 15:23
     * @Description: ConcurrentKafkaListenerContainerFactory is the factory for creating Kafka listener containers; only the consumer side is configured here
     * @param
     * @return org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory<java.lang.Integer,java.lang.String>
     */
    @Bean
    public ConcurrentKafkaListenerContainerFactory<Integer, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // avoid failing when the topic does not exist
        factory.setMissingTopicsFatal(false);
        // whether to start the listener containers automatically
        factory.setAutoStartup(IS_OPEN_LISTENER);
        return factory;
    }

    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 15:23
     * @Description: Creates the consumer factory from the parameters defined in consumerProps()
     * @param
     * @return
     */
    @Bean
    public ConsumerFactory<Integer, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerProps());
    }


    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 15:23
     * @Description: Creates the producer factory from the parameters defined in senderProps()
     * @param
     * @return
     */
    @Bean
    public ProducerFactory<Integer, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(senderProps());
    }




    @Bean("tranProducerFactory")
    public ProducerFactory<Integer, String> tranProducerFactory() {
        DefaultKafkaProducerFactory factory = new DefaultKafkaProducerFactory<>(senderProps());
        factory.transactionCapable();
        factory.setTransactionIdPrefix("tran-");
        return factory;
    }


    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 15:23
     * @Description: KafkaTemplate provides the Kafka send/receive operations
     * @param
     * @return
     */
    @Bean
    public KafkaTemplate<Integer, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 15:23
     * @Description: KafkaTemplate provides the Kafka send/receive operations (transaction-capable)
     * @param
     * @return
     */
    @Bean("tranKafkaTemplate")
    public KafkaTemplate<Integer, String> tranKafkaTemplate() {
        return new KafkaTemplate<>(tranProducerFactory());
    }





    /**
     * KafkaAdmin bean for administering the Kafka cluster
     * @return
     */
    @Bean
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> props = new HashMap<>();
        // Kafka broker connection address
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS_CONFIG);
        return new KafkaAdmin(props);
    }

    /**
     * Create the initial topics (this cannot increase the partition count of an existing topic)
     */
    @Bean
    public AdminClient createTopic() {
        AdminClient adminClient = AdminClient.create(kafkaAdmin().getConfig());
        List<NewTopic> topics = new ArrayList<>();
        topics.add(new NewTopic("test", 3, (short) 1));
        adminClient.createTopics(topics);
        return adminClient;
    }


    /**
     * Increase the partition count of a topic (partitions can only be increased, never decreased)
     */
    @Bean
    public AdminClient createPartitions() {
        AdminClient adminClient = AdminClient.create(kafkaAdmin().getConfig());
        Map<String, NewPartitions> newPartitions = new HashMap<>();
        // replace "integral_update_topic" with the topic whose partition count should be raised
        newPartitions.put("integral_update_topic", NewPartitions.increaseTo(3));
        adminClient.createPartitions(newPartitions);
        return adminClient;
    }

    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 15:23
     * @Description: Consumer configuration parameters
     * @param
     * @return
     */
    private Map<String, Object> consumerProps() {
        Map<String, Object> props = new HashMap<>();
        // broker address
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS_CONFIG);
        // consumer group id
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
        // enable auto-commit of offsets
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        // auto-commit interval
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // consumer isolation level (must be read_committed when transactions are used)
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_COMMITTED.toString().toLowerCase(Locale.ROOT));
        // session timeout
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        // key deserializer
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
        // value deserializer
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }



    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 15:23
     * @Description: Producer configuration parameters
     * @param
     * @return
     */
    private Map<String, Object> senderProps() {
        Map<String, Object> props = new HashMap<>();
        // broker address
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS_CONFIG);
        // retries; 0 disables the retry mechanism
        props.put(ProducerConfig.RETRIES_CONFIG, 1);
        // acks=0 : the producer does not wait for any response from the server before considering the write successful.
        // acks=1 : the producer gets a success response as soon as the partition leader has received the message.
        // acks=all : the producer only gets a success response once all replicating nodes have received the message.
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        // enable idempotence
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        // batch size in bytes
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // linger 1 ms so sends are batched; this reduces the number of produce requests and improves throughput
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // total memory (bytes) the producer may use to buffer records waiting to be sent to the server
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 1024000);
        // key serializer
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
        // value serializer
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

}
3. Configure the manual-commit listener container factory, AckListener
package com.frame.kafka.config;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.requests.IsolationLevel;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.stereotype.Component;

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

/**
 * @Title: AckListener
 * @Description: Listener container factory that acknowledges consumption via the Ack mechanism
 * @Author: wangli
 * @Version: 1.0
 * @create 2019-04-23 17:19
 */
@Component
public class AckListener {

    /**
     * Kafka broker address (hard-coded here; in a real project inject it, e.g. via @Value)
     */
    private String BOOTSTRAP_SERVERS_CONFIG = "localhost:9092";

    /**
     * Whether the listener containers should start automatically
     */
    private Boolean IS_OPEN_LISTENER = true;

    private static final Logger log= LoggerFactory.getLogger(AckListener.class);

    private Map<String, Object> consumerProps() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS_CONFIG);
        // consumer isolation level
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_COMMITTED.toString().toLowerCase(Locale.ROOT));
        // disable auto-commit of offsets
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    @Bean("ackContainerFactory")
    public ConcurrentKafkaListenerContainerFactory ackContainerFactory() {
        ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory(consumerProps()));
        factory.setAutoStartup(IS_OPEN_LISTENER);
        // avoid failing when the topic does not exist
        factory.setMissingTopicsFatal(false);
        return factory;
    }


}

4. Configure the batch listener container factory, BatchListener

package com.frame.kafka.config;

import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.requests.IsolationLevel;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.stereotype.Component;

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

/**
 * @Title: BatchListener
 * @Description: Batch listener container factory
 * @Author: wangli
 * @Version: 1.0
 * @create 2019-04-23 16:34
 */
@Component
public class BatchListener {

    /**
     * Kafka broker address (hard-coded here; in a real project inject it, e.g. via @Value)
     */
    private String BOOTSTRAP_SERVERS_CONFIG = "localhost:9092";

    /**
     * Whether the listener containers should start automatically
     */
    private Boolean IS_OPEN_LISTENER = true;

    private static final Logger log= LoggerFactory.getLogger(BatchListener.class);

    private Map<String, Object> consumerProps() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS_CONFIG);
        // consumer isolation level
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_COMMITTED.toString().toLowerCase(Locale.ROOT));
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        // maximum number of records returned per poll
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "5");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    @Bean("batchContainerFactory")
    public ConcurrentKafkaListenerContainerFactory listenerContainer() {
        ConcurrentKafkaListenerContainerFactory container = new ConcurrentKafkaListenerContainerFactory();
        container.setConsumerFactory(new DefaultKafkaConsumerFactory(consumerProps()));
        // concurrency; should be less than or equal to the topic's partition count
        container.setConcurrency(5);
        // enable batch listening
        container.setBatchListener(true);
        container.setAutoStartup(IS_OPEN_LISTENER);
        // avoid failing when the topic does not exist
        container.setMissingTopicsFatal(false);
        return container;
    }



}
5. Producer test demo, kafaProductTest
package com.frame.web.notify.controller.kafkaTest;

import com.alibaba.fastjson.JSON;
import com.frame.common.dto.TempFormMap;
import com.frame.kafka.handler.KafkaSendResultHandler;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.SendResult;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.support.GenericMessage;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.concurrent.FailureCallback;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.SuccessCallback;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.util.HashMap;
import java.util.Map;

/**
 * @Title: kafaProductTest
 * @Description: Producer test controller
 * @Author: wangli
 * @Version: 1.0
 * @create 2018/11/30 14:05
 */

@RestController
@RequestMapping("/kafka")
public class kafaProductTest{

    @Autowired
    private KafkaTemplate<Integer, String> kafkaTemplate;

    @Autowired
    private KafkaTemplate tranKafkaTemplate;

    @Autowired
    private KafkaSendResultHandler producerListener;

    @Value("${integral.update.topic}")
    private String integral_update;


    /**==================================================================================================
     topic: the name of the topic
     partition: the partition id, i.e. which partition to send to (ids start at 0); the message goes to that partition
     timestamp: the timestamp, normally defaulting to the current time
     key: the message key
     data: the message payload
     ProducerRecord: the wrapper class for a single message, containing the fields above
     Message<?>: Spring's own message wrapper, containing the payload plus message headers
     ====================================================================================================**/

    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-01 11:35
     * @Description: Basic send-message test
     * @param req
     * @param res
     * @param tempFormMap
     * @return void
     */
    @SuppressWarnings("rawtypes")
    @RequestMapping(value = "/sendMessage", method = RequestMethod.POST)

    public void sendMessage(HttpServletRequest req,
                            HttpServletResponse res, @RequestBody TempFormMap tempFormMap) throws Exception {

        String message = tempFormMap.getStr("message");
        //kafkaTemplate.send("first",message);
        ListenableFuture<SendResult<Integer, String>> listenableFuture = kafkaTemplate.send("first", message); // asynchronous send
        kafkaTemplate.send("test", message); // asynchronous send
        //int a = 0 / 0;
        //ListenableFuture<SendResult<String, String>> listenableFuture = kafkaTemplate.send("first", message).get(); // synchronous send
        Thread.sleep(1000);
        // success callback
        SuccessCallback<SendResult<Integer, String>> successCallback = new SuccessCallback<SendResult<Integer, String>>() {
            @Override
            public void onSuccess(SendResult<Integer, String> result) {
                // success business logic
                System.out.println("onSuccess");
            }
        };
        // failure callback
        FailureCallback failureCallback = new FailureCallback() {
            @Override
            public void onFailure(Throwable ex) {
                // failure business logic
                System.out.println("onFailure");
            }
        };
        listenableFuture.addCallback(successCallback, failureCallback);
    }


    /**
     * @param req
     * @param res
     * @param tempFormMap
     * @return void
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-01 11:35
     * @Description: Send a message with a timestamp
     */
    @SuppressWarnings("rawtypes")
    @RequestMapping(value = "/sendMessage1", method = RequestMethod.POST)
    public void sendMessage1(HttpServletRequest req,
                             HttpServletResponse res, @RequestBody TempFormMap tempFormMap) throws Exception {
        // send a message carrying an explicit timestamp
        kafkaTemplate.send("test", 0, System.currentTimeMillis(), 0, "send message with timestamp");
    }


    /**
     * @param req
     * @param res
     * @param tempFormMap
     * @return void
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-01 11:35
     * @Description: Send a message using ProducerRecord
     */
    @SuppressWarnings("rawtypes")
    @RequestMapping(value = "/sendMessage2", method = RequestMethod.POST)
    public void sendMessage2(HttpServletRequest req,
                             HttpServletResponse res, @RequestBody TempFormMap tempFormMap) throws Exception {

        // send a message using ProducerRecord

        ProducerRecord record = new ProducerRecord(integral_update, JSON.toJSONString(tempFormMap));
        kafkaTemplate.send(record);

    }


    /**
     * @param req
     * @param res
     * @param tempFormMap
     * @return void
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-01 11:35
     * @Description: Send a message using Spring's Message
     */
    @SuppressWarnings("rawtypes")
    @RequestMapping(value = "/sendMessage3", method = RequestMethod.POST)
    public void sendMessage3(HttpServletRequest req,
                             HttpServletResponse res, @RequestBody TempFormMap tempFormMap) throws Exception {

        // send a message using Spring's Message wrapper
        Map map = new HashMap();
        map.put(KafkaHeaders.TOPIC, "test");
        map.put(KafkaHeaders.PARTITION_ID, 0);
        map.put(KafkaHeaders.MESSAGE_KEY, 0);
        GenericMessage message = new GenericMessage("use Message to send message", new MessageHeaders(map));
        kafkaTemplate.send(message);
    }


    /**
     * @param req
     * @param res
     * @param tempFormMap
     * @return void
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-01 11:35
     * @Description: KafkaSendResultHandler implements the send-result callback
     */
    @SuppressWarnings("rawtypes")
    @RequestMapping(value = "/sendMessage4", method = RequestMethod.POST)
    public void sendMessage4(HttpServletRequest req,
                             HttpServletResponse res, @RequestBody TempFormMap tempFormMap) throws Exception {

        kafkaTemplate.setProducerListener(producerListener);
        kafkaTemplate.send("test", "test producer listen");
        // sleep briefly after sending; KafkaTemplate sends asynchronously, so if the send takes a while the process may exit before the callback can run
        Thread.sleep(1000);
    }


    /**
     * @param req
     * @param res
     * @param tempFormMap
     * @return void
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-01 11:35
     * @Description: Transaction opened via the @Transactional annotation (the exception thrown at the end triggers a rollback)
     */
    @SuppressWarnings("rawtypes")
    @RequestMapping(value = "/sendMessage5", method = RequestMethod.POST)
    @Transactional
    public void sendMessage5(HttpServletRequest req,
                             HttpServletResponse res, @RequestBody TempFormMap tempFormMap) throws Exception {


        tranKafkaTemplate.send("test", "test transactional annotation");
        throw new RuntimeException("fail");
    }


    /**
     * @param req
     * @param res
     * @param tempFormMap
     * @return void
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-01 11:35
     * @Description: Local transaction via executeInTransaction (the exception thrown inside the callback triggers a rollback)
     */
    @SuppressWarnings("rawtypes")
    @RequestMapping(value = "/sendMessage6", method = RequestMethod.POST)
    public void sendMessage6(HttpServletRequest req,
                             HttpServletResponse res, @RequestBody TempFormMap tempFormMap) throws Exception {
        tranKafkaTemplate.executeInTransaction(new KafkaOperations.OperationsCallback() {
            @Override
            public Object doInOperations(KafkaOperations kafkaOperations) {
                kafkaOperations.send("test", "test executeInTransaction");
                throw new RuntimeException("fail");
                //return true;
            }
        });
    }


    /**
     * @param req
     * @param res
     * @param tempFormMap
     * @return void
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-01 11:35
     * @Description: Batch send test
     */
    @SuppressWarnings("rawtypes")
    @RequestMapping(value = "/sendMessage7", method = RequestMethod.POST)
    public void sendMessage7(HttpServletRequest req,
                             HttpServletResponse res, @RequestBody TempFormMap tempFormMap) throws Exception {

        for (int i = 0; i < 12; i++) {
            // kafkaTemplate.send("topic.quick.batch", "test batch listener,dataNum-" + i);
            kafkaTemplate.send("topic.quick.batch.partition", "test batch listener,dataNum-" + i);
        }
    }
}
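The controller above autowires com.frame.kafka.handler.KafkaSendResultHandler, which the post does not show. Below is a minimal sketch of what such a handler could look like, assuming spring-kafka 2.3.x, where ProducerListener exposes onSuccess(ProducerRecord, RecordMetadata) and onError(ProducerRecord, Exception); the logging body is illustrative, not the author's original implementation:

package com.frame.kafka.handler;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.stereotype.Component;

/**
 * Send-result callback used by kafkaTemplate.setProducerListener(...) in sendMessage4.
 * Illustrative implementation only.
 */
@Component
public class KafkaSendResultHandler implements ProducerListener<Integer, String> {

    private static final Logger log = LoggerFactory.getLogger(KafkaSendResultHandler.class);

    @Override
    public void onSuccess(ProducerRecord<Integer, String> producerRecord, RecordMetadata recordMetadata) {
        // called when the broker has acknowledged the record
        log.info("Message sent: {} -> partition {}, offset {}",
                producerRecord.value(), recordMetadata.partition(), recordMetadata.offset());
    }

    @Override
    public void onError(ProducerRecord<Integer, String> producerRecord, Exception exception) {
        // called when the send ultimately fails
        log.error("Message failed: {}", producerRecord.value(), exception);
    }
}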


6. Listener demo, DemoListener
package com.frame.web.notify.service;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.PartitionOffset;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.handler.annotation.Payload;
import org.springframework.stereotype.Component;

import java.util.List;

/**
 * @Title: DemoListener
 * @Description:
 * @Author: wangli
 * @Version: 1.0
 * @create 2019-04-22 16:35
 */
@Component
public class DemoListener {

    @Autowired
    private KafkaTemplate kafkaTemplate;

    private static final Logger log= LoggerFactory.getLogger(DemoListener.class);

    /**===================================================================================================
     * data : the payload type is not restricted; it follows the types defined on the KafkaTemplate. A List payload is used for batch consumption.
     * ConsumerRecord : the full consumed record, including headers, partition info, timestamp, and so on
     * Acknowledgment : the interface used for the Ack mechanism
     * Consumer : the consumer instance, which allows manually committing offsets, controlling the consumption rate, etc.
     *====================================================================================================
     * **/



    /**===================================================================================================
     * id: the consumer id; when groupId is not configured, the id is used as the group id by default
     * containerFactory: as mentioned above, switching @KafkaListener between single-record and batch consumption only requires setting this attribute; it holds the bean name of the listener container factory (a ConcurrentKafkaListenerContainerFactory)
     * topics: the topic(s) to listen to; several topics can be listened to at once
     * topicPartitions: more detailed listening configuration, e.g. listening only to given partitions of a topic, or starting from offset 200
     * errorHandler: the listener exception handler, configured by bean name
     * groupId: the consumer group id
     * idIsGroup: whether the id should be used as the group id
     * clientIdPrefix: the consumer client id prefix
     * beanRef: the bean name of the actual listener container; prefix the bean name with "__"
     *====================================================================================================
     * **/



    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 16:23
     * @Description: Basic message consumption, reading directly from the topic
     * @param
     * @return void
     */
    @KafkaListener(groupId = "dyzh",topics = "test")
    public void listen(String msgData) {
        System.out.println("test-------"+msgData);
        log.info("demo receive : "+msgData);
    }


    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 16:23
     * @Description: Basic message consumption; the same consumer group listening to the same topic
     * @param
     * @return void
     */
    @KafkaListener(groupId = "dyzh",topics = "test")
    public void listen1(String msgData) {
        // within one consumer group a message is consumed only once, so this method will not receive the messages already handled by listen()
        System.out.println("test-------"+msgData);
        log.info("demo receive : "+msgData);
    }

    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 16:23
     * @Description: A different consumer group listening to the same topic
     * @param
     * @return void
     */
    @KafkaListener(groupId = "dyzhTTTT",topics = "test")
    public void listen2(String msgData) {
        // this listener belongs to a different consumer group, so it also receives every message on the topic
        System.out.println("test-------"+msgData);
        log.info("demo receive : "+msgData);
    }



    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 16:26
     * @Description: ConsumerRecord contains the partition info, headers, payload and so on; if the business logic needs these details, ConsumerRecord is a good choice
     * @param
     * @return void
     */
    @KafkaListener(id = "consumer", topics = "first")
    public void listen3(ConsumerRecord<Integer, String> record) {
        log.info("topic.quick.consumer receive : " + record.toString());
    }



    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 17:02
     * @Description: Batch message consumption
     * @param
     * @return void
     */
    @KafkaListener(id = "batch",topics = {"topic.quick.batch"},containerFactory = "batchContainerFactory")
    public void batchListener(List<String> data) {
        log.info("topic.quick.batch  receive : ");
        for (String s : data) {
            log.info(  s);
        }
    }




    
    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 17:08
     * @Description: Listen to specific partitions of a topic.
     * The topicPartitions attribute of @KafkaListener selects individual partitions.
     * @TopicPartition: topic -- the topic to listen to; partitions -- the partition ids to listen to; partitionOffsets -- start listening from a given offset
     * @PartitionOffset: partition -- a single partition id (not an array); initialOffset -- the initial offset
     * @param
     * @return void
     */
    @KafkaListener(id = "batchWithPartition",containerFactory = "batchContainerFactory",
            topicPartitions = {
                    @TopicPartition(topic = "topic.quick.batch.partition",partitions = {"1","3"}),
                    @TopicPartition(topic = "topic.quick.batch.partition",partitions = {"0","4"},
                            partitionOffsets = @PartitionOffset(partition = "2",initialOffset = "100"))
            }
    )
    public void batchListenerWithPartition(List<String> data) {
        log.info("topic.quick.batch.partition  receive : ");
        for (String s : data) {
            log.info(s);
        }
    }





    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 17:17
     * @Description: Obtain the message headers and payload via annotations
     * @Payload: the message payload, i.e. the content that was sent
     * @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY): the key of the received message
     * @Header(KafkaHeaders.RECEIVED_PARTITION_ID): the partition the message was received from
     * @Header(KafkaHeaders.RECEIVED_TOPIC): the name of the topic being listened to
     * @Header(KafkaHeaders.RECEIVED_TIMESTAMP): the message timestamp
     * @return void
     */
    @KafkaListener(id = "anno", topics = "topic.quick.anno")
    public void annoListener(@Payload String data,
                             @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) Integer key,
                             @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition,
                             @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
                             @Header(KafkaHeaders.RECEIVED_TIMESTAMP) long ts) {
        log.info("topic.quick.anno receive : \n" +
                "data : " + data + "\n" +
                "key : " + key + "\n" +
                "partitionId : " + partition + "\n" +
                "topic : " + topic + "\n" +
                "timestamp : " + ts + "\n"
        );

    }




    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 17:24
     * @Description: Acknowledge consumption using the Ack mechanism
     * @param record
     * @param ack
     * @return void
     */
    @KafkaListener(id = "ack", topics = "topic.quick.ack",containerFactory = "ackContainerFactory")
    public void ackListener(ConsumerRecord record, Acknowledgment ack) {
        log.info("topic.quick.ack receive : " + record.value());
        ack.acknowledge();
    }




    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 17:27
     * @Description: Re-publish the un-acked message back to the topic; this approach is simple, and headers can be used to record how many times the record has been consumed so the next attempt can decide what to do
     * @param record
     * @param ack
     * @param consumer
     * @return void
     */
    @KafkaListener(id = "ack2", topics = "topic.quick.ack", containerFactory = "ackContainerFactory")
    public void ackListener(ConsumerRecord record, Acknowledgment ack, Consumer consumer) {
        log.info("topic.quick.ack receive : " + record.value());

        // acknowledge if the offset is even, otherwise reject
        if (record.offset() % 2 == 0) {
            log.info(record.offset()+"--ack");
            ack.acknowledge();
        } else {
            log.info(record.offset()+"--nack");
            kafkaTemplate.send("topic.quick.ack", record.value());
        }
    }





    /**
     * @Author: wangli
     * @Version: 1.0
     * @create: 2019-04-23 17:28
     * @Description: Use Consumer.seek to move back to the offset of the un-acked record and consume it again. This can cause an infinite loop if the business logic can never process the record yet keeps seeking back to its offset.
     * @param record
     * @param ack
     * @param consumer
     * @return void
     */
    @KafkaListener(id = "ack3", topics = "topic.quick.ack", containerFactory = "ackContainerFactory")
    public void ackListener2(ConsumerRecord record, Acknowledgment ack, Consumer consumer) {
        log.info("topic.quick.ack receive : " + record.value());

        // acknowledge if the offset is even, otherwise reject
        if (record.offset() % 2 == 0) {
            log.info(record.offset()+"--ack");
            ack.acknowledge();
        } else {
            log.info(record.offset()+"--nack");

            consumer.seek(new org.apache.kafka.common.TopicPartition("topic.quick.ack",record.partition()),record.offset());
        }
    }

}

II. Common Errors and Notes
1. Error when starting Kafka
ERROR [KafkaServer id=0] Fatal error during KafkaServer startup. Prepare to shutdown (kafka.server.KafkaServer)
java.net.UnknownHostException: centos6.8002: centos6.8002: unknown error
  • Edit the /etc/hosts file

    Steps: 1. cd /etc, 2. vi hosts, 3. append the following as the last line:

    127.0.0.1 <hostname> localhost.localdomain localhost

2. After startup the topic does not exist and the listener fails
Kafka error: Topic(s) [publish] is/are not present and missingTopicsFatal is true

Cause: by default, the consumer listener fails when the topic it listens to does not exist.

Fix: set the listener property missingTopicsFatal to false.

  • Option 1: with Spring Boot, add this to the configuration file

    spring.kafka.listener.missing-topics-fatal=false

  • Option 2: with Spring MVC, set it on the Kafka listener container factory

    ConcurrentKafkaListenerContainerFactory.setMissingTopicsFatal(false);

3. Kafka transactional messages
  • Guaranteeing no message loss (see the configuration sketch after this list):

    • with multiple brokers, set acks to -1 or all
    • the consumer's isolation level must be set: isolation.level = read_committed (the default is read_uncommitted), to avoid reading uncommitted data
    • give the producer as many retries as practical (the retries parameter)
    • it is recommended that the consumer commits offsets manually: enable.auto.commit=false
  • Why offsets are not incremented one by one for transactional messages (e.g. sending one record per transaction advances the offset by 2)

    • when Kafka writes a transactional batch, one extra record is written to mark the outcome of that transaction

      There is no “2-phase commit” with Kafka transactions. These extra records are not real records, they are markers to indicate whether the previous transaction was successful or not. There is nothing to “get” that is meaningful outside of the broker.

      If you publish 10 records in a transaction, it will use 11 slots in the log; if you only publish 1 record, it will use 2 slots.

  • Kafka inside Spring-managed (XA-style) transactions

    Within a Spring-managed transaction, application code can obtain the transactional Kafka resources via ProducerFactoryUtils.getTransactionalResourceHolder(ProducerFactory, String, java.time.Duration). Spring's KafkaTemplate automatically detects the thread-bound Producer and participates in it. So if a database transaction has already been opened, there is no need to also start a KafkaTransactionManager; note that the consumer must set its isolation level to READ_COMMITTED to avoid reading uncommitted data.

  • Transaction-related error

    java.lang.IllegalStateException: No transaction is in process; possible solutions: run the template operation within the scope of a template.executeInTransaction() operation, start a transaction with @Transactional before invoking the template method, run in a transaction started by a listener container when consuming a record

    • When sending spring-kafka transactional messages, make sure @Transactional is present. KafkaTransactionManager and DataSourceTransactionManager both extend AbstractPlatformTransactionManager, so when using transactional messages you must either add the annotation or start a local (template-managed) transaction.
  • Failure to start a transaction (for reference only)

    Invalid transition attempted from state IN_TRANSACTION to state IN_TRANSACTION

    • requests arriving too quickly, so the same transactional id is used for overlapping transactions
    • @Transactional missing and no local transaction started, so no transaction could be opened
    • the consumer isolation level was read_uncommitted, leading to inconsistent reads
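Below is a minimal sketch that ties the "no message loss" settings and the transactional producer factory together, assuming spring-kafka 2.3.x; the class name, group id, retry count and transaction id prefix are illustrative, not the author's original configuration:

package com.frame.kafka.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;

public class NoLossKafkaProps { // illustrative helper, not part of the original post

    /** Producer settings for "no message loss" plus transactions. */
    public static Map<String, Object> safeProducerProps() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ProducerConfig.ACKS_CONFIG, "all");                          // wait for all in-sync replicas
        props.put(ProducerConfig.RETRIES_CONFIG, 10);                          // generous retry count
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);             // required for transactions
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    /** Consumer settings: only read committed data and commit offsets manually. */
    public static Map<String, Object> safeConsumerProps() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "tx-demo");                  // illustrative group id
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    /** Transaction-capable producer factory: setting a transaction id prefix enables transactions. */
    public static DefaultKafkaProducerFactory<Integer, String> transactionalProducerFactory() {
        DefaultKafkaProducerFactory<Integer, String> factory = new DefaultKafkaProducerFactory<>(safeProducerProps());
        factory.setTransactionIdPrefix("tran-"); // prefix is illustrative
        return factory;
    }
}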
4. Meaning of the consumer auto.offset.reset parameter
  • earliest

If a partition has a committed offset, consume from that offset; if there is no committed offset, consume from the beginning.

  • latest

If a partition has a committed offset, consume from that offset; if there is no committed offset, consume only data newly produced to that partition.

  • none

If every partition of the topic has a committed offset, consume from after those offsets; if any partition lacks a committed offset, throw an exception.

P.S.

earliest is the recommended default. With this setting, if Kafka fails and is restarted, consumption finds the un-consumed offsets and continues from there. latest can easily lose messages: if data keeps being written to the topic while Kafka is having problems and you then restart, consumption begins from the newest offset and whatever arrived in between is skipped. I have not used the none setting; its compatibility is poor and it often causes problems.
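For reference, this is just another consumer property, set the same way as the consumerProps() methods in section I; a minimal sketch (the class name is illustrative):

package com.frame.kafka.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;

public class OffsetResetExample { // illustrative helper, not part of the original post

    public static Map<String, Object> propsWithOffsetReset() {
        Map<String, Object> props = new HashMap<>();
        // where to start when the group has no committed offset for a partition
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // or "latest" / "none"
        return props;
    }
}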

III. Kafka Log Inspection Tool

Dump Log Segment

Sometimes we need to verify that the log index is correct, or simply want to print messages straight from a log file. The kafka.tools.DumpLogSegments class does exactly this; let's first look at the parameters it accepts:

[iteblog@www.iteblog.com /]$ bin/kafka-run-class.sh kafka.tools.DumpLogSegments 
Parse a log file and dump its contents to the console, useful for debugging a seemingly corrupt log segment.
Option                                  Description                            
------                                  -----------                            
--deep-iteration                        if set, uses deep instead of shallow   
                                          iteration                            
--files <file1, file2, ...>             REQUIRED: The comma separated list of  
                                          data and index log files to be dumped
--key-decoder-class                     if set, used to deserialize the keys.  
                                          This class should implement kafka.   
                                          serializer.Decoder trait. Custom jar 
                                          should be available in kafka/libs    
                                          directory. (default: kafka.          
                                          serializer.StringDecoder)            
--max-message-size <Integer: size>      Size of largest message. (default:     
                                          5242880)                             
--print-data-log                        if set, printing the messages content  
                                          when dumping data logs               
--value-decoder-class                   if set, used to deserialize the        
                                          messages. This class should          
                                          implement kafka.serializer.Decoder   
                                          trait. Custom jar should be          
                                          available in kafka/libs directory.   
                                          (default: kafka.serializer.          
                                          StringDecoder)                       
--verify-index-only                     if set, just verify the index log      
                                          without printing its content

Clearly, --files is required when running kafka.tools.DumpLogSegments; it points at the absolute path of the log files inside a topic-partition directory. The partition directories live under the directory configured by the log.dirs parameter in config/server.properties. For example, to inspect /home/q/kafka/kafka_2.10-0.8.2.1/data/test-4/00000000000034245135.log we can run a command like the following:

[xxx/]$ /usr/local/kafka_2.12-2.3.1/bin/kafka-run-class.sh kafka.tools.DumpLogSegments  --print-data-log --files /usr/local/kafka_2.12-2.3.1/logs/dyzh.pharmacyWallet.topic.dev-0/00000000000000010708.log 
Dumping /usr/local/kafka_2.12-2.3.1/logs/dyzh.pharmacyWallet.topic.dev-0/00000000000000010708.log
Starting offset: 10708
baseOffset: 10708 lastOffset: 10708 count: 1 baseSequence: 0 lastSequence: 0 producerId: 85094 producerEpoch: 0 partitionLeaderEpoch: 0 isTransactional: true isControl: false position: 0 CreateTime: 1588921890685 size: 100 magic: 2 compresscodec: NONE crc: 2784045922 isvalid: true
| offset: 10708 CreateTime: 1588921890685 keysize: -1 valuesize: 32 sequence: 0 headerKeys: [] payload: b7ddad61094f4849a17cedfed4dfb119
baseOffset: 10709 lastOffset: 10709 count: 1 baseSequence: -1 lastSequence: -1 producerId: 85094 producerEpoch: 0 partitionLeaderEpoch: 0 isTransactional: true isControl: true position: 100 CreateTime: 1588921890700 size: 78 magic: 2 compresscodec: NONE crc: 811342613 isvalid: true
| offset: 10709 CreateTime: 1588921890700 keysize: 4 valuesize: 6 sequence: -1 headerKeys: [] endTxnMarker: COMMIT coordinatorEpoch: 70
baseOffset: 10710 lastOffset: 10712 count: 3 baseSequence: 0 lastSequence: 2 producerId: 85094 producerEpoch: 1 partitionLeaderEpoch: 0 isTransactional: true isControl: false position: 178 CreateTime: 1588922828302 size: 178 magic: 2 compresscodec: NONE crc: 182076061 isvalid: true

As you can see, the command prints the header information and offset of every batch in the log; the message payloads themselves are only printed because --print-data-log was set. To inspect several log files at once, separate them with commas.

Kafka uses baseSequence, lastSequence, and producerId to enforce the idempotence of the data.

Transactions are built on top of idempotence: when you configure a transactional.id you must also enable idempotence. Idempotence, however, can be used on its own without transactions.

  • Idempotence introduces the Producer ID (PID)

  • Transactions introduce the transactional.id property.
    Possible settings:

  • enable.idempotence = true, transactional.id not set: idempotence only.

  • enable.idempotence = true, transactional.id set: both transactions and idempotence.

  • enable.idempotence = false, transactional.id not set: a Kafka producer with neither transactions nor idempotence.

  • enable.idempotence = false, transactional.id set: the producer cannot obtain a PID and fails with an error.
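The four combinations above map directly onto two producer properties; a minimal sketch (the class name and transactional id are illustrative):

package com.frame.kafka.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;

public class IdempotenceVsTransactions { // illustrative helper, not part of the original post

    /** Idempotence only: enable.idempotence=true, no transactional.id. */
    public static Map<String, Object> idempotentOnly() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        return props;
    }

    /** Idempotence plus transactions: enable.idempotence=true and a transactional.id. */
    public static Map<String, Object> idempotentAndTransactional() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "tran-1"); // illustrative transactional id
        return props;
    }

    // enable.idempotence=false with a transactional.id set is invalid:
    // the producer cannot obtain a PID and fails on startup.
}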


Welcome to follow my WeChat official account so we can learn and improve together.
