Kafka 入门(二)

3 篇文章 0 订阅

Kafka 入门(二)

接着Kafka 入门(一),接着就要在应用中使用。

Spring6-Kafka

生产者,消费者分别使用不同的应用

生产者

引入依赖
<dependencies>
    <!-- Spring for Apache Kafka: provides KafkaTemplate, KafkaAdmin, @KafkaListener support -->
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
        <version>3.0.4</version>
    </dependency>
    <!-- Lombok: generates boilerplate (e.g. the @Slf4j logger used in the services) -->
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <version>1.18.26</version>
    </dependency>
    <!-- Logback: SLF4J backend configured by logback.xml below -->
    <dependency>
        <groupId>ch.qos.logback</groupId>
        <artifactId>logback-classic</artifactId>
        <version>1.4.5</version>
    </dependency>
</dependencies>
application.properties
spring.kafka.bootstrap-servers=192.168.79.177:9092
logback.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" status="WARN">
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <!-- Development-only console appender; emits log events at or above the threshold level -->
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>INFO</level>
        </filter>
        <layout class="ch.qos.logback.classic.PatternLayout">
            <pattern>%date %level %logger %msg%n</pattern>
        </layout>
    </appender>

    <!-- Application logger: INFO so the demo send/receive messages are visible -->
    <logger name="pr.iceworld.fernando.spring6.kafka.producer" level="INFO" additivity="false">
        <appender-ref ref="CONSOLE"/>
    </logger>

    <!-- Framework loggers kept at WARN to silence noisy connection/metadata chatter -->
    <logger name="org.springframework.kafka" level="WARN" additivity="false">
        <appender-ref ref="CONSOLE"/>
    </logger>

    <logger name="org.apache.kafka" level="WARN" additivity="false">
        <appender-ref ref="CONSOLE"/>
    </logger>

    <root level="INFO">
        <appender-ref ref="CONSOLE"/>
    </root>

<!-- FIX: the original file ended with a truncated "</conf" tag, which is invalid XML -->
</configuration>
配置类

扫描包下相关配置类,便于AnnotationConfigApplicationContext 加载。

/**
 * Root configuration for the producer application. Component scanning picks up
 * the other {@code @Configuration} classes and services under the producer
 * package, so {@code AnnotationConfigApplicationContext} only needs this one class.
 */
@Configuration
@ComponentScan("pr.iceworld.fernando.spring6.kafka.producer")
public class KafkaCommonConfig {
}

Topic配置类

  1. 配置连接客户端
  2. 配置Topic
/**
 * Declares the Kafka admin client and the three demo topics.
 *
 * <p>The {@link KafkaAdmin} bean makes Spring create every {@code NewTopic}
 * bean on the broker at context startup (if it does not already exist).
 */
@Configuration
@PropertySource("classpath:application.properties")
public class KafkaTopicConfig {

    @Value(value = "${spring.kafka.bootstrap-servers}")
    private String bootstrapAddress;

    /** Admin client used by Spring to auto-create the NewTopic beans below. */
    @Bean
    public KafkaAdmin admin() {
        Map<String, Object> configs = new HashMap<>();
        configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress);
        return new KafkaAdmin(configs);
    }

    @Bean
    public NewTopic topic1() {
        return compactTopic("topic1");
    }

    @Bean
    public NewTopic topic2() {
        return compactTopic("topic2");
    }

    @Bean
    public NewTopic topic3() {
        return compactTopic("topic3");
    }

    /**
     * Builds a log-compacted topic with 3 partitions and replication factor 1
     * (single-broker demo setup). Extracted to avoid repeating the builder
     * chain for every topic bean.
     *
     * @param name the topic name
     * @return the topic definition to be created by {@link KafkaAdmin}
     */
    private NewTopic compactTopic(String name) {
        return TopicBuilder.name(name)
                .partitions(3)
                .replicas(1)
                .compact()
                .build();
    }
}

生产者配置类

  1. 生成Producer工厂
  2. KafkaTemplate用于发送消息的模板。
/**
 * Producer-side wiring: a {@code ProducerFactory} with String key/value
 * serializers and the {@code KafkaTemplate} built on top of it.
 */
@Configuration
public class KafkaProducerConfig {

    @Value(value = "${spring.kafka.bootstrap-servers}")
    private String bootstrapAddress;

    /**
     * Factory for String/String producers pointing at the configured broker.
     */
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        Map<String, Object> config = new HashMap<>();
        // Broker address comes from application.properties.
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress);
        // Both key and value are plain strings in this demo.
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return new DefaultKafkaProducerFactory<>(config);
    }

    /**
     * Template used by the service layer to send messages.
     */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        ProducerFactory<String, String> factory = producerFactory();
        return new KafkaTemplate<>(factory);
    }
}

发送消息类

  1. 模拟随机发送到3个分区(与Topic配置的partitions(3)一致)
  2. 消息发送包含确认回调
@Service
@Slf4j
public class KafkaProducerService {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    Random random = new Random(10);
    public void sendMessageNormal(String message, String key, String topic) {
        int ranPartition = random.nextInt(3);
        kafkaTemplate.send(createRecord(message, ranPartition, key, topic));
        log.info("Normal sent topic={}, message={}, partition={}", topic, message, ranPartition);
    }

    public void sendMessageWithHeaders(String message, String key, String topic) {
        int ranPartition = random.nextInt(3);
        Message<String> providedMessage = MessageBuilder
                .withPayload(message)
                .setHeader(KafkaHeaders.TOPIC, topic)
                .setHeader(KafkaHeaders.KEY, key)
                .setHeader(KafkaHeaders.PARTITION, ranPartition)
                .build();
        kafkaTemplate.send(providedMessage);
        log.info("Header normal sent topic={}, message={}, partition={}", topic, message, ranPartition);
    }

    public void sendMessageWithCallback(String message, String key, String topic) {
        final int ranPartition = random.nextInt(3);
        final ProducerRecord<String, String> record = createRecord(message, ranPartition, key, topic);
        CompletableFuture<SendResult<String, String>> future = kafkaTemplate.send(record);
        future.whenComplete((result, ex) -> {
            if (ex == null) {
                handleSuccess(result, message);
            } else {
                handleFailure(record, ex);
            }
        });
    }

    private void handleFailure(ProducerRecord<String, String> record, Throwable ex) {
        log.info("Fail to send topic={}, message={} due to : {}", record.topic(), record.value(), ex.getMessage());
    }

    private void handleSuccess(SendResult<String, String> sendResult, String message) {
        log.info("Sent topic={}, message={}, offset={}, partition={}",
                sendResult.getRecordMetadata().topic(),
                message,
                sendResult.getRecordMetadata().offset(),
                sendResult.getRecordMetadata().partition());
     }

    private ProducerRecord<String, String> createRecord(String message, int ranPartition, String key, String topic) {
        return new ProducerRecord(topic, ranPartition, key, message);
    }

}

启动类

/**
 * Producer demo entry point: sends 5 rounds of messages (one per send style),
 * then waits for the async callbacks before shutting down.
 */
public class MainApp {

    public static void main(String[] args) {
        // try-with-resources closes the context on exit, which also flushes
        // and closes the underlying Kafka producer.
        try (AnnotationConfigApplicationContext applicationContext =
                     new AnnotationConfigApplicationContext(KafkaCommonConfig.class)) {

            KafkaProducerService kafkaProducerService = applicationContext.getBean(KafkaProducerService.class);
            for (int i = 0; i < 5; i++) {
                kafkaProducerService.sendMessageNormal(Uuid.randomUuid().toString(), "key1", "topic1");
                kafkaProducerService.sendMessageWithHeaders(Uuid.randomUuid().toString(), "key2", "topic2");
                kafkaProducerService.sendMessageWithCallback(Uuid.randomUuid().toString(), "key3", "topic3");
            }

            // FIX: the original spawned a non-daemon thread whose only job was to
            // sleep and keep the JVM alive; sleeping on the main thread is simpler
            // and gives the async send callbacks time to complete.
            try {
                Thread.sleep(10_000);
            } catch (InterruptedException e) {
                // Restore the interrupt flag instead of wrapping in RuntimeException.
                Thread.currentThread().interrupt();
            }
        }
    }
}

消费者

引入依赖
<!-- ommit, same as producer's -->
application.properties
spring.kafka.bootstrap-servers=192.168.79.177:9092
配置配置类

扫描包下相关配置类,便于AnnotationConfigApplicationContext 加载。

/**
 * Root configuration for the consumer application. Component scanning picks up
 * the listener service and the other {@code @Configuration} classes under the
 * consumer package, so {@code AnnotationConfigApplicationContext} only needs
 * this one class.
 */
@Configuration
@ComponentScan("pr.iceworld.fernando.spring6.kafka.consumer")
public class KafkaCommonConfig {
}

消费者配置类,配置监听。

  1. 配置2个Consumer工厂
  2. 配置2个KafkaListenerContainerFactory,
    2.1 自动提交 默认
    2.2 手动提交
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
    factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL)
  3. 开启Kafka监听容器
    @EnableKafka Enable Kafka listener annotated endpoints that are created under the covers by an AbstractListenerContainerFactory.
/**
 * Consumer-side wiring: two consumer factories and two listener container
 * factories — one with default auto-commit, one with auto-commit disabled
 * and manual acknowledgment (AckMode.MANUAL).
 */
@EnableKafka
@Configuration
@PropertySource("classpath:application.properties")
public class KafkaConsumerConfig {

    @Value(value = "${spring.kafka.bootstrap-servers}")
    private String bootstrapAddress;

    /**
     * Common consumer properties shared by both factories; extracted to avoid
     * duplicating the map construction.
     *
     * @param groupId consumer group id for the factory being built
     */
    private Map<String, Object> baseConsumerProps(String groupId) {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // "latest": only consume records produced after the group first joins
        // (switch to "earliest" to replay the whole log).
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        return props;
    }

    /** Default consumer factory (auto-commit enabled). */
    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(baseConsumerProps("local"));
    }

    /** Container factory for auto-commit listeners; 3 concurrent consumers. */
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    /** Consumer factory with auto-commit disabled (offsets committed manually). */
    @Bean
    public ConsumerFactory<String, String> consumerFactoryAutoCommitFalse() {
        Map<String, Object> props = baseConsumerProps("localCommitFalse");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        return new DefaultKafkaConsumerFactory<>(props);
    }

    /**
     * Container factory for manual-ack listeners: the listener method receives
     * an {@code Acknowledgment} and must call {@code acknowledge()} itself.
     */
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactoryAutoCommitFalse() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactoryAutoCommitFalse());
        factory.setConcurrency(3);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }
}

Topic接收类

  1. 设置接收Topic
  2. 设置需要处理的监听容器工厂
  3. 筛选需要处理的partition消息
/**
 * Demo listeners: a plain record listener, a header-extracting listener, and a
 * partition-specific listener with manual offset acknowledgment.
 */
@Service
@Slf4j
public class KafkaConsumerService {

    /** Plain listener on topic1 using the auto-commit container factory. */
    @KafkaListener(
            topics = "topic1",
            containerFactory = "kafkaListenerContainerFactory",
            groupId = "local"
    )
    // FIX: parameterize ConsumerRecord<String, String> — the original raw type
    // produced unchecked warnings.
    public void receiveMessage(String message, ConsumerRecord<String, String> consumerRecord) {
        log.info("====> Topic={}, Partition={}, offset={}, Received message={}",
                consumerRecord.topic(), consumerRecord.partition(), consumerRecord.offset(), message);
    }

    /** Listener on topic2 that binds individual Kafka headers to parameters. */
    @KafkaListener(
            topics = "topic2",
            containerFactory = "kafkaListenerContainerFactory",
            groupId = "local"
    )
    public void receiveMessageWithHeaders(@Payload String message,
                                          @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
                                          @Header(KafkaHeaders.RECEIVED_PARTITION) int partition,
                                          @Header(KafkaHeaders.RECEIVED_KEY) String messageKey,
                                          @Header(KafkaHeaders.TIMESTAMP_TYPE) String  timestampType,
                                          @Header(KafkaHeaders.RECEIVED_TIMESTAMP) Long timestamp,
                                          @Header(KafkaHeaders.OFFSET) int offset) {
        log.info("====> Headers topic={}, partition={}, offset={}, messageKey={}, "
                        + "timestampType={}, timestamp={}, Received message={}",
                 topic, partition, offset, messageKey, timestampType, timestamp, message);
    }

    /**
     * Listener pinned to partition 0 of topic3, using the manual-commit
     * container factory: the offset is only committed via ack.acknowledge().
     */
    @KafkaListener(
            containerFactory = "kafkaListenerContainerFactoryAutoCommitFalse",
            groupId = "localCommitFalse",
            topicPartitions = {
                    @TopicPartition(topic = "topic3",
                            partitions = {"0"}
                    )
            }
    )
    public void receiveMessageBySpecificPartition(String message,
                                                  ConsumerRecord<String, String> consumerRecord,
                                                  Acknowledgment ack) {
        log.info("====> Specific topic={}, partition partition={}, offset={}, Received message={}",
                consumerRecord.topic(), consumerRecord.partition(), consumerRecord.offset(), message);
        // Manual commit: without this call the offset would never advance.
        ack.acknowledge();
    }
}

启动类,启动多个实例模拟消费

/**
 * Consumer demo entry point. Starting the context starts the @KafkaListener
 * containers; their non-daemon polling threads keep the JVM running, so main
 * simply returns. Run several instances to observe partition rebalancing.
 */
public class MainApp {
    public static void main(String[] args) throws InterruptedException {
        new AnnotationConfigApplicationContext(KafkaCommonConfig.class);
    }
}

生产日志

2023-03-10 20:39:16,967 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Normal sent topic=topic1, message=6smhtHX7SJ6hEwnr4Io9kA, partition=0
2023-03-10 20:39:17,030 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Header normal sent topic=topic2, message=QMIol2GlSRuODmK9hDDQcw, partition=0
2023-03-10 20:39:17,041 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Normal sent topic=topic1, message=sRi5iHY7TUqSqzM4TbUF1g, partition=0
2023-03-10 20:39:17,042 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Header normal sent topic=topic2, message=uiffzWw9TCab9u4QAQqoOA, partition=1
2023-03-10 20:39:17,043 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Normal sent topic=topic1, message=Vsl9WmPGQnCn9Ox6R3y-3Q, partition=1
2023-03-10 20:39:17,043 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Header normal sent topic=topic2, message=RSmQ34W9T9GiJsLBwW5XiQ, partition=1
2023-03-10 20:39:17,044 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Normal sent topic=topic1, message=vM447x4yTgqXp1ciulyeSw, partition=1
2023-03-10 20:39:17,044 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Header normal sent topic=topic2, message=WXQldMTxRxiUXhn0Ov2gPg, partition=1
2023-03-10 20:39:17,045 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Normal sent topic=topic1, message=ffYc0g3tR-euzDKO3Y2DQw, partition=0
2023-03-10 20:39:17,046 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Header normal sent topic=topic2, message=xpZy-YswSdeoobQaeZIFZA, partition=0
2023-03-10 20:39:17,053 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Sent topic=topic3, message=m0A92H1xQLq_cfxqNkc6ZQ, offset=69, partition=0
2023-03-10 20:39:17,060 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Sent topic=topic3, message=SwXaJVq7SQCZe3NkanhUBA, offset=70, partition=0
2023-03-10 20:39:17,070 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Sent topic=topic3, message=Qt4G_oXNTfy8gFs43SWp4Q, offset=23, partition=1
2023-03-10 20:39:17,070 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Sent topic=topic3, message=5qWN4-AXT9O2gLLvwH3s6A, offset=23, partition=2
2023-03-10 20:39:17,071 INFO pr.iceworld.fernando.spring6.kafka.producer.service.KafkaProducerService Sent topic=topic3, message=vuaoBOK_SamaeQ1e3MJ-kg, offset=71, partition=0

消费者1 topic3 partition = {"0"}

2023-03-10 20:39:17,113 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Specific topic=topic3, partition partition=0, offset=69, Received message=m0A92H1xQLq_cfxqNkc6ZQ
2023-03-10 20:39:17,131 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Specific topic=topic3, partition partition=0, offset=70, Received message=SwXaJVq7SQCZe3NkanhUBA
2023-03-10 20:39:17,134 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Specific topic=topic3, partition partition=0, offset=71, Received message=vuaoBOK_SamaeQ1e3MJ-kg

消费者2 topic3 partition = {"1", "2"}

2023-03-10 20:39:17,021 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Topic=topic1, Partition=0, offset=69, Received message=6smhtHX7SJ6hEwnr4Io9kA
2023-03-10 20:39:17,037 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Headers topic=topic2, partition=0, offset=106, messageKey=key2, timestampType=CREATE_TIME, timestamp=1678451957029, Received message=QMIol2GlSRuODmK9hDDQcw
2023-03-10 20:39:17,051 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Topic=topic1, Partition=0, offset=70, Received message=sRi5iHY7TUqSqzM4TbUF1g
2023-03-10 20:39:17,054 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Specific topic=topic3, partition partition=1, offset=23, Received message=Qt4G_oXNTfy8gFs43SWp4Q
2023-03-10 20:39:17,054 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Specific topic=topic3, partition partition=2, offset=23, Received message=5qWN4-AXT9O2gLLvwH3s6A
2023-03-10 20:39:17,056 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Topic=topic1, Partition=1, offset=46, Received message=Vsl9WmPGQnCn9Ox6R3y-3Q
2023-03-10 20:39:17,056 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Topic=topic1, Partition=1, offset=47, Received message=vM447x4yTgqXp1ciulyeSw
2023-03-10 20:39:17,057 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Headers topic=topic2, partition=1, offset=9, messageKey=key2, timestampType=CREATE_TIME, timestamp=1678451957041, Received message=uiffzWw9TCab9u4QAQqoOA
2023-03-10 20:39:17,058 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Headers topic=topic2, partition=1, offset=10, messageKey=key2, timestampType=CREATE_TIME, timestamp=1678451957043, Received message=RSmQ34W9T9GiJsLBwW5XiQ
2023-03-10 20:39:17,058 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Headers topic=topic2, partition=1, offset=11, messageKey=key2, timestampType=CREATE_TIME, timestamp=1678451957044, Received message=WXQldMTxRxiUXhn0Ov2gPg
2023-03-10 20:39:17,062 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Topic=topic1, Partition=0, offset=71, Received message=ffYc0g3tR-euzDKO3Y2DQw
2023-03-10 20:39:17,064 INFO pr.iceworld.fernando.spring6.kafka.consumer.service.KafkaConsumerService ====> Headers topic=topic2, partition=0, offset=107, messageKey=key2, timestampType=CREATE_TIME, timestamp=1678451957046, Received message=xpZy-YswSdeoobQaeZIFZA

Reference

https://docs.spring.io/spring-kafka/reference/html/

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值