推荐链接:
总结——》【Java】
总结——》【Mysql】
总结——》【Redis】
总结——》【Spring】
总结——》【SpringBoot】
总结——》【MyBatis、MyBatis-Plus】
SpringBoot——》集成Kafka示例
一、pom
<!-- Version properties: spring-kafka 2.4.x pairs with kafka-clients 2.4.x -->
<properties>
<spring.kafka.version>2.4.3.RELEASE</spring.kafka.version>
<kafka-client.version>2.4.1</kafka-client.version>
</properties>
<!-- dependencyManagement only pins versions; modules must still declare the
     dependencies (without <version>) in their own <dependencies> section -->
<dependencyManagement>
<dependencies>
<!-- low-level Kafka client (producer/consumer API) -->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>${kafka-client.version}</version>
</dependency>
<!-- Spring integration: @KafkaListener, KafkaTemplate, container factories -->
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>${spring.kafka.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
二、配置参数
1、生产者
# Comma-separated Kafka broker addresses (shared by producer and consumer)
spring.kafka.bootstrap-servers = kafka-s1:9092,kafka-s2:9092,kafka-s3:9092
# 0 = no retry on transient send failure (fail fast; messages may be lost)
spring.kafka.producer.retries = 0
# acks=1: leader acknowledgment only (balance of latency vs. durability)
spring.kafka.producer.acks = 1
# max bytes batched per partition before a send is triggered (16 KB)
spring.kafka.producer.batch-size = 16384
# total memory the producer may use to buffer unsent records (32 MB)
spring.kafka.producer.buffer-memory = 33554432
# messages are produced as plain strings
spring.kafka.producer.key-serializer = org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer = org.apache.kafka.common.serialization.StringSerializer
2、消费者
# Comma-separated Kafka broker addresses (shared by producer and consumer)
spring.kafka.bootstrap-servers = kafka-s1:9092,kafka-s2:9092,kafka-s3:9092
# interval between automatic offset commits (Duration; lowercase "1s" is the
# conventional spelling, though suffix matching is case-insensitive)
spring.kafka.consumer.auto-commit-interval = 1S
# earliest = replay from the beginning when no committed offset exists
spring.kafka.consumer.auto-offset-reset = earliest
# true = offsets committed automatically on the configured interval
spring.kafka.consumer.enable-auto-commit = true
# messages are consumed as plain strings
spring.kafka.consumer.key-deserializer = org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer = org.apache.kafka.common.serialization.StringDeserializer
# FIX: the listener concurrency key lives under spring.kafka.listener, not
# spring.kafka.consumer.listener — the original key was silently ignored
spring.kafka.listener.concurrency = 5
# default consumer group id
spring.kafka.consumer.group-id = g1
三、消费者配置类KafkaConsumerConfig.java
import com.eju.goodhouse.service.business.consumer.SyncEsfCommunityComsumer;
import jodd.util.StringUtil;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import java.util.HashMap;
import java.util.Map;
@Configuration
@EnableKafka
public class KafkaConsumerConfig {

    /** Comma-separated Kafka broker addresses. */
    @Value("${spring.kafka.bootstrap-servers}")
    private String broker;

    /** Default consumer group, used when a factory supplies no explicit group id. */
    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;

    /** earliest / latest / none — where to start when no committed offset exists. */
    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;

    /** "true" = auto-commit offsets, "false" = manual acknowledgment. */
    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private String enableAutoCommit;

    /**
     * Builds the raw Kafka consumer property map.
     *
     * @param consumerGroupId group id to use; blank falls back to the configured default
     * @return consumer configuration properties
     */
    public Map<String, Object> consumerConfigs(String consumerGroupId) {
        Map<String, Object> propsMap = new HashMap<>();
        // Kafka broker addresses
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, broker);
        // whether offsets are committed automatically
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        // auto-commit interval in ms. NOTE(review): hard-coded to 100 even though
        // spring.kafka.consumer.auto-commit-interval is declared in the properties;
        // this factory bypasses that setting — confirm which value is intended.
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
        // session timeout: broker marks this consumer dead if no heartbeat arrives in time
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        // key/value DEserializers (messages are plain strings) — the original
        // comment said "serialization", which was misleading on the consumer side
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // explicit group id wins; otherwise use the configured default
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, StringUtil.isNotBlank(consumerGroupId) ? consumerGroupId : groupId);
        // earliest = replay from the beginning, latest = only new messages, none = fail
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        // upper bound on records returned per poll (= max batch size for batch listeners)
        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000);
        return propsMap;
    }

    public ConsumerFactory<String, String> consumerFactory(String consumerGroupId) {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs(consumerGroupId));
    }

    /**
     * Shared construction of a batch-mode listener container factory; the two
     * {@code @Bean} methods previously duplicated this code line for line.
     *
     * @param consumerGroupId consumer group for this factory
     * @param concurrency     number of concurrent listener containers
     * @return a configured batch-listener factory
     */
    private ConcurrentKafkaListenerContainerFactory<String, String> batchListenerFactory(String consumerGroupId, int concurrency) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory(consumerGroupId));
        factory.setConcurrency(concurrency);
        // batch mode: each listener invocation receives up to MAX_POLL_RECORDS_CONFIG records
        factory.setBatchListener(true);
        factory.getContainerProperties().setPollTimeout(3000);
        return factory;
    }

    /** Factory for group "g1": 4 concurrent containers, batch consumption. */
    @Bean("kafkaListenerContainerFactory")
    KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        return batchListenerFactory("g1", 4);
    }

    /** Factory for group "g2": single container, batch consumption, ack thresholds. */
    @Bean("kafkaListenerContainerFactory2")
    KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory2() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = batchListenerFactory("g2", 1);
        // NOTE(review): ackCount/ackTime only take effect for AckMode COUNT, TIME or
        // COUNT_TIME with auto-commit disabled; with enable-auto-commit=true these
        // settings appear to be inert — confirm the intended ack mode.
        factory.getContainerProperties().setAckCount(10);
        factory.getContainerProperties().setAckTime(10000);
        return factory;
    }

    /** Registers the listener bean so its @KafkaListener methods are picked up. */
    @Bean
    public SyncEsfCommunityComsumer listenerForSyncEsfCommunity() {
        return new SyncEsfCommunityComsumer();
    }
}
四、消费者SyncEsfCommunityComsumer.java
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import java.util.List;
import java.util.Optional;
@Slf4j
public class SyncEsfCommunityComsumer {

    /**
     * Batch listener for the ESF community sync topic; receives up to
     * MAX_POLL_RECORDS records per invocation via "kafkaListenerContainerFactory".
     *
     * @param records batch of consumed records
     * @throws Exception propagated from business processing
     */
    @KafkaListener(id = "listenerForSyncEsfCommunity", topics = "${monitor.house-asset-community.topic}", containerFactory = "kafkaListenerContainerFactory")
    public void listenerForSyncEsfCommunity(List<ConsumerRecord<?, ?>> records) throws Exception {
        log.info("【listenerForSyncEsfCommunity】records size:【{}】, Thread ID:【{}】", records.size(), Thread.currentThread().getId());
        for (ConsumerRecord<?, ?> record : records) {
            // Direct null check: the original Optional.ofNullable(...).isPresent()
            // was a glorified null test and allocated an Optional per record.
            if (record.value() != null) {
                // TODO business processing
            }
        }
    }

    /**
     * Batch listener for the ESF region sync topic; uses the single-threaded
     * "kafkaListenerContainerFactory2".
     *
     * @param records batch of consumed records
     * @throws Exception propagated from business processing
     */
    @KafkaListener(id = "listenerForSyncEsfRegion", topics = "${monitor.house-asset-region.topic}", containerFactory = "kafkaListenerContainerFactory2")
    public void listenerForSyncEsfRegion(List<ConsumerRecord<?, ?>> records) throws Exception {
        log.info("【listenerForSyncEsfRegion】records size:【{}】, Thread ID:【{}】", records.size(), Thread.currentThread().getId());
        for (ConsumerRecord<?, ?> record : records) {
            // Direct null check instead of the Optional wrapper (see above).
            if (record.value() != null) {
                // TODO business processing
            }
        }
    }
}