Integrating Kafka with Spring Boot
Prerequisites
- A ZooKeeper + Kafka environment. On Windows, install ZooKeeper (see "Setting up a ZooKeeper cluster on Windows") and Kafka, then create the topics with the command-line tools (a sample command follows this list).
- JDK 8
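With ZooKeeper and Kafka running, the topics used later (mu.topic and mu.fooTopic) can be created with the kafka-topics tool shipped with Kafka. A sketch for Windows, assuming the broker listens on 192.168.2.76:9092 as configured below (on Kafka versions older than 2.2, pass --zookeeper localhost:2181 instead of --bootstrap-server):

bin\windows\kafka-topics.bat --create --bootstrap-server 192.168.2.76:9092 --replication-factor 1 --partitions 1 --topic mu.topic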
Project layout
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.3.1.RELEASE</version>
<relativePath/> <!-- lookup parent from repository -->
</parent>
<groupId>com.kafka</groupId>
<artifactId>SpringBootKafka</artifactId>
<version>0.0.1-SNAPSHOT</version>
<name>SpringBootKafka</name>
<description>Demo project for Spring Boot</description>
<properties>
<java.version>1.8</java.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.junit.vintage</groupId>
<artifactId>junit-vintage-engine</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-configuration-processor</artifactId>
<optional>true</optional>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</project>
application.yml
server:
  port: 8086
kafka:
  brokerAddress: 192.168.2.76:9092
  topic: mu.topic
  fooTopic: mu.fooTopic
spring:
  jmx:
    enabled: false
logback.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration debug="false">
<!-- Directory for log files; avoid relative paths in Logback configuration -->
<property name="LOG_HOME" value="/home" />
<!-- Console output -->
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<!-- Output format: %d = date, %thread = thread name, %-5level = level padded to 5 characters, %msg = log message, %n = newline -->
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
</encoder>
</appender>
<!-- Roll over to a new log file every day -->
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- Log file name pattern -->
<FileNamePattern>${LOG_HOME}/TestWeb.log.%d{yyyy-MM-dd}.log</FileNamePattern>
<!-- Number of days to keep log files -->
<MaxHistory>30</MaxHistory>
</rollingPolicy>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<!-- Output format: %d = date, %thread = thread name, %-5level = level padded to 5 characters, %msg = log message, %n = newline -->
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
</encoder>
<!-- Maximum size of a single log file -->
<triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
<MaxFileSize>10MB</MaxFileSize>
</triggeringPolicy>
</appender>
<logger name="org.apache.kafka.clients.consumer" level="off" />
<logger name="org.apache.kafka.clients.producer" level="off" />
<!-- Root log level -->
<root level="INFO">
<appender-ref ref="STDOUT" />
</root>
</configuration>
ConfigProperties: a configuration properties class that binds the broker address and topic names from application.yml to a Java object.
package com.example.kafka;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;
@Component
@ConfigurationProperties(prefix = "kafka")
public class ConfigProperties {
private String brokerAddress;
private String topic;
private String fooTopic;
public String getBrokerAddress() {
return brokerAddress;
}
public void setBrokerAddress(String brokerAddress) {
this.brokerAddress = brokerAddress;
}
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
public String getFooTopic() {
return fooTopic;
}
public void setFooTopic(String fooTopic) {
this.fooTopic = fooTopic;
}
}
CommonConfiguration: configures the producer and the consumer.
package com.example.kafka;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.kafka.support.converter.StringJsonMessageConverter;
import org.springframework.retry.support.RetryTemplate;
import java.util.HashMap;
import java.util.Map;
@Configuration
public class CommonConfiguration {
@Autowired
private ConfigProperties configProperties;
@Bean
public ProducerFactory<String,String> producerFactory(){
Map<String,Object> props = new HashMap<>();
//Kafka broker address; for a cluster, separate the addresses with commas
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,this.configProperties.getBrokerAddress());
//Number of retries when a send fails; the default is 0
props.put(ProducerConfig.RETRIES_CONFIG,0);
//Batch size in bytes; the producer sends a batch once it reaches this size
props.put(ProducerConfig.BATCH_SIZE_CONFIG,16384);
//Linger time in milliseconds; a batch is sent after this delay even if batch.size has not been reached
props.put(ProducerConfig.LINGER_MS_CONFIG,1);
//Total memory (in bytes) the producer may use to buffer records
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG,33554432);
//Key serializer
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,StringSerializer.class);
//Value serializer
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class);
return new DefaultKafkaProducerFactory<>(props);
}
@Bean
public KafkaTemplate<String,String> kafkaTemplate(){
return new KafkaTemplate<>(producerFactory());
}
@Bean
public Map<String,Object> consumerProperties(){
Map<String,Object> props = new HashMap<>();
//Kafka broker address; for a cluster, separate the addresses with commas
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,this.configProperties.getBrokerAddress());
//Consumer group id
props.put(ConsumerConfig.GROUP_ID_CONFIG,"s1pGroup");
//If true, offsets are committed automatically in the background; disabled here so the listener container manages commits
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,false);
//Timeout used to detect consumer failures when using Kafka's group management
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG,15000);
//Key deserializer
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
//Value deserializer
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class);
return props;
}
@Bean
public ConsumerFactory<String,String> consumerFactory(){
return new DefaultKafkaConsumerFactory<>(consumerProperties());
}
@Bean
public ConcurrentKafkaListenerContainerFactory<String,String> kafkaListenerContainerFactory(){
ConcurrentKafkaListenerContainerFactory<String,String> factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory());
return factory;
}
@Bean
public ConcurrentKafkaListenerContainerFactory<String,String> jsonKafkaListenerContainerFactory(){
ConcurrentKafkaListenerContainerFactory<String,String> factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory());
factory.setMessageConverter(new StringJsonMessageConverter());
return factory;
}
@Bean
public ConcurrentKafkaListenerContainerFactory<String,String> retryKafkaListenerContainerFactory() {
ConcurrentKafkaListenerContainerFactory<String,String> factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory());
factory.setRetryTemplate(new RetryTemplate());
return factory;
}
}
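The jsonKafkaListenerContainerFactory and retryKafkaListenerContainerFactory beans above are not used by the default listener; a listener opts into one of them through the containerFactory attribute of @KafkaListener. A minimal sketch, assuming a hypothetical Foo DTO and JSON payloads such as {"name":"..."} being published to kafka.fooTopic (neither is part of the original project):

package com.example.kafka;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;
// Hypothetical DTO used only for this sketch.
class Foo {
    private String name;
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
    @Override
    public String toString() { return "Foo{name='" + name + "'}"; }
}
@Component
class FooConsumer {
    // containerFactory selects the jsonKafkaListenerContainerFactory bean defined above,
    // so the StringJsonMessageConverter turns the JSON string into a Foo instance.
    @KafkaListener(topics = "${kafka.fooTopic}", containerFactory = "jsonKafkaListenerContainerFactory")
    public void listen(Foo foo) {
        System.out.println("Received: " + foo);
    }
}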
Producer
package com.example.kafka;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;
@Component
public class Producer {
@Autowired
private ConfigProperties configProperties;
@Autowired
private KafkaTemplate<String,String> template;
public void send(String foo){
this.template.send(this.configProperties.getTopic(),foo);
}
}
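send() is fire-and-forget here; if the caller needs to know whether the broker accepted the record, the ListenableFuture returned by KafkaTemplate.send() can be observed. A minimal sketch of an additional method for the Producer class above (the sendWithCallback name is illustrative):

public void sendWithCallback(String foo){
    // In spring-kafka 2.5.x, send() returns a ListenableFuture<SendResult<String, String>>.
    this.template.send(this.configProperties.getTopic(), foo)
            .addCallback(
                    result -> System.out.println("Sent to partition "
                            + result.getRecordMetadata().partition()
                            + " at offset " + result.getRecordMetadata().offset()),
                    ex -> System.err.println("Send failed: " + ex.getMessage()));
}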
Consumer
package com.example.kafka;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;
import java.util.concurrent.CountDownLatch;
@Component
public class Consumer {
public final CountDownLatch latch = new CountDownLatch(1);
@KafkaListener(topics = "${kafka.topic}")
public void listen(String foo){
System.out.println("Received: " + foo);
this.latch.countDown();
}
}
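When a listener also needs the record key, partition, or offset, @KafkaListener can receive the full ConsumerRecord instead of just the value. A minimal sketch (the RecordConsumer class and its separate recordGroup group id are illustrative additions, not part of the original project):

package com.example.kafka;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;
@Component
public class RecordConsumer {
    // Receiving the raw ConsumerRecord exposes the key, partition and offset in
    // addition to the String value; the distinct groupId gives this listener its
    // own copy of every record instead of splitting partitions with s1pGroup.
    @KafkaListener(topics = "${kafka.topic}", groupId = "recordGroup")
    public void listen(ConsumerRecord<String, String> record){
        System.out.println("Received '" + record.value()
                + "' from partition " + record.partition()
                + " at offset " + record.offset());
    }
}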
KafkaApplication: wires everything together; here I simply send a single string.
package com.example.kafka;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.web.bind.annotation.RestController;
import java.util.concurrent.TimeUnit;
@SpringBootApplication
@RestController
@Import({CommonConfiguration.class,ConfigProperties.class})
@EnableKafka
public class KafkaApplication {
public static void main(String[] args) throws Exception {
ConfigurableApplicationContext context = new SpringApplicationBuilder(KafkaApplication.class)
.web(WebApplicationType.NONE)
.run(args);
Producer producer = context.getBean(Producer.class);
producer.send("day day up");
context.getBean(Consumer.class).latch.await(10, TimeUnit.SECONDS);
context.close();
}
}
Execution result: the console prints Received: day day up.
Summary: in a real project, simply invoke the producer and consumer wherever they are needed, for example from an HTTP endpoint as sketched below.
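Since spring-boot-starter-web is already on the classpath, the producer could be driven from an HTTP endpoint. A minimal sketch, assuming the application is started as a regular web application (i.e. without WebApplicationType.NONE) and using a hypothetical /send path:

package com.example.kafka;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;
@RestController
public class SendController {
    @Autowired
    private Producer producer;
    // POST a plain-text body to /send to publish it to the configured topic.
    @PostMapping("/send")
    public String send(@RequestBody String message){
        producer.send(message);
        return "sent";
    }
}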