本人用kafka版本:kafka_2.10-0.10.0.0
请注意:springboot整合的kafka最低版本为0.9.xxx
## **spring整合kafka:**spring-kafka-2.1.4.RELEASE.jar
spring版本:spring4.3.5
Kafka-clients:kafka-clients-1.0.0.jar
**生产者:**
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"
xmlns:p="http://www.springframework.org/schema/p" xmlns:tx="http://www.springframework.org/schema/tx"
xmlns:aop="http://www.springframework.org/schema/aop"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-4.0.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context-4.0.xsd
http://www.springframework.org/schema/aop
http://www.springframework.org/schema/aop/spring-aop-4.0.xsd
http://www.springframework.org/schema/tx
http://www.springframework.org/schema/tx/spring-tx-4.0.xsd">
    <!-- Producer configuration map, passed as-is to DefaultKafkaProducerFactory. -->
    <bean id="producerProperties" class="java.util.HashMap">
        <constructor-arg>
            <map>
                <!-- Kafka broker list; for a cluster use e.g. localhost:9092,localhost:9093,localhost:9094.
                     NOTE(review): the consumer config below uses localhost:9092 — confirm both point at the same cluster. -->
                <entry key="bootstrap.servers" value="localhost:9093" />
                <!-- Number of send retries; retrying can deliver duplicate messages to the broker
                     unless idempotence is enabled. -->
                <entry key="retries" value="10" />
                <!-- Maximum batch size in BYTES per partition, NOT a message count.
                     NOTE(review): 1638 looks like a typo for the Kafka default 16384 — confirm. -->
                <entry key="batch.size" value="1638" />
                <!-- Time (ms) the producer waits for more records before sending a partial batch; default 0. -->
                <entry key="linger.ms" value="1" />
                <!-- Total memory (bytes) the producer may use to buffer unsent records; when exhausted,
                     send() blocks or throws. (Trailing space removed from the original value.) -->
                <entry key="buffer.memory" value="33554432" />
                <!-- Broker acknowledgement level; "all" waits for the full in-sync replica set. -->
                <entry key="acks" value="all" />
                <entry key="key.serializer"
                    value="org.apache.kafka.common.serialization.StringSerializer" />
                <entry key="value.serializer"
                    value="org.apache.kafka.common.serialization.StringSerializer" />
            </map>
        </constructor-arg>
    </bean>
    <!-- ProducerFactory consumed by KafkaTemplate. -->
    <bean id="producerFactory"
        class="org.springframework.kafka.core.DefaultKafkaProducerFactory">
        <constructor-arg>
            <ref bean="producerProperties" />
        </constructor-arg>
    </bean>
    <!-- KafkaTemplate bean; inject it and call its send() methods to publish messages. -->
    <bean id="kafkaTemplate" class="org.springframework.kafka.core.KafkaTemplate">
        <constructor-arg ref="producerFactory" />
        <!-- Topic used by sendDefault()/send(message) when no topic is given explicitly. -->
        <property name="defaultTopic" value="app_log" />
    </bean>
</beans>
**消费者:**
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"
xmlns:p="http://www.springframework.org/schema/p" xmlns:tx="http://www.springframework.org/schema/tx"
xmlns:aop="http://www.springframework.org/schema/aop"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-4.0.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context-4.0.xsd
http://www.springframework.org/schema/aop
http://www.springframework.org/schema/aop/spring-aop-4.0.xsd
http://www.springframework.org/schema/tx
http://www.springframework.org/schema/tx/spring-tx-4.0.xsd">
    <!-- Consumer configuration map, passed as-is to DefaultKafkaConsumerFactory. -->
    <bean id="consumerProperties" class="java.util.HashMap">
        <constructor-arg>
            <map>
                <!-- Kafka broker list.
                     NOTE(review): the producer config uses localhost:9093 — confirm both point at the same cluster. -->
                <entry key="bootstrap.servers" value="localhost:9092" />
                <!-- Consumers sharing the same group.id form one consumer group and split partitions. -->
                <entry key="group.id" value="app_log" />
                <!-- When true the consumer periodically commits consumed offsets back to Kafka;
                     after a restart consumption resumes from the last committed offset.
                     (The original comment mentioned ZooKeeper; the new consumer commits to Kafka brokers.) -->
                <entry key="enable.auto.commit" value="true" />
                <!-- Session timeout (ms) used with the heartbeat to detect consumer failures;
                     NOT a socket timeout. (Trailing space removed from the original value.) -->
                <entry key="session.timeout.ms" value="15000" />
                <entry key="key.deserializer"
                    value="org.apache.kafka.common.serialization.StringDeserializer" />
                <entry key="value.deserializer"
                    value="org.apache.kafka.common.serialization.StringDeserializer" />
            </map>
        </constructor-arg>
    </bean>
    <!-- The message-listener bean invoked for each consumed record. -->
    <bean id="kafkaConsumerListener" class="com.lnsoft.module.commons.kafkaConfig.KafkaConsumerListener" />
    <!-- ConsumerFactory consumed by the listener container. -->
    <bean id="consumerFactory"
        class="org.springframework.kafka.core.DefaultKafkaConsumerFactory">
        <constructor-arg>
            <ref bean="consumerProperties" />
        </constructor-arg>
    </bean>
    <!-- Container properties: subscribed topic plus the listener to dispatch records to. -->
    <bean id="containerProperties"
        class="org.springframework.kafka.listener.config.ContainerProperties">
        <constructor-arg value="app_log" />
        <property name="messageListener" ref="kafkaConsumerListener" />
    </bean>
    <!-- The listener container itself; doStart() begins polling when the context starts. -->
    <bean id="messageListenerContainer"
        class="org.springframework.kafka.listener.KafkaMessageListenerContainer"
        init-method="doStart">
        <constructor-arg ref="consumerFactory" />
        <constructor-arg ref="containerProperties" />
    </bean>
</beans>
**消费者的监听类**
package com.lnsoft.module.commons.kafkaConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.listener.MessageListener;
public class KafkaConsumerListener implements MessageListener<String, String> {

    // Fix: the original declared this logger but printed everything with System.out.println
    // and swallowed exceptions via e.printStackTrace(); use the logger consistently.
    protected final Logger LOG = LoggerFactory.getLogger(this.getClass());

    /**
     * Invoked by the Spring Kafka listener container for each record received.
     * Logs the record's topic, key, value, offset and partition.
     *
     * @param consumerRecord the polled record; key and value are Strings per the configured deserializers
     */
    @Override
    public void onMessage(ConsumerRecord<String, String> consumerRecord) {
        try {
            LOG.info("=============kafkaConsumer开始消费=============");
            LOG.info("consumerRecord=={}", consumerRecord);
            LOG.info("topic=={}", consumerRecord.topic());
            LOG.info("key=={}", consumerRecord.key());
            LOG.info("value=={}", consumerRecord.value());
            LOG.info("offset=={}", consumerRecord.offset());
            LOG.info("partition=={}", consumerRecord.partition());
            LOG.info("=============kafkaConsumer消费结束=============");
        } catch (Exception e) {
            // Preserve the stack trace in the log instead of dumping it to stderr.
            LOG.error("kafkaConsumer消费失败", e);
        }
    }
}
## **springboot整合kafka:**
kafka版本:kafka_2.10-0.10.0.0,该版本的kafka必须使用springboot-1.5.8(spring-boot-starter-parent 1.5.8.RELEASE)
spring-kafka:org.springframework.kafka:spring-kafka:1.0.6.RELEASE
**生产者**
*(1)生产者的application.properties配置*
#生产者
spring.kafka.producer.bootstrap-servers=127.0.0.1:9092
server.port=8080
*(2)生产者yml方式*
#生产者
#server:
#port: 8081
#spring:
#kafka:
#producer:
#bootstrap-servers: 192.168.71.11:9092,192.168.71.12:9092,192.168.71.13:9092
*代码:*
package com.lnsoft;
import java.util.UUID;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import org.springframework.util.concurrent.ListenableFuture;
@Component
@EnableScheduling
public class KafkaProducer {

    // Fix: the original used the raw KafkaTemplate type; parameterize so send() is type-checked.
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /**
     * Publishes a random UUID message to the "app_log" and "test" topics every 5 seconds.
     * The original registered a success/failure callback only for the "app_log" send and
     * silently ignored the result of the "test" send; both results are now reported.
     */
    @Scheduled(cron = "0/5 * * * * ?") // normalized from "00/5"; fires every 5 seconds
    public void send() {
        String message = UUID.randomUUID().toString();
        ListenableFuture<SendResult<String, String>> future =
                kafkaTemplate.send("app_log", message);
        future.addCallback(o -> System.out.println("成功" + message),
                throwable -> System.out.println("失败" + message));
        kafkaTemplate.send("test", message)
                .addCallback(o -> System.out.println("成功" + message),
                        throwable -> System.out.println("失败" + message));
    }
}
**消费者**
*(1)消费者的application.properties配置*
#消费者
spring.kafka.consumer.bootstrap-servers=127.0.0.1:9092
spring.kafka.consumer.group-id=applog
spring.kafka.consumer.enable-auto-commit=true
spring.kafka.consumer.auto-offset-reset=latest
server.port=8083
*(2)消费者yml方式*
#消费者
#server:
#port: 8082
#spring:
#kafka:
#consumer:
#enable-auto-commit: true
#group-id: applog
#auto-offset-reset: latest
#bootstrap-servers: 192.168.71.11:9092,192.168.71.12:9092,192.168.71.13:9092
*代码*
package com.lnsoft;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;
@Component
public class KafkaConsumer {

    /** Consumes the "app_log" topic with the consumer group configured in application properties. */
    @KafkaListener(topics = {"app_log"})
    public void receive(String message) {
        System.out.println("app_log+消费" + message);
    }

    /** Consumes the "test" topic in its own dedicated group "myGroup2". */
    @KafkaListener(topics = {"test"}, group = "myGroup2")
    public void receive2(String message) {
        // Fix: the original printed "app_log" here although this listener consumes the "test" topic.
        System.out.println("test+消费==========" + message);
    }
}