SpringBoot集成Kafka简单应用
windows下搭建kafka开发环境请参考-Windows平台kafka环境的搭建
1: pom文件引入kafka所需jar包
<!-- Kafka support: Spring for Apache Kafka integration -->
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>1.3.5.RELEASE</version><!--$NO-MVN-MAN-VER$-->
</dependency>
2:配置文件application.properties加入如下配置
# Kafka related configuration
spring.kafka.bootstrap-servers=127.0.0.1:9092
# Default consumer group id (used when a listener does not set its own groupId)
spring.kafka.consumer.group-id=0
# Key/value serializers and deserializers (plain strings on both sides)
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
# Producer batch size in BYTES per partition batch (not a message count)
spring.kafka.producer.batch-size=65536
# Total memory in bytes the producer may use to buffer unsent records
spring.kafka.producer.buffer-memory=524288
3:创建项目启动类
package spark;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
/**
 * Spring Boot entry point: enables auto-configuration and component scanning,
 * then boots the embedded web server and the Kafka producer/consumer beans.
 */
@SpringBootApplication
public class SpringkafkaproducerApplication {
public static void main(String[] args) {
SpringApplication.run(SpringkafkaproducerApplication.class, args);
}
}
4:创建kafka消息发送类
package spark;
import org.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
/**
 * REST endpoint that publishes messages to Kafka.
 *
 * <p>Exposed at {@code /kafka/send?msg=...}; each request is sent
 * asynchronously to {@link #TOPIC_ONE} and the HTTP response ("0000")
 * is returned without waiting for the broker acknowledgement.
 */
@RestController
@RequestMapping("kafka")
public class KafkaProducer {

    private static final Logger logger = LoggerFactory.getLogger(KafkaProducer.class);

    /** Destination topic; auto-created by the broker if it does not exist yet. */
    public static final String TOPIC_ONE = "topic.one";

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /**
     * Sends {@code msg} to {@link #TOPIC_ONE} and registers a callback for the
     * asynchronous result.
     *
     * @param msg message payload taken from the {@code msg} request parameter
     * @return the fixed marker "0000" (send outcome is reported via the log only)
     */
    @RequestMapping("send")
    public String send(@RequestParam("msg") String msg) {
        logger.info("准备发送消息为:{}", msg);
        // Fire-and-forget send; the future completes on the producer network thread.
        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(TOPIC_ONE, msg);
        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
            @Override
            public void onFailure(Throwable throwable) {
                // Failures belong at ERROR level, and the throwable must be passed
                // as the last argument so the full stack trace is logged.
                logger.error("{} - 生产者 发送消息失败:{}", TOPIC_ONE, throwable.getMessage(), throwable);
            }

            @Override
            public void onSuccess(SendResult<String, String> sendResult) {
                // Parameterized logging avoids string concatenation when INFO is disabled.
                logger.info("{} - 生产者 发送消息成功:{}", TOPIC_ONE, sendResult);
            }
        });
        return "0000";
    }
}
5:启动项目访问连接http://localhost:8888/kafka/send?msg=saa1111s
控制台会打出如下日志:
16:06:03.752 [http-nio-8888-exec-1] INFO spark.KafkaProducer - 准备发送消息为:saa1111s
16:06:03.768 [http-nio-8888-exec-1] INFO org.apache.kafka.clients.producer.ProducerConfig - ProducerConfig values:
acks = 1
batch.size = 16384
block.on.buffer.full = false
bootstrap.servers = [192.168.100.150:9092]
buffer.memory = 33554432
client.id =
compression.type = none
connections.max.idle.ms = 540000
interceptor.classes = null
key.serializer = class org.apache.kafka.common.serialization.StringSerializer
linger.ms = 0
max.block.ms = 60000
max.in.flight.requests.per.connection = 5
max.request.size = 1048576
metadata.fetch.timeout.ms = 60000
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.sample.window.ms = 30000
partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
receive.buffer.bytes = 32768
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retries = 0
retry.backoff.ms = 100
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
send.buffer.bytes = 131072
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
timeout.ms = 30000
value.serializer = class org.apache.kafka.common.serialization.StringSerializer
16:06:03.775 [http-nio-8888-exec-1] INFO org.apache.kafka.clients.producer.ProducerConfig - ProducerConfig values:
acks = 1
batch.size = 16384
block.on.buffer.full = false
bootstrap.servers = [192.168.100.150:9092]
buffer.memory = 33554432
client.id = producer-1
compression.type = none
connections.max.idle.ms = 540000
interceptor.classes = null
key.serializer = class org.apache.kafka.common.serialization.StringSerializer
linger.ms = 0
max.block.ms = 60000
max.in.flight.requests.per.connection = 5
max.request.size = 1048576
metadata.fetch.timeout.ms = 60000
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.sample.window.ms = 30000
partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
receive.buffer.bytes = 32768
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retries = 0
retry.backoff.ms = 100
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
send.buffer.bytes = 131072
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
timeout.ms = 30000
value.serializer = class org.apache.kafka.common.serialization.StringSerializer
16:06:03.798 [http-nio-8888-exec-1] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka version : 0.10.1.1
16:06:03.799 [http-nio-8888-exec-1] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka commitId : f10ef2720b03b247
此时说明程序已经成功将消息发送到kafka中名为topic.one的主题上,其实这里的主题就相当于rabbitmq中的队列。
注意:当kafka中没有名为topic.one的主题时程序会自动创建。
6:创建kafka消费者类
- 广播形式消费
package spark;
import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.stereotype.Component;
/**
 * Kafka consumers demonstrating broadcast consumption: the two listeners
 * subscribe to the same topic but belong to DIFFERENT consumer groups, so
 * every message is delivered to both of them.
 */
@Component
public class KafkaConsumer {

    private static final Logger logger = LoggerFactory.getLogger(KafkaConsumer.class);

    /**
     * Listener in group {@code topic.one}.
     *
     * @param record the consumed record (key/value types unknown here, hence wildcards)
     * @param topic  the topic the record was received from, injected from the headers
     */
    @KafkaListener(topics = KafkaProducer.TOPIC_ONE, groupId = KafkaProducer.TOPIC_ONE)
    public void topic_one(ConsumerRecord<?, ?> record, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
        // Typed Optional<?> instead of the raw Optional to avoid unchecked-type warnings.
        Optional<?> message = Optional.ofNullable(record.value());
        if (message.isPresent()) {
            Object msg = message.get();
            logger.info("被{}消费了: +++++++++++++++ Topic:{},Record:{},Message:{}",
                    KafkaProducer.TOPIC_ONE, topic, record, msg);
        }
    }

    /**
     * Listener in group {@code TWO}. Because its group differs from the one
     * above, it receives its own copy of every message (broadcast effect).
     */
    @KafkaListener(topics = KafkaProducer.TOPIC_ONE, groupId = "TWO")
    public void topic_two(ConsumerRecord<?, ?> record, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
        Optional<?> message = Optional.ofNullable(record.value());
        if (message.isPresent()) {
            Object msg = message.get();
            logger.info("被{}1111消费了: +++++++++++++++ Topic:{},Record:{},Message:{}",
                    KafkaProducer.TOPIC_ONE, topic, record, msg);
        }
    }
}
如上面代码所示,程序创建了两个消息消费者,每个消费者都监听来自主题topic.one的消息,唯一不同的是两个消费者分别属于不同的组别,即groupId设置不同。启动工程,访问连接http://localhost:8888/kafka/send?msg=3
查看程序日志如下:
17:17:30.493 [http-nio-8888-exec-9] INFO spark.KafkaProducer - 准备发送消息为:3
17:17:30.503 [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] INFO spark.KafkaConsumer - 被topic.one消费了: +++++++++++++++ Topic:topic.one,Record:ConsumerRecord(topic = topic.one, partition = 0, offset = 7, CreateTime = 1567156650493, serialized key size = -1, serialized value size = 1, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = 3),Message:3
17:17:30.503 [kafka-producer-network-thread | producer-1] INFO spark.KafkaProducer - topic.one - 生产者 发送消息成功:SendResult [producerRecord=ProducerRecord(topic=topic.one, partition=null, headers=RecordHeaders(headers = [], isReadOnly = true), key=null, value=3, timestamp=null), recordMetadata=topic.one-0@7]
17:17:30.503 [org.springframework.kafka.KafkaListenerEndpointContainer#1-0-C-1] INFO spark.KafkaConsumer - 被topic.one1111消费了: +++++++++++++++ Topic:topic.one,Record:ConsumerRecord(topic = topic.one, partition = 0, offset = 7, CreateTime = 1567156650493, serialized key size = -1, serialized value size = 1, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = 3),Message:3
由此可见一条消息分别被两个不同的消费者消费,由此达到了广播的效果。有读者会问了,那我不需要每个消费者都读取到重复的消息该如何配置?
- 相同消费组唯一消费模式
package spark;
import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.stereotype.Component;
/**
 * Kafka consumers demonstrating exclusive (work-queue) consumption: both
 * listeners subscribe to the same topic AND share the same consumer group,
 * so each message is delivered to exactly one of them.
 */
@Component
public class KafkaConsumer {

    private static final Logger logger = LoggerFactory.getLogger(KafkaConsumer.class);

    /**
     * First listener in group {@code topic.one}.
     *
     * @param record the consumed record (key/value types unknown here, hence wildcards)
     * @param topic  the topic the record was received from, injected from the headers
     */
    @KafkaListener(topics = KafkaProducer.TOPIC_ONE, groupId = KafkaProducer.TOPIC_ONE)
    public void topic_one(ConsumerRecord<?, ?> record, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
        // Typed Optional<?> instead of the raw Optional to avoid unchecked-type warnings.
        Optional<?> message = Optional.ofNullable(record.value());
        if (message.isPresent()) {
            Object msg = message.get();
            logger.info("被{}消费了: +++++++++++++++ Topic:{},Record:{},Message:{}",
                    KafkaProducer.TOPIC_ONE, topic, record, msg);
        }
    }

    /**
     * Second listener in the SAME group as the one above; within one group a
     * partition is assigned to a single consumer, so messages are not duplicated.
     */
    @KafkaListener(topics = KafkaProducer.TOPIC_ONE, groupId = KafkaProducer.TOPIC_ONE)
    public void topic_two(ConsumerRecord<?, ?> record, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
        Optional<?> message = Optional.ofNullable(record.value());
        if (message.isPresent()) {
            Object msg = message.get();
            logger.info("被{}1111消费了: +++++++++++++++ Topic:{},Record:{},Message:{}",
                    KafkaProducer.TOPIC_ONE, topic, record, msg);
        }
    }
}
上述代码与前段代码唯一的区别在于,两个消费者的组别设置成了一样的。启动程序,多次访问连接http://localhost:8888/kafka/send?msg=4
查看程序日志如下:
17:33:20.426 [http-nio-8888-exec-1] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka version : 1.0.1
17:33:20.426 [http-nio-8888-exec-1] INFO org.apache.kafka.common.utils.AppInfoParser - Kafka commitId : c0518aa65f25317e
17:33:20.468 [kafka-producer-network-thread | producer-1] INFO spark.KafkaProducer - topic.one - 生产者 发送消息成功:SendResult [producerRecord=ProducerRecord(topic=topic.one, partition=null, headers=RecordHeaders(headers = [], isReadOnly = true), key=null, value=4, timestamp=null), recordMetadata=topic.one-0@8]
17:33:20.475 [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1] INFO spark.KafkaConsumer - 被topic.one消费了: +++++++++++++++ Topic:topic.one,Record:ConsumerRecord(topic = topic.one, partition = 0, offset = 8, CreateTime = 1567157600446, serialized key size = -1, serialized value size = 1, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = 4),Message:4
从日志可以看出无论访问多少次,只会有一个消费者消费了消息,因此达到了不重复消费的目的。
上述两种模式,读者可以根据自己项目的实际需要进行选择。