目录
一、Kafka配置
1、Kafka下载地址
地址:http://kafka.apache.org/downloads,选择二进制文件下载(Binary downloads),然后解压即可。
2、Kafka安装使用
Kafka的配置文件位于config目录下,因为Kafka集成了Zookeeper(Kafka存储消息的地方),所以config目录下除了有Kafka的配置文件server.properties外,还可以看到一个Zookeeper配置文件zookeeper.properties。如下图
打开server.properties,将broker.id的值修改为1,每个broker的id都必须设置为Integer类型,且不能重复。Zookeeper的配置保持默认即可。
3、启动Zookeeper
在下图/kafka_2.12-2.6.1/目录下,执行命令
① windows环境:执行下列命令
bin\windows\zookeeper-server-start.bat config\zookeeper.properties
② linux环境:执行下列命令
bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
4、启动Kafka
图同上
bin\windows\kafka-server-start.bat config\server.properties
bin/kafka-server-start.sh config/server.properties
5、创建Topic
bin\windows\kafka-topics.bat --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
bin\windows\kafka-topics.bat --describe --zookeeper localhost:2181 --topic test
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic test
6、生产消息和消费消息
6-1、启动Producers
bin\windows\kafka-console-producer.bat --broker-list localhost:9092 --topic test
bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test
6-2、启动Consumers
bin\windows\kafka-console-consumer.bat --bootstrap-server localhost:9092 --topic test --from-beginning
bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning
二、SpringBoot2整合Kafka
1、参数配置及新增消息Bean
1-1、在文件application-dev.properties中配置
######################## Kafka配置 ########################
spring.kafka.bootstrap-servers=localhost:9092
# 消费者进行分组(也可以不进行分组),组名为test-consumer
spring.kafka.consumer.group-id=test-consumer
# 消息读取策略(auto.offset.reset),包含三个可选值
# earliest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,从头开始消费
# latest:当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据
# none:topic各分区都存在已提交的offset时,从offset后开始消费;只要有一个分区不存在已提交的offset,则抛出异常
# 设置为其他任意值时,会直接向消费者抛出异常
spring.kafka.consumer.auto-offset-reset=latest
1-2、在org.springboot.springboot01.bean包下,新增Message实体
/**
 * Message payload sent to and received from Kafka.
 *
 * <p>Serialized to JSON by the producer's {@code JsonSerializer} and decoded
 * back by the consumer's {@code JsonDeserializer} (Jackson), so it must expose
 * a public no-arg constructor and getters/setters — the original omitted them
 * ("get和set...省略"), yet {@code getMessage()} is required by the record
 * filter in the consumer configuration.
 */
public class Message implements Serializable {
    private static final long serialVersionUID = 6074416803681026630L;
    // Sender identifier.
    private String from;
    // Message body text.
    private String message;

    /** No-arg constructor required by Jackson for JSON deserialization. */
    public Message() {
    }

    public Message(String from, String message) {
        this.from = from;
        this.message = message;
    }

    public String getFrom() {
        return from;
    }

    public void setFrom(String from) {
        this.from = from;
    }

    public String getMessage() {
        return message;
    }

    public void setMessage(String message) {
        this.message = message;
    }

    @Override
    public String toString() {
        return "Message{" +
                "from='" + from + '\'' +
                ", message='" + message + '\'' +
                '}';
    }
}
2、配置生产者
在org.springboot.springboot01.config包下,新增KafkaProducerConfig配置类
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springboot.springboot01.bean.Message;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.support.serializer.JsonSerializer;
import java.util.HashMap;
import java.util.Map;
/**
 * Kafka producer configuration.
 *
 * <p>Declares a {@link ProducerFactory} producing records with String keys and
 * JSON-serialized {@link Message} values, plus the {@link KafkaTemplate} used
 * to send them.
 */
@Configuration
public class KafkaProducerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    /**
     * Factory for producers: String keys, Message values encoded as JSON.
     */
    @Bean
    public ProducerFactory<String, Message> producerFactory() {
        Map<String, Object> props = new HashMap<>();
        // Broker address list plus key/value serialization strategy.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        return new DefaultKafkaProducerFactory<>(props);
    }

    /**
     * Template used by application code to publish Message records.
     */
    @Bean
    public KafkaTemplate<String, Message> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
3、配置消费者
在org.springboot.springboot01.config包下,新增KafkaConsumerConfig配置类
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springboot.springboot01.bean.Message;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.support.serializer.JsonDeserializer;
import java.util.HashMap;
import java.util.Map;
/**
 * Kafka consumer configuration.
 *
 * <p>Declares a {@link ConsumerFactory} that reads String keys and
 * JSON-decoded {@link Message} values, and the listener container factory
 * (with a record filter) backing {@code @KafkaListener} methods.
 */
@EnableKafka
@Configuration
public class KafkaConsumerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.consumer.group-id}")
    private String consumerGroupId;

    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;

    /**
     * Factory for consumers: String keys, Message values decoded from JSON.
     */
    @Bean
    public ConsumerFactory<String, Message> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        return new DefaultKafkaConsumerFactory<>(props, new StringDeserializer(), new JsonDeserializer<>(Message.class));
    }

    /**
     * Listener container factory with a record filter: records whose message
     * text contains the word "fuck" are discarded before reaching listeners.
     */
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Message> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Message> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // Returning true from the filter strategy means "drop this record".
        factory.setRecordFilterStrategy(record -> record.value().getMessage().contains("fuck"));
        return factory;
    }
}
4、配置监听
在org.springboot.springboot01.listener包下,新增KafkaMessageListener监听类
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springboot.springboot01.bean.Message;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.handler.annotation.Payload;
import org.springframework.stereotype.Component;
/**
 * Kafka consumer listeners.
 *
 * <p>Note: besides a topic name and group id, {@code @KafkaListener} can also
 * subscribe to several topics at once, e.g.
 * {@code @KafkaListener(topics = {"topic1", "topic2"})}.
 */
@Component
public class KafkaMessageListener {

    private static final Logger log = LoggerFactory.getLogger(KafkaMessageListener.class);

    /**
     * Listens on the "test" topic as part of the "test-consumer" group.
     * The partition each record came from is injected via {@code @Header}.
     *
     * @param msg         received message payload
     * @param partitionId source partition (0 when the topic is unpartitioned)
     */
    @KafkaListener(topics = "test", groupId = "test-consumer")
    public void listen(@Payload Message msg, @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partitionId) {
        log.info("1-接收到的消息:{},partition:{}", msg, partitionId);
    }

    /**
     * Listens only on specific partitions (0 and 1) of the "test2" topic.
     * To pin a starting offset use partitionOffsets, e.g.
     * {@code topicPartitions = @TopicPartition(topic = "test", partitionOffsets =
     * {@PartitionOffset(partition = "0", initialOffset = "0")})};
     * when no initialOffset is needed, the shorter form below suffices.
     *
     * @param msg         received message payload
     * @param partitionId source partition
     */
    @KafkaListener(groupId = "test-consumer", topicPartitions = @TopicPartition(topic = "test2", partitions = {"0", "1"}))
    public void listen2(@Payload String msg, @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partitionId) {
        log.info("2-接收到的消息:{},partition:{}", msg, partitionId);
    }
}
5、控制层接口
在org.springboot.springboot01.controller包下,新增KafkaSendMessageController控制类
/**
 * REST endpoints that publish {@code Message} records to the "test" Kafka
 * topic (created earlier via the CLI).
 */
@RestController
public class KafkaSendMessageController {

    private static final Logger log = LoggerFactory.getLogger(KafkaSendMessageController.class);

    @Resource
    private KafkaTemplate<String, Message> kafkaTemplate;

    /**
     * Fire-and-forget send: publishes the path variable as a Message and
     * returns immediately without waiting for broker acknowledgement.
     *
     * @param message message text taken from the request path
     */
    @GetMapping("send/{message}")
    public void sendMsg(@PathVariable String message) {
        this.kafkaTemplate.send("test", new Message("kafka", message));
    }

    /**
     * Sends to the "test" topic and logs the broker's acknowledgement (record
     * offset) or the failure asynchronously via future callbacks.
     *
     * @param message message text taken from the request path
     */
    @GetMapping("sendBack/{message}")
    public void sendMsgBack(@PathVariable String message) {
        ListenableFuture<SendResult<String, Message>> future = kafkaTemplate.send("test", new Message("kafka", message));
        // Lambda-based addCallback(SuccessCallback, FailureCallback) overload
        // replaces the anonymous ListenableFutureCallback; behavior unchanged.
        future.addCallback(
                result -> log.info("消息:{} 发送成功,offset[{}]", message, result.getRecordMetadata().offset()),
                ex -> log.error("消息:{} 发送失败,原因:{}", message, ex.getMessage(), ex));
    }
}
三、参考来源
https://mrbird.cc/Spring-Boot-Kafka.html