Kafka
kafka的配置
1、 pom.xml 引入依赖
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>2.4.7.RELEASE</version>
</dependency>
2、application.yml 配置(以下为 YAML 格式,非 .properties)
#kafka配置
kafka:
consumer:
bootstrap-servers: 127.0.0.1:9092 #服务器的ip及端口,可以写多个,服务器之间用逗号间隔
enable-auto-commit: true # 是否自动提交offset
# 消费会话超时时间(超过这个时间consumer没有发送心跳,就会触发rebalance操作)
session-timeout: 120000
auto-commit-interval: 5000
# 当kafka中没有初始offset或offset超出范围时将自动重置offset
# earliest:重置为分区中最小的offset;
# latest:重置为分区中最新的offset(消费分区中新产生的数据);
# none:只要有一个分区不存在已提交的offset,就抛出异常;
auto-offset-reset: earliest
group-id: test #设置消费者的组id
max-poll-records: 1 # 批量消费每次最多消费多少条消息
# Kafka提供的序列化和反序列化类
key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
username:
password:
producer:
bootstrap-servers: 127.0.0.xx:9092 # 键名需与代码中 ${kafka.producer.bootstrap-servers} 一致
retries: 0 # 重试次数
batch-size: 4096 # 批量大小
linger: 1
buffer-memory: 40960 # 生产端缓冲区大小
# Kafka提供的序列化类(生产端应配置 serializer,而非 deserializer)
key-serializer: org.apache.kafka.common.serialization.StringSerializer
value-serializer: org.apache.kafka.common.serialization.StringSerializer
username:
password:
KafkaProducerConfig
package com.xxx.config;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
/**
*/
@Configuration
@EnableKafka
public class KafkaProducerConfig {

    /** Comma-separated broker list, bound from kafka.producer.bootstrap-servers. */
    @Value("${kafka.producer.bootstrap-servers}")
    private String servers;

    /**
     * Send retry count. Must bind to a numeric type: Kafka validates the
     * "retries" setting as an int, so binding it to a boolean makes producer
     * construction fail with a ConfigException.
     */
    @Value("${kafka.producer.retries}")
    private Integer retries;

    /** Batch size in bytes used to group records per partition. */
    @Value("${kafka.producer.batch-size}")
    private String batchSize;

    /** Time (ms) to wait for more records before sending a partial batch. */
    @Value("${kafka.producer.linger}")
    private String linger;

    /** Total memory (bytes) available to the producer for buffering. */
    @Value("${kafka.producer.buffer-memory}")
    private String bufferMemory;

    /** SASL username presented to the broker at connect time. */
    @Value("${kafka.producer.username}")
    private String username;

    /** SASL password presented to the broker at connect time. */
    @Value("${kafka.producer.password}")
    private String password;

    /** Template used by application code to publish String key/value messages. */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    /** Factory that creates producers from {@link #producerConfigs()}. */
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    /** Builds the raw Kafka producer property map. */
    public Map<String, Object> producerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        propsMap.put(ProducerConfig.RETRIES_CONFIG, retries);
        propsMap.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        propsMap.put(ProducerConfig.LINGER_MS_CONFIG, linger);
        propsMap.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 180000);
        // Producers require SERIALIZERS. The original wired StringDeserializer,
        // which Kafka rejects because it does not implement Serializer.
        propsMap.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        propsMap.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        propsMap.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        // Send credentials via SASL. The JAAS login module must match the
        // mechanism: PlainLoginModule goes with PLAIN (ScramLoginModule is
        // only valid for the SCRAM-SHA-* mechanisms).
        propsMap.put("security.protocol", "SASL_PLAINTEXT");
        propsMap.put("sasl.mechanism", "PLAIN");
        propsMap.put("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"" + username + "\" password=\"" + password + "\";");
        return propsMap;
    }
}
KafkaConsumerConfig
package com.xxx.config;
import java.io.FileNotFoundException;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.util.ResourceUtils;
@Configuration
@EnableKafka
public class KafkaConfig {

    /** Comma-separated broker list, bound from kafka.consumer.bootstrap-servers. */
    @Value("${kafka.consumer.bootstrap-servers}")
    private String servers;

    /** Whether offsets are committed automatically by the client. */
    @Value("${kafka.consumer.enable-auto-commit}")
    private boolean enableAutoCommit;

    /** Session timeout (ms): missing heartbeats beyond this trigger a rebalance. */
    @Value("${kafka.consumer.session-timeout}")
    private String sessionTimeout;

    /** Interval (ms) between automatic offset commits. */
    @Value("${kafka.consumer.auto-commit-interval}")
    private String autoCommitInterval;

    /** Consumer group id. */
    @Value("${kafka.consumer.group-id}")
    private String groupId;

    /** Offset reset policy when no committed offset exists: earliest/latest/none. */
    @Value("${kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;

    /** Maximum number of records returned by a single poll (batch size cap). */
    @Value("${kafka.consumer.max-poll-records}")
    private Integer maxPollRecords;

    /** SASL username presented to the broker at connect time. */
    @Value("${kafka.consumer.username}")
    private String username;

    /** SASL password presented to the broker at connect time. */
    @Value("${kafka.consumer.password}")
    private String password;

    /** Container factory used by @KafkaListener endpoints (batch mode enabled). */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // One processing thread per listener.
        factory.setConcurrency(1);
        // Deliver records to @KafkaListener methods in batches; the batch size
        // is capped by ConsumerConfig.MAX_POLL_RECORDS_CONFIG below.
        factory.setBatchListener(true);
        // factory.getContainerProperties().setAckMode(AckMode.MANUAL_IMMEDIATE); // enable for manual offset commits
        return factory;
    }

    /** Factory that creates consumers from {@link #consumerConfigs()}. */
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    /** Builds the raw Kafka consumer property map. */
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
        propsMap.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 180000);
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); // records per batch
        // The JAAS login module must match the mechanism: PlainLoginModule goes
        // with PLAIN (ScramLoginModule is only valid for SCRAM-SHA-* mechanisms).
        propsMap.put("security.protocol", "SASL_PLAINTEXT");
        propsMap.put("sasl.mechanism", "PLAIN");
        propsMap.put("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"" + username + "\" password=\"" + password + "\";");
        return propsMap;
    }
}
生产者:
/**
 * Kafka producer — in practice a REST controller whose endpoints publish messages.
 */
@RestController
public class KafkaProducer {

    /** Topic used by the plain {@link #send()} demo endpoint. */
    private final static String TOPIC_NAME = "zhTest";

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /** Publishes a fixed demo message (key "key") to the "zhTest" topic. */
    @RequestMapping("/send")
    public void send() {
        kafkaTemplate.send(TOPIC_NAME, "key", "test message send~");
    }

    /** Builds two sample orders, serializes them to JSON, and publishes to "testOrder". */
    @PostMapping("/testSend")
    @ApiOperation("测试kafka生产者")
    public void testSend() {
        ViOrderinfoEntity first = new ViOrderinfoEntity();
        first.setId("OI999923");
        first.setEndareaname("张");

        ViOrderinfoEntity second = new ViOrderinfoEntity();
        second.setId("OI999924");
        second.setEndareaname("赵");

        List<ViOrderinfoEntity> orders = new ArrayList<>();
        orders.add(first);
        orders.add(second);

        String payload = JSON.toJSONString(orders);
        System.out.println("+++++++++++++++++++++ message = {}");
        System.out.println(payload);
        System.out.println("+++++++++++++++++++++ message = {}");
        kafkaTemplate.send("testOrder", payload);
    }
}
消费者:
/**
 * Kafka consumers: a single-record listener and a batch listener.
 */
@Component
public class KafkaConsumer {

    /**
     * Consumes topic "zhTest" one record at a time.
     *
     * Offsets are committed automatically (enable-auto-commit=true in the
     * consumer config), so no manual acknowledgment happens here. The original
     * code called ack.acknowledge() on an undeclared variable, which does not
     * compile; manual commits would require an Acknowledgment parameter on
     * this method plus AckMode.MANUAL_IMMEDIATE on the container factory.
     */
    @KafkaListener(topics = "zhTest")
    public void listenZhugeGroup(ConsumerRecord<String, String> record) {
        String value = record.value();
        System.out.println(value);
        System.out.println(record);
    }

    /**
     * Batch listener for topic "testOrder"; the batch size is capped by
     * ConsumerConfig.MAX_POLL_RECORDS_CONFIG in the consumer configuration.
     * Each record value is expected to be a JSON object with an "id" field
     * — TODO confirm against the producer's payload (it sends a JSON array).
     */
    @KafkaListener(topics = { "testOrder" }, containerFactory = "kafkaListenerContainerFactory")
    public void consumer(List<ConsumerRecord<?, ?>> records) throws Exception {
        for (ConsumerRecord<?, ?> record : records) {
            String message = record.value().toString();
            JSONObject jsonObject = JSONObject.parseObject(message);
            System.out.println(jsonObject.getString("id"));
        }
    }
}