Kafka命令
- 启动ZooKeeper服务器
bin/zkServer.sh start - 启动CLI
bin/zkCli.sh - 停止Zookeeper服务器
bin/zkServer.sh stop - 启动kafka服务器
bin/kafka-server-start.sh config/server.properties - 停止kafka服务器
bin/kafka-server-stop.sh - 创建 Kafka 主题
- 单节点单代理配置,创建了一个名为 Hello-Kafka 的主题,其中包含一个分区和一个副本因子
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic Hello-Kafka - 单节点多代理配置,让我们为此主题将复制因子值指定为三个,因为我们有三个不同的代理运行
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 3 --partitions 1 --topic Multibrokerapplication
- 主题列表
bin/kafka-topics.sh --list --zookeeper localhost:2181 - 启动生产者以发送消息
bin/kafka-console-producer.sh --broker-list localhost:9092 --topic Hello-Kafka - 启动消费者以接收消息
bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic Hello-Kafka --from-beginning
bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic Multibrokerapplication
windows 下命令
-
启动ZooKeeper服务器
D:\software\kafka\kafka_2.13-2.6.0\bin\windows\zookeeper-server-start.bat D:\software\kafka\kafka_2.13-2.6.0\config\zookeeper.properties -
启动kafka服务器
D:\software\kafka\kafka_2.13-2.6.0\bin\windows\kafka-server-start.bat D:\software\kafka\kafka_2.13-2.6.0\config\server.properties -
主题列表
.\kafka-topics.bat --list --zookeeper localhost:2181 -
启动生产者以发送消息
.\kafka-console-producer.bat --broker-list localhost:9092 --topic Hello-Kafka -
启动消费者以接收消息
.\kafka-console-consumer.bat --bootstrap-server localhost:9092 --topic Hello-Kafka --from-beginning -
获取Offset
.\kafka-run-class.bat kafka.tools.GetOffsetShell --broker-list ip1:port1,ip2:port2
java 配置
- 类:org.apache.kafka.clients.consumer.ConsumerConfig
属性 | 配置 | 说明 |
---|---|---|
BOOTSTRAP_SERVERS_CONFIG | bootstrap.servers | kafka服务 |
GROUP_ID_CONFIG | group.id | 消费组 |
AUTO_OFFSET_RESET_CONFIG | auto.offset.reset | 消费位置配置 |
ENABLE_AUTO_COMMIT_CONFIG | enable.auto.commit | 自动提交,标记该消息消费完成 |
AUTO_COMMIT_INTERVAL_MS_CONFIG | auto.commit.interval.ms | 自动提交Offset时间间隔 |
SESSION_TIMEOUT_MS_CONFIG | session.timeout.ms | 心跳线程来同步服务端,告诉服务端自己是正常可用的(毫秒) |
MAX_POLL_RECORDS_CONFIG | max.poll.records | 一次调用poll()操作时返回的最大记录数,批量消费每次最多消费多少条 |
MAX_POLL_INTERVAL_MS_CONFIG | max.poll.interval.ms | poll 的最大间隔时间(毫秒) |
auto.offset.reset
- earliest
当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,从头开始消费 - latest
当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,消费新产生的该分区下的数据 - none
topic各分区都存在已提交的offset时,从offset后开始消费;只要有一个分区不存在已提交的offset,则抛出异常
- 类: ConcurrentKafkaListenerContainerFactory
配置 | 示例 | 说明 |
---|---|---|
setConsumerFactory | DefaultKafkaConsumerFactory | 设置消费者工厂 |
setAutoStartup | false | 是否自动启动 |
setConcurrency | 4 | 消费线程数 |
setBatchListener | true | 是否批量消费 |
getContainerProperties().setPollTimeout() | 600000 | 拉取topic的超时时间(毫秒) |
代码
消费者工厂和配置
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import java.util.HashMap;
import java.util.Map;
/**
 * Kafka consumer factory and listener-container configuration.
 *
 * <p>Reads connection, group and polling settings from {@code kafka.consumer.*}
 * properties and optionally layers SASL/SCRAM authentication on top when
 * {@code kafka.authentication.enable} is true.
 *
 * @author liutao
 */
@Configuration
@EnableKafka
public class KafkaConsumerConfig {
    // Consumer group id (group.id).
    @Value("${kafka.consumer.group-id}")
    private String groupId;
    // Offset reset policy: earliest / latest / none (auto.offset.reset).
    @Value("${kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;
    // Broker address list (bootstrap.servers).
    @Value("${kafka.consumer.bootstrap-servers}")
    private String bootstrapServers;
    // Whether offsets are committed automatically (enable.auto.commit).
    @Value("${kafka.consumer.enable-auto-commit}")
    private Boolean autoCommit;
    // Interval between automatic offset commits, in ms (auto.commit.interval.ms).
    @Value("${kafka.consumer.auto-commit-interval}")
    private Integer autoCommitInterval;
    // Max records returned by a single poll() call (max.poll.records).
    @Value("${kafka.consumer.max-poll-records}")
    private Integer maxPollRecords;
    // Session timeout in ms (session.timeout.ms).
    @Value("${kafka.consumer.session-timeout}")
    private String sessionTimeout;
    // Intended consumer thread count; currently unused (see commented-out
    // setConcurrency below).
    @Value("${kafka.consumer.concurrency}")
    private int concurrency;
    // Whether SASL/SCRAM authentication should be configured.
    @Value("${kafka.authentication.enable}")
    private boolean authEnable;
    // SASL username; only read when authentication is enabled.
    @Value("${kafka.ssl.username}")
    private String userName;
    // SASL password; only read when authentication is enabled.
    @Value("${kafka.ssl.password}")
    private String pwd;

    /**
     * Builds the container factory referenced by {@code @KafkaListener}
     * methods as "kafkaListenerContainerFactory".
     *
     * @return a batch-mode listener container factory that does not start
     *         automatically (containers are started explicitly through
     *         {@code KafkaListenerEndpointRegistry})
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // Do not start on application startup; listeners are started on demand.
        factory.setAutoStartup(false);
        // factory.setConcurrency(concurrency);
        // Deliver records to listeners in batches (List<ConsumerRecord>).
        factory.setBatchListener(true);
        // Maximum time, in ms, poll() blocks waiting for records (10 minutes).
        // This is a timeout, not a polling schedule.
        factory.getContainerProperties().setPollTimeout(600000);
        return factory;
    }

    /**
     * Creates the consumer factory backing the listener containers.
     *
     * @return a String/String consumer factory built from {@link #consumerConfigs()}
     */
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    /**
     * Assembles the raw consumer configuration map.
     *
     * <p>SASL/SCRAM properties are only added when authentication is enabled;
     * previously they were applied unconditionally, which left the injected
     * {@code authEnable} flag unused and broke connections to clusters that
     * do not use SASL.
     *
     * @return consumer properties keyed by {@link ConsumerConfig} constants
     */
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        if (authEnable) {
            props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.scram.ScramLoginModule required username=\""+userName+"\" password=\""+pwd+"\";");
            props.put("security.protocol", "SASL_PLAINTEXT");
            props.put("sasl.mechanism", "SCRAM-SHA-256");
        }
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        return props;
    }
}
消费调用Controller
import java.util.*;

import com.unicom.microserv.ai.semantic.service.*;
import com.unicom.microserv.ai.semantic.util.*;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.listener.MessageListenerContainer;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
/**
 * REST endpoints that start/stop Kafka listener containers on demand, plus
 * the {@code @KafkaListener} methods that consume records and hand them to
 * {@code KafkaConsumerService} for persistence.
 *
 * @author liutao
 */
@RestController
@Api(value = "KafkaData", description = "kafka读取数据服务")
@Component
@RequestMapping("/ai/semantictagging/msa/kafkadata")
public class KafkaDataController {
    private static final Logger logger = LoggerFactory.getLogger(KafkaDataController.class);

    // Registry used to look up listener containers by their @KafkaListener id.
    @Autowired
    private KafkaListenerEndpointRegistry registry;
    // Service that persists the consumed records.
    @Autowired
    private KafkaConsumerService kafkaConsumerService;
    // Listener id and topic of the single-record listener below; must be a
    // compile-time constant because it is referenced from an annotation.
    private final String topics = "test-topics";

    /**
     * Starts the Kafka listener container. Containers are created with
     * autoStartup=false, so consumption only begins after this endpoint
     * is called.
     */
    @GetMapping("/start")
    @ResponseBody
    public void startListener() {
        logger.info("开启监听");
        MessageListenerContainer container = registry.getListenerContainer(topics);
        // getListenerContainer returns null when no listener with this id
        // is registered; guard against an NPE.
        if (container != null && !container.isRunning()) {
            container.start();
        }
    }

    /**
     * Stops the Kafka listener container if it is registered.
     */
    @GetMapping("/end")
    @ResponseBody
    public void shutdownListener() {
        logger.info("关闭监听");
        MessageListenerContainer container = registry.getListenerContainer(topics);
        // Null when the listener id is not registered; stop() on a stopped
        // container is a no-op, so no isRunning() check is needed.
        if (container != null) {
            container.stop();
        }
        //insertKafkaData();
    }

    /**
     * Reads a single Kafka record and stores it locally via the consumer
     * service. Failures are logged (with stack trace) and swallowed so one
     * bad record does not stop the listener.
     *
     * @param record raw Kafka message payload
     */
    @ApiOperation(value = "读取kafka数据插入数据库", notes = "读取kafka数据插入数据库")
    @KafkaListener(id = topics, topics = {topics}, containerFactory = "kafkaListenerContainerFactory")
    public void readKafkaData(String record){
        try {
            kafkaConsumerService.dealRecord (record);
        } catch (Exception e) {
            // Log the full stack trace, not just the message, so failures
            // are diagnosable.
            logger.error(e.getMessage(), e);
        }
    }

    /**
     * Consumes Kafka records in batches (the container factory enables batch
     * mode), filters them and stores them to OSS via the consumer service.
     * Topic and listener id come from the {@code kafka.consumer.topics}
     * property.
     *
     * @param consumerRecords one polled batch of Kafka records
     */
    @ApiOperation(value = "消费kafka数据,过滤并存入oss", notes = "消费kafka数据,过滤并存入oss")
    @KafkaListener(id = "${kafka.consumer.topics}", topics = {"${kafka.consumer.topics}"},
            containerFactory = "kafkaListenerContainerFactory")
    public void readKafkaData(List<ConsumerRecord> consumerRecords){
        // NOTE(review): raw ConsumerRecord kept to match dealRecord's
        // signature; consider List<ConsumerRecord<String, String>> once the
        // service is parameterized.
        kafkaConsumerService.dealRecord(consumerRecords);
    }
}