Linux中Kafka集群搭建实战与常用命令
想要了解zookeeper集群搭建实战请移步博文:Linux搭建Zookeeper集群实战
环境准备:
三台服务器:192.168.0.105、192.168.0.106、192.168.0.107(这里为CentOS 7 64位)
JDK:1.8
Zookeeper集群
Kafka:kafka_2.13-2.6.0.tgz
搭建三板斧:
上传、解压、配置server.properties
192.168.0.105
上传kafka并解压
[root@cloud_1 bin]# cd /usr/local/
[root@cloud_1 local]# ls
bin etc games include java lib lib64 libexec sbin share src zookeeper
[root@cloud_1 local]# mkdir kafka
[root@cloud_1 local]# cd kafka/
[root@cloud_1 kafka]# ls
kafka_2.13-2.6.0.tgz
[root@cloud_1 kafka]# tar -zxvf kafka_2.13-2.6.0.tgz
[root@cloud_1 kafka]# ls
kafka_2.13-2.6.0 kafka_2.13-2.6.0.tgz
创建kafka存储目录并配置server.properties
[root@cloud_1 kafka]# cd kafka_2.13-2.6.0
[root@cloud_1 kafka_2.13-2.6.0]# ls
bin config libs LICENSE NOTICE site-docs
[root@cloud_1 kafka_2.13-2.6.0]# mkdir kafka-logs
[root@cloud_1 kafka_2.13-2.6.0]# ls
bin config kafka-logs libs LICENSE NOTICE site-docs
[root@cloud_1 kafka_2.13-2.6.0]# cd config/
[root@cloud_1 config]# ls
connect-console-sink.properties connect-file-sink.properties connect-mirror-maker.properties log4j.properties tools-log4j.properties
connect-console-source.properties connect-file-source.properties connect-standalone.properties producer.properties trogdor.conf
connect-distributed.properties connect-log4j.properties consumer.properties server.properties zookeeper.properties
[root@cloud_1 config]# vi server.properties
[root@cloud_1 config]# cat server.properties | grep -v '#'
#集群唯一编号,不能重复
broker.id=0
# 端口号
port=9092
# 主机ip或者主机名(注:host.name 在较新版本中已被弃用,官方推荐改用 listeners=PLAINTEXT://192.168.0.105:9092 —— 请以所用版本文档为准)
host.name=192.168.0.105
# broker处理消息的线程数
num.network.threads=3
# borker处理磁盘io的线程数
num.io.threads=8
# socket发送数据的缓冲区
socket.send.buffer.bytes=102400
# socket接收数据的缓冲区
socket.receive.buffer.bytes=102400
# broker处理数据的最大缓冲区
socket.request.max.bytes=104857600
# kafka存放数据的地址
log.dirs=/usr/local/kafka/kafka_2.13-2.6.0/kafka-logs
# kafka分区
num.partitions=1
# kafka恢复线程数
num.recovery.threads.per.data.dir=1
# __consumer_offsets 内部主题的副本数
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
# zk集群配置
zookeeper.connect=192.168.0.105:2181,192.168.0.106:2181,192.168.0.107:2181
# zk链接超时的最大时间
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
192.168.0.106与107
三台服务器中:server.properties
#集群唯一编号,不能重复
broker.id=0
so...
各自broker.id与host.name有区别:
192.168.0.105 broker.id=0
192.168.0.106 broker.id=1
192.168.0.107 broker.id=2
host.name=192.168.0.105
host.name=192.168.0.106
host.name=192.168.0.107
其余均一致;
三台服务器均开放9092白名单端口方便通信
firewall-cmd --zone=public --add-port=9092/tcp --permanent
firewall-cmd --reload
否则:创建消费者时消费消息失败;
常用操作命令
后台启动kafka
[root@cloud_1 bin]# ./kafka-server-start.sh -daemon ../config/server.properties
查看进程
[root@cloud_1 bin]# jps
2050 QuorumPeerMain
2690 Kafka
2758 Jps
创建主题
[root@cloud_1 bin]# ./kafka-topics.sh --create --zookeeper 192.168.0.105:2181,192.168.0.106:2181,192.168.0.107:2181 --replication-factor 1 --partitions 1 --topic test1
ps:
--create 创建
--delete 删除
--zookeeper 后面参数是zk集群节点
--replication-factor 1 副本数
--partitions 1 分区数
--topic aa 主题名是aa
查看主题
[root@cloud_1 bin]# ./kafka-topics.sh --list --zookeeper 192.168.0.105:2181,192.168.0.106:2181,192.168.0.107:2181
test1
cloud_test_1
查看指定主题
[root@cloud_1 bin]# ./kafka-topics.sh --describe --zookeeper 192.168.0.105:2181,192.168.0.106:2181,192.168.0.107:2181 --topic test1
Topic: test1 PartitionCount: 1 ReplicationFactor: 1 Configs:
Topic: test1 Partition: 0 Leader: 3 Replicas: 3 Isr: 3
删除主题
[root@cloud_1 bin]# ./kafka-topics.sh --delete --zookeeper 192.168.0.105:2181,192.168.0.106:2181,192.168.0.107:2181 --topic cloud_test_1
创建生产者
[root@cloud_1 bin]# ./kafka-console-producer.sh --broker-list 192.168.0.105:9092,192.168.0.106:9092,192.168.0.107:9092 --topic test1
>
创建消费者
[root@cloud_1 bin]# ./kafka-console-consumer.sh --bootstrap-server 192.168.0.105:9092,192.168.0.106:9092,192.168.0.107:9092 --topic test1 --consumer-property group.id=consumer1 --partition 0 --offset 0
发送消息并通信
生产者
消费者
同分区同组另一个消费者
消息消费失败