#由于Kafka依赖于Zookeeper集群,所以在安装之前需要先部署Zookeeper集群
一.环境说明
#系统:Centos7
#服务版本:Zookeeper-3.7.1 Kafka-3.0.2
#主机ip:node1/10.0.0.100 node2/10.0.0.101 node3/10.0.0.102
二.部署Zookeeper集群
往期博文已经讲过Zookeeper集群的搭建,还未部署请戳-->ZooKeeper集群搭建
三.部署Kafka
1.下载压缩包
#官方下载地址:Index of /dist/kafka
#国内下载地址: Index of /apache/kafka
[root@node1 ~]#wget https://mirrors.tuna.tsinghua.edu.cn/apache/kafka/3.0.2/kafka_2.13-3.0.2.tgz
[root@node2 ~]#wget https://mirrors.tuna.tsinghua.edu.cn/apache/kafka/3.0.2/kafka_2.13-3.0.2.tgz
[root@node3 ~]#wget https://mirrors.tuna.tsinghua.edu.cn/apache/kafka/3.0.2/kafka_2.13-3.0.2.tgz
2.解压
[root@node1 ~]# tar xf kafka_2.13-3.0.2.tgz -C /usr/local/
[root@node2 ~]# tar xf kafka_2.13-3.0.2.tgz -C /usr/local/
[root@node3 ~]# tar xf kafka_2.13-3.0.2.tgz -C /usr/local/
3.创建软连接
[root@node1 ~]#ln -s /usr/local/kafka_2.13-3.0.2/ /usr/local/kafka
[root@node2 ~]#ln -s /usr/local/kafka_2.13-3.0.2/ /usr/local/kafka
[root@node3 ~]#ln -s /usr/local/kafka_2.13-3.0.2/ /usr/local/kafka
4.配置全局变量
[root@node1 ~]#echo 'PATH=/usr/local/kafka/bin:$PATH' > /etc/profile.d/kafka.sh
[root@node2 ~]#echo 'PATH=/usr/local/kafka/bin:$PATH' > /etc/profile.d/kafka.sh
[root@node3 ~]#echo 'PATH=/usr/local/kafka/bin:$PATH' > /etc/profile.d/kafka.sh
[root@node1 ~]#. /etc/profile.d/kafka.sh
[root@node2 ~]#. /etc/profile.d/kafka.sh
[root@node3 ~]#. /etc/profile.d/kafka.sh
5.修改第一个节点的配置文件
[root@node1 ~]#vim /usr/local/kafka/config/server.properties
broker.id=1
#每个broker在集群中每个节点的正整数唯一标识,此值保存在log.dirs下的
#meta.properties文件中
listeners=PLAINTEXT://10.0.0.100:9092
#指定当前主机的IP做为监听地址(node1的IP为10.0.0.100)
log.dirs=/usr/local/kafka/data
#kakfa用于保存数据的目录,所有的消息都会存储在该目录当中
num.partitions=1
#设置创建新的topic时默认分区数量,建议和kafka的节点数量一致
log.retention.hours=168
#设置kafka中消息保留时间,默认为168小时即7天
zookeeper.connect=10.0.0.100:2181,10.0.0.101:2181,10.0.0.102:2181
#指定连接的zk的地址,zk中存储了broker的元数据信息
zookeeper.connection.timeout.ms=18000
#设置连接zookeeper的超时时间,单位为ms
6.创建数据目录(三个节点都需要创建,log.dirs指向该路径)
[root@node1 ~]#mkdir /usr/local/kafka/data
[root@node2 ~]#mkdir /usr/local/kafka/data
[root@node3 ~]#mkdir /usr/local/kafka/data
7.将修改过的配置文件远程复制到另外两个节点上
[root@node1 ~]# scp /usr/local/kafka/config/server.properties 10.0.0.101:/usr/local/kafka/config
[root@node1 ~]# scp /usr/local/kafka/config/server.properties 10.0.0.102:/usr/local/kafka/config
8.修改第二个节点的配置文件
[root@node2 ~]#vim /usr/local/kafka/config/server.properties
broker.id=2
listeners=PLAINTEXT://10.0.0.101:9092
9.修改第三个节点的配置文件
[root@node3 ~]#vim /usr/local/kafka/config/server.properties
broker.id=3
listeners=PLAINTEXT://10.0.0.102:9092
10.启动服务
[root@node1 ~]#kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
[root@node2 ~]#kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
[root@node3 ~]#kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
11.验证服务状态
[root@node1 ~]#ss -ntl | grep 9092
[root@node2 ~]#ss -ntl | grep 9092
[root@node3 ~]#ss -ntl | grep 9092
12.配置service文件
vim /lib/systemd/system/kafka.service
[Unit]
Description=Apache Kafka server (broker)
After=network.target zookeeper.service
[Service]
Type=simple
##系统环境变量:echo $PATH
Environment="PATH=/usr/java/jdk1.8.0_281/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/usr/local/kafka/bin"
User=root
Group=root
##kafka启动运行的脚本指定配置文件
ExecStart=/usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties
ExecStop=/usr/local/kafka/bin/kafka-server-stop.sh
Restart=on-failure
[Install]
WantedBy=multi-user.target
13.重载配置并重启服务
systemctl daemon-reload
systemctl restart kafka.service