# zookeeper搭建
1.准备三台机器(安装JDK):
192.168.110.21 kafka-01
192.168.110.22 kafka-02
192.168.110.23 kafka-03
2.下载zookeeper安装包
cd /opt/module
# 下载zookeeper安装包
wget http://archive.apache.org/dist/zookeeper/stable/apache-zookeeper-3.6.3-bin.tar.gz
3.解压后进入目录修改配置文件
# 解压并重命名(教程后文均使用 /opt/module/zookeeper 路径)
tar -zxvf apache-zookeeper-3.6.3-bin.tar.gz
mv apache-zookeeper-3.6.3-bin zookeeper
cd ./zookeeper/conf
# 添加zookeeper配置⽂件
cp zoo_sample.cfg zoo.cfg
# 创建数据存放⽬录
mkdir /opt/module/zookeeper/conf/data
vim zoo.cfg
# 添加如下内容(格式为 server.X=主机:通信端口:选举端口,X 需与该节点 myid 一致)
server.1=192.168.110.21:2888:3888
server.2=192.168.110.22:2888:3888
server.3=192.168.110.23:2888:3888
# 修改dataDir
dataDir=/opt/module/zookeeper/conf/data
# 配置⽂件保存退出后,进⼊data⽬录
cd ../data
# ⽣成myid⽂件,指定myid服务号
echo "1" > myid
4.
将zookeeper⽬录分发到其他节点
# 分发到其他集群节点
scp -r zookeeper/ kafka-02:/opt/module
scp -r zookeeper/ kafka-03:/opt/module
5.
修改其他节点的myid⽂件
# 登录kafka-02
cd /opt/module/zookeeper/conf/data
# 指定myid服务号为 2
echo 2 > myid
# 登录kafka-03
cd /opt/module/zookeeper/conf/data
# 指定myid服务号为 3
echo 3 > myid
6.
编写操作zookeeper集群的脚本
cd /opt/module/zookeeper/bin
# 创建zookeeper启动脚本
vim zk.sh
# 添加如下内容
#!/bin/bash
# zk.sh — start/stop/query the ZooKeeper ensemble on every cluster node via ssh.
# Usage: zk.sh start|stop|status
# Assumes password-less ssh from this node to all hosts listed below.

ZK_HOME=/opt/module/zookeeper
HOSTS="192.168.110.21 192.168.110.22 192.168.110.23"

case "$1" in
"start")
  for i in $HOSTS; do
    echo "-------------------------------- $i zookeeper 启动 --------------------------------"
    ssh "$i" "$ZK_HOME/bin/zkServer.sh start"
  done
  ;;
"stop")
  for i in $HOSTS; do
    echo "-------------------------------- $i zookeeper 停止 --------------------------------"
    ssh "$i" "$ZK_HOME/bin/zkServer.sh stop"
  done
  ;;
"status")
  for i in $HOSTS; do
    echo "-------------------------------- $i zookeeper 状态 --------------------------------"
    ssh "$i" "$ZK_HOME/bin/zkServer.sh status"
  done
  ;;
*)
  # Unknown/missing subcommand: print usage instead of silently doing nothing.
  echo "Usage: $0 {start|stop|status}" >&2
  ;;
esac
# 保存退出后,修改zk.sh脚本执⾏权限
chmod +x ./zk.sh
脚本的命令说明:
# 启动集群命令
./zk.sh start
# 停⽌集群命令
./zk.sh stop
# 查看集群状态命令
./zk.sh status
7.
启动集群
# 启动zookeeper集群
cd /opt/module/zookeeper
./bin/zk.sh start
8.
连接zookeeper集群
# 连接zookeeper集群
cd /opt/module/zookeeper
./bin/zkCli.sh
9.
创建zookeeper开机⾃启
vi /etc/rc.d/init.d/zk_start.sh
#!/bin/bash
#chkconfig:2345 20 90
#description:zookeeper
#processname:zookeeper
/opt/module/zookeeper/bin/zk.sh start
# 保存退出后,赋予执行权限并注册开机自启(注意服务名是 zk_start.sh,不是 zk.sh)
chmod +x /etc/rc.d/init.d/zk_start.sh
chkconfig --add zk_start.sh
# Kafka搭建
1.
下载安装包
cd /opt/module
# 下载kafka安装包
wget https://archive.apache.org/dist/kafka/2.6.0/kafka_2.13-2.6.0.tgz
2.
解压
# 解压kafka安装包
tar -zxvf kafka_2.13-2.6.0.tgz
mv kafka_2.13-2.6.0 kafka
3.
创建kafka消息⽬录
cd kafka
mkdir kafka-logs
4.
修改配置⽂件
vim /opt/module/kafka/config/server.properties
# 修改如下参数
broker.id=0
listeners=PLAINTEXT://192.168.110.21:9092
log.dirs=/opt/module/kafka/kafka-logs
zookeeper.connect=192.168.110.21:2181,192.168.110.22:2181,192.168.110.23:2181
参数说明:
broker.id : 集群内全局唯⼀标识,每个节点上需要设置不同的值
listeners:这个IP地址也是与本机相关的,每个节点上设置为⾃⼰的IP地址
log.dirs :存放kafka消息的
zookeeper.connect : 配置的是zookeeper集群地址
5.
分发kafka集群操作脚本
# 分发kafka安装⽬录给其他集群节点
scp -r /opt/module/kafka/ kafka-02:/opt/module
scp -r /opt/module/kafka/ kafka-03:/opt/module
分发完成后,其他集群节点都需要修改配置⽂件server.properties中的 broker.id 和listeners 参数。
6.
编写kafka集群操作脚本
# 导⼊java环境
vim /etc/profile
# 添加如下内容(注意:填写⾃⼰的java安装⽬录)
export JAVA_HOME=/usr/java/jdk1.8.0_131
export CLASSPATH=.:${JAVA_HOME}/jre/lib/rt.jar:${JAVA_HOME}/lib/dt.jar:${JAVA_HOME}/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
cd /opt/module/kafka/bin
# 创建kafka启动脚本
vim kafka-cluster.sh
# 添加如下内容
#!/bin/bash
# kafka-cluster.sh — start/stop the Kafka brokers on every cluster node via ssh.
# Usage: kafka-cluster.sh start|stop
# Assumes password-less ssh from this node to all hosts listed below.

KAFKA_HOME=/opt/module/kafka
HOSTS="192.168.110.21 192.168.110.22 192.168.110.23"

case "$1" in
"start")
  for i in $HOSTS; do
    echo "-------------------------------- $i kafka 启动 --------------------------------"
    # source /etc/profile so JAVA_HOME is visible in the non-interactive ssh shell.
    # NB: the script name is kafka-server-start.sh (the original tutorial text had
    # the hyphen lost to a line wrap: "kafka-serverstart.sh" does not exist).
    ssh "$i" "source /etc/profile; $KAFKA_HOME/bin/kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties"
  done
  ;;
"stop")
  for i in $HOSTS; do
    echo "-------------------------------- $i kafka 停止 --------------------------------"
    ssh "$i" "$KAFKA_HOME/bin/kafka-server-stop.sh"
  done
  ;;
*)
  # Unknown/missing subcommand: print usage instead of silently doing nothing.
  echo "Usage: $0 {start|stop}" >&2
  ;;
esac
# 保存退出后,修改执⾏权限
chmod +x ./kafka-cluster.sh
脚本命令说明:
启动kafka集群命令
./kafka-cluster.sh start
停⽌kafka集群命令
./kafka-cluster.sh stop
7.
启动kafka集群
⾸先启动zookeeper集群
然后执⾏kafka集群脚本启动命令
cd /opt/module/kafka/bin
./kafka-cluster.sh start
8.
添加开机⾃启
vi /etc/rc.d/init.d/kafka-cluster.sh
#!/bin/bash
#chkconfig:2345 20 90
#description:kafka
#processname:kafka
/opt/module/kafka/bin/kafka-cluster.sh start
# 保存退出后,赋予执行权限并注册开机自启(注意这里注册的是 kafka-cluster.sh,不是 zk.sh)
chmod +x /etc/rc.d/init.d/kafka-cluster.sh
chkconfig --add kafka-cluster.sh