下载 Zookeeper
# Download the ZooKeeper 3.8.1 binary tarball next to the Dockerfile (the ADD
# instruction below expects it in the build context).
# NOTE(review): --no-check-certificate disables TLS certificate verification;
# drop it if the host trust store can validate dlcdn.apache.org.
wget https://dlcdn.apache.org/zookeeper/zookeeper-3.8.1/apache-zookeeper-3.8.1-bin.tar.gz --no-check-certificate
注意还需要java环境
配置
# Create the shared config dir plus one data dir per cluster node
# (these host paths are bind-mounted into the containers by docker-compose).
mkdir conf data/{zookeeper-node1,zookeeper-node2,zookeeper-node3}/data -p
# Main ZooKeeper configuration file. The heredoc body (including its Chinese
# comments) is written verbatim into conf/zoo.cfg.
cat >conf/zoo.cfg<<EOF
# tickTime:Zookeeper 服务器之间或客户端与服务器之间维持心跳的时间间隔,也就是每个 tickTime 时间就会发送一个心跳。tickTime以毫秒为单位。session最小有效时间为tickTime*2
tickTime=2000
# Zookeeper保存数据的目录,默认情况下,Zookeeper将写数据的日志文件也保存在这个目录里。不要使用/tmp目录
dataDir=/opt/apache/zookeeper/data
# 端口,默认就是2181
clientPort=2181
# 集群中的follower服务器(F)与leader服务器(L)之间初始连接时能容忍的最多心跳数(tickTime的数量),超过此数量没有回复会断开链接
initLimit=10
# 集群中的follower服务器与leader服务器之间请求和应答之间能容忍的最多心跳数(tickTime的数量)
syncLimit=5
# 最大客户端链接数量,0不限制,默认是0
maxClientCnxns=60
# zookeeper集群配置项,server.1,server.2,server.3是zk集群节点;zookeeper-node1,zookeeper-node2,zookeeper-node3是主机名称;2888是主从通信端口;3888用来选举leader
server.1=zookeeper-node1:2888:3888
server.2=zookeeper-node2:2888:3888
server.3=zookeeper-node3:2888:3888
EOF
# Create a file named myid in each node's freshly created data directory.
# Its content is the N from the matching server.N line above; docker-compose
# bind-mounts the per-node file over ${ZOOKEEPER_HOME}/data/myid.
echo 1 > ./data/zookeeper-node1/data/myid
echo 2 > ./data/zookeeper-node2/data/myid
echo 3 > ./data/zookeeper-node3/data/myid
启动脚本 bootstrap.sh
#!/usr/bin/env sh
# Container entrypoint: run ZooKeeper in the foreground as PID 1.
#
# The original script started the daemonized server and tailed logs/*.out to
# keep the container alive. That has three problems: the container stays "up"
# even if the JVM dies (so `restart: always` never restarts it), SIGTERM from
# `docker stop` hits `tail` instead of ZooKeeper, and `tail -f` errors out if
# no .out file exists yet on first start. `exec` + start-foreground makes the
# container lifecycle track the server process and forwards signals correctly.
exec "${ZOOKEEPER_HOME}/bin/zkServer.sh" start-foreground
构建镜像 Dockerfile
FROM registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/centos:7.7.1908

# Set the container timezone to Asia/Shanghai.
RUN rm -f /etc/localtime && ln -sv /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo "Asia/Shanghai" > /etc/timezone

# Locale must be set with ENV: `RUN export LANG=...` only affects the shell of
# that single build layer and is lost at runtime.
ENV LANG=zh_CN.UTF-8

# Create the hadoop user/group with fixed ids matching `user: "10000:10000"`
# expected by the compose file.
RUN groupadd --system --gid=10000 hadoop \
 && useradd --system --home-dir /home/hadoop --uid=10000 --gid=hadoop -m hadoop

# Install sudo plus runtime tools (net-tools provides netstat, used by the
# compose healthcheck), grant hadoop passwordless sudo, and clean the yum
# cache in the same layer so it never lands in the image.
# NOTE: the original ran `yum -y install install net-tools ...`; the duplicated
# word "install" was passed to yum as a (nonexistent) package name.
# sudoers must be mode 0440 -- sudo refuses to run with other modes (the
# original chmod 640 would break sudo).
RUN yum -y install sudo net-tools telnet wget nc less \
 && echo "hadoop ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers \
 && chmod 440 /etc/sudoers \
 && yum clean all \
 && rm -rf /var/cache/yum

RUN mkdir -p /opt/apache/

# JDK -- ADD auto-extracts a local tarball from the build context.
ADD jdk-8u212-linux-x64.tar.gz /opt/apache/
ENV JAVA_HOME=/opt/apache/jdk1.8.0_212
ENV PATH=$JAVA_HOME/bin:$PATH

# ZooKeeper server (the original comment said "trino server" -- copy/paste slip).
ENV ZOOKEEPER_VERSION=3.8.1
ADD apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz /opt/apache/
ENV ZOOKEEPER_HOME=/opt/apache/zookeeper
RUN ln -s /opt/apache/apache-zookeeper-${ZOOKEEPER_VERSION}-bin $ZOOKEEPER_HOME

# Data dir, default config, and a placeholder myid in one layer; the compose
# file bind-mounts the real zoo.cfg and the per-node myid over these paths.
RUN mkdir ${ZOOKEEPER_HOME}/data \
 && cp ${ZOOKEEPER_HOME}/conf/zoo_sample.cfg ${ZOOKEEPER_HOME}/conf/zoo.cfg \
 && echo 1 > ${ZOOKEEPER_HOME}/data/myid

COPY bootstrap.sh /opt/apache/
RUN chmod +x /opt/apache/bootstrap.sh \
 && chown -R hadoop:hadoop /opt/apache

# Documentation only: client port, quorum port, leader-election port.
EXPOSE 2181 2888 3888

# Run as the unprivileged user by default (compose also sets `user:`).
USER hadoop
WORKDIR $ZOOKEEPER_HOME
# Default start command; the compose `command:` overrides this if present.
CMD ["/opt/apache/bootstrap.sh"]
开始构建镜像
# Build the image (--no-cache forces every layer to rebuild).
docker build -t registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/zookeeper:3.8.1 . --no-cache
创建网络
# Create the shared network. Do NOT name it hadoop_network -- that name breaks
# the hs2 service startup later!!!
docker network create hadoop-network
# List networks to confirm it exists.
docker network ls
编排 docker-compose.yaml
# Three-node ZooKeeper ensemble. Each service mounts the shared zoo.cfg and
# its own myid file; ${ZOOKEEPER_HOME} and the *_SERVER_PORT variables are
# substituted by docker-compose from the .env file.
# NOTE: the original listed "privileged=true" under `environment:`. That only
# defines an environment variable literally named "privileged" inside the
# container -- it does NOT enable privileged mode (which ZooKeeper does not
# need) -- so it has been removed.
version: '3'
services:
  zookeeper-node1:
    image: registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/zookeeper:3.8.1
    user: "hadoop:hadoop"
    container_name: zookeeper-node1
    hostname: zookeeper-node1
    restart: always
    environment:
      - TZ=Asia/Shanghai
    env_file:
      - .env
    volumes:
      - ./conf/zoo.cfg:${ZOOKEEPER_HOME}/conf/zoo.cfg
      - ./data/zookeeper-node1/data/myid:${ZOOKEEPER_HOME}/data/myid
    ports:
      - "${ZOOKEEPER_NODE1_SERVER_PORT}:2181"
    # Documentation: quorum (2888) and leader-election (3888) ports,
    # reachable by the other nodes over hadoop-network.
    expose:
      - 2888
      - 3888
    command: ["sh","-c","/opt/apache/bootstrap.sh"]
    networks:
      - hadoop-network
    # Healthy when something is listening on the client port
    # (netstat comes from net-tools, installed in the image).
    healthcheck:
      test: ["CMD-SHELL", "netstat -tnlp|grep :2181 || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 5
  zookeeper-node2:
    image: registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/zookeeper:3.8.1
    user: "hadoop:hadoop"
    container_name: zookeeper-node2
    hostname: zookeeper-node2
    restart: always
    environment:
      - TZ=Asia/Shanghai
    env_file:
      - .env
    volumes:
      - ./conf/zoo.cfg:${ZOOKEEPER_HOME}/conf/zoo.cfg
      - ./data/zookeeper-node2/data/myid:${ZOOKEEPER_HOME}/data/myid
    ports:
      - "${ZOOKEEPER_NODE2_SERVER_PORT}:2181"
    expose:
      - 2888
      - 3888
    command: ["sh","-c","/opt/apache/bootstrap.sh"]
    networks:
      - hadoop-network
    healthcheck:
      test: ["CMD-SHELL", "netstat -tnlp|grep :2181 || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 5
  zookeeper-node3:
    image: registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/zookeeper:3.8.1
    user: "hadoop:hadoop"
    container_name: zookeeper-node3
    hostname: zookeeper-node3
    restart: always
    environment:
      - TZ=Asia/Shanghai
    env_file:
      - .env
    volumes:
      - ./conf/zoo.cfg:${ZOOKEEPER_HOME}/conf/zoo.cfg
      - ./data/zookeeper-node3/data/myid:${ZOOKEEPER_HOME}/data/myid
    ports:
      - "${ZOOKEEPER_NODE3_SERVER_PORT}:2181"
    expose:
      - 2888
      - 3888
    command: ["sh","-c","/opt/apache/bootstrap.sh"]
    networks:
      - hadoop-network
    healthcheck:
      test: ["CMD-SHELL", "netstat -tnlp|grep :2181 || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 5
# Join the pre-created external network (docker network create hadoop-network).
networks:
  hadoop-network:
    external: true
.env 环境变量文件内容如下:
# Externally published client ports (one per node) plus the in-container
# ZooKeeper home path used for variable substitution in docker-compose.yaml.
cat << EOF > .env
ZOOKEEPER_HOME=/opt/apache/zookeeper
ZOOKEEPER_NODE1_SERVER_PORT=31181
ZOOKEEPER_NODE2_SERVER_PORT=32181
ZOOKEEPER_NODE3_SERVER_PORT=33181
EOF
开始部署
docker-compose -f docker-compose.yaml up -d
# List the services and their state / health.
docker-compose -f docker-compose.yaml ps
测试验证
# Check each node's role: one leader and two followers are expected.
docker exec -it zookeeper-node1 bash
${ZOOKEEPER_HOME}/bin/zkServer.sh status
exit
docker exec -it zookeeper-node2 bash
${ZOOKEEPER_HOME}/bin/zkServer.sh status
exit
docker exec -it zookeeper-node3 bash
${ZOOKEEPER_HOME}/bin/zkServer.sh status
常用的 zookeeper 客户端命令
创建节点
# Open a shell in any one of the cluster containers.
docker exec -it zookeeper-node1 bash
# Connect with the CLI client.
${ZOOKEEPER_HOME}/bin/zkCli.sh -server zookeeper-node1:2181
# [persistent node] remains after creation until explicitly deleted:
create /zk-node data
# [persistent sequential node] persists; zk appends an auto-incrementing numeric suffix (up to MAX(int)) to the name:
create -s /zk-node data
# [ephemeral node] tied to the session; removed when the client session ends:
create -e /zk-node-temp data
# [ephemeral sequential node] ephemeral node + sequential name suffix:
create -s -e /zk-node-temp data
查看节点
# Open a shell in any one of the cluster containers.
docker exec -it zookeeper-node1 bash
# Connect with the CLI client.
${ZOOKEEPER_HOME}/bin/zkCli.sh -server zookeeper-node1:2181
# List the direct children of a node (first level only).
ls /
# Print a node's data content and attributes.
get /zk-node
更新节点
# Syntax: set ${path} ${data} [version]
set /zk-node hello
get /zk-node
删除节点
# Fails for nodes that still have children; use `deleteall /zk-node` for those.
delete /zk-node
# Recursively delete a non-empty node.
deleteall /zk-node
退出交互式
# Show available commands.
help
# Leave the interactive shell.
quit
非交互式命令
# Append the command to the invocation to run it non-interactively.
${ZOOKEEPER_HOME}/bin/zkCli.sh -server zookeeper-node1:2181 ls /