Deploying a ZooKeeper cluster on k8s (3 nodes: 1 leader, 2 followers)

Preface

Environment: a CentOS 7.9 Kubernetes cluster
To install a ZooKeeper cluster on Kubernetes, we will follow the official Kubernetes documentation; that is the safer route, since all kinds of installation methods float around online. The official tutorial used here is https://kubernetes.io/docs/tutorials/stateful-application/zookeeper/.

Installing the ZooKeeper cluster with the official Kubernetes tutorial

# Download the ZooKeeper yaml file from the official k8s site; you can also view it yourself at https://kubernetes.io/docs/tutorials/stateful-application/zookeeper/
wget https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/application/zookeeper/zookeeper.yaml

# Inspect zookeeper.yaml
cat zookeeper.yaml
You will notice the file contains a PodDisruptionBudget resource: it guarantees a minimum number of ZooKeeper pods in the k8s cluster by capping, via the PDB, how many may be unavailable at once.
Also, the image referenced by the official manifest cannot be pulled; pull the mirror image below instead and then retag it:
docker pull mirrorgooglecontainers/kubernetes-zookeeper:1.0-3.4.10
docker tag mirrorgooglecontainers/kubernetes-zookeeper:1.0-3.4.10 registry.k8s.io/kubernetes-zookeeper:1.0-3.4.10
docker rmi mirrorgooglecontainers/kubernetes-zookeeper:1.0-3.4.10
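
Because imagePullPolicy is set to IfNotPresent below, the retagged image has to exist on every node that might run a zk pod. A minimal sketch of doing that in one pass, assuming Docker is the container runtime, passwordless ssh works, and the node hostnames are node1 and node2 (node2 is an assumption; adjust to your environment):
for n in node1 node2; do
  ssh "$n" "docker pull mirrorgooglecontainers/kubernetes-zookeeper:1.0-3.4.10 && \
            docker tag mirrorgooglecontainers/kubernetes-zookeeper:1.0-3.4.10 registry.k8s.io/kubernetes-zookeeper:1.0-3.4.10"
done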
# Edit zookeeper.yaml; we need to tweak it for our actual environment
vim zookeeper.yaml
apiVersion: v1
kind: Service				# Creates a headless Service named zk-hs for ports 2888 and 3888
metadata:
  name: zk-hs
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service			# Also creates a client-facing Service
metadata:
  name: zk-cs
  labels:
    app: zk
spec:
  ports:
  - port: 2181
    name: client
  selector:
    app: zk
---
apiVersion: policy/v1
kind: PodDisruptionBudget	# Defines the PodDisruptionBudget resource
metadata:
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1			# at most 1 pod may be unavailable at a time
---
apiVersion: apps/v1			# The zk cluster itself is deployed as a StatefulSet
kind: StatefulSet	
metadata:
  name: zk
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3				# 3 replicas
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zk
    spec:
#      affinity:				# Pod anti-affinity is commented out here because I only have 2 worker nodes
#        podAntiAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            - labelSelector:
#                matchExpressions:
#                  - key: "app"
#                    operator: In
#                    values:
#                    - zk
#              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: IfNotPresent		# pull policy changed from the original Always to IfNotPresent
        image: "registry.k8s.io/kubernetes-zookeeper:1.0-3.4.10"	#这个镜像拉取不下来,等下我们换个镜像
        resources:
          requests:
            memory: "300M"					#这里原本是1Gi的,被我改成300M
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 500M							# originally 10Gi, reduced to 500M
      storageClassName: "nfs-storageclass"		# added by me; specifies the StorageClass to use
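The volumeClaimTemplates above reference a StorageClass named nfs-storageclass, which must exist before the StatefulSet is applied, otherwise the PVCs will stay Pending. A minimal sketch of such a StorageClass, assuming an NFS dynamic provisioner is already running in the cluster (the provisioner name below is the default used by the common nfs-subdir-external-provisioner and is an assumption; match it to whatever your provisioner registers):
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storageclass
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner   # assumed; must match your NFS provisioner
reclaimPolicy: Retain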

# Deploy the ZooKeeper cluster
kubectl apply -f zookeeper.yaml
# Check the status; all pods should be Running
kubectl get pods -l app=zk
NAME   READY   STATUS    RESTARTS   AGE
zk-0   1/1     Running   0          6m54s
zk-1   1/1     Running   0          6m31s
zk-2   1/1     Running   0          6m19s
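
Each replica should also have a Bound PVC created from the volumeClaimTemplates; the claim names follow the <template-name>-<pod-name> pattern, so a quick check looks like this:
kubectl get pvc datadir-zk-0 datadir-zk-1 datadir-zk-2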

Verifying that ZooKeeper is working properly

# First, check the fully qualified hostname of each of the 3 ZooKeeper pods
[root@matser /]# for i in 0 1 2;do kubectl exec zk-$i -n default -- hostname -f;done
zk-0.zk-hs.default.svc.cluster.local
zk-1.zk-hs.default.svc.cluster.local
zk-2.zk-hs.default.svc.cluster.local
[root@matser /]# 
# From another pod, the zk-0 pod is reachable by pinging its hostname directly
[root@matser /]# kubectl  exec -it deployment-busybox-567674bd67-lklvf -- ping zk-0.zk-hs.default.svc.cluster.local
PING zk-0.zk-hs.default.svc.cluster.local (10.244.166.134): 56 data bytes
64 bytes from 10.244.166.134: seq=0 ttl=63 time=0.071 ms
[root@matser /]# kubectl  get pods -o wide
NAME         READY   STATUS    RESTARTS      AGE     IP        		  NODE    NOMINATED NODE   READINESS GATES
zk-0         1/1     Running   0             36m     10.244.166.134   node1   <none>           <none>
[root@matser /]# 
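
Besides pinging individual pods, you can resolve the headless Service name itself, which should return the IPs of all three pods (assuming the busybox image in use ships the nslookup applet):
kubectl exec -it deployment-busybox-567674bd67-lklvf -- nslookup zk-hs.default.svc.cluster.local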
# Check the role of each of the 3 ZooKeeper nodes
[root@matser /]# for i in 0 1 2;do kubectl exec zk-$i -n default -- zkServer.sh status;done
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: leader
[root@matser /]#

# Check each node's myid (the start-zookeeper script derives it from the pod ordinal: ordinal + 1)
[root@matser /]# for i in 0 1 2;do echo -n "zk-$i " ;kubectl exec zk-$i -n default -- cat /var/lib/zookeeper/data/myid;done
zk-0 1
zk-1 2
zk-2 3
# Inspect the generated ZooKeeper configuration file
[root@matser /]# kubectl exec -it -n default  zk-0 -- cat /opt/zookeeper/conf/zoo.cfg
#This file was autogenerated DO NOT EDIT
clientPort=2181
dataDir=/var/lib/zookeeper/data
dataLogDir=/var/lib/zookeeper/data/log
tickTime=2000
initLimit=10
syncLimit=5
maxClientCnxns=60
minSessionTimeout=4000
maxSessionTimeout=40000
autopurge.snapRetainCount=3
autopurge.purgeInteval=12
server.1=zk-0.zk-hs.default.svc.cluster.local:2888:3888
server.2=zk-1.zk-hs.default.svc.cluster.local:2888:3888
server.3=zk-2.zk-hs.default.svc.cluster.local:2888:3888
[root@matser /]# 
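
Note that in-cluster clients would normally go through the zk-cs client Service rather than a specific pod. A quick connectivity check through that Service, run from one of the zk pods since zkCli.sh is already on its PATH:
kubectl exec -it -n default zk-0 -- zkCli.sh -server zk-cs.default.svc.cluster.local:2181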

Verifying the availability of the ZooKeeper cluster

# Exec into the container
kubectl exec -it -n default zk-0 -- bash
zookeeper@zk-0:/$ zkCli.sh									# log in; just press Enter at the prompt
[zk: localhost:2181(CONNECTED) 11] create /zk-test hdfdf	# create a znode and write some data
[zk: localhost:2181(CONNECTED) 11] get  /zk-test        	# read the znode back
hdfdf				# the data we wrote
cZxid = 0x100000003
ctime = Wed Nov 09 10:55:15 UTC 2022
mZxid = 0x100000003
mtime = Wed Nov 09 10:55:15 UTC 2022
pZxid = 0x100000003
cversion = 0
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 5
numChildren = 0
[zk: localhost:2181(CONNECTED) 12]
# Log in to another zk node; the znode created above is visible there, which shows the cluster replicates data correctly
kubectl exec -it -n default  zk-1 -- bash	
zkCli.sh
[zk: localhost:2181(CONNECTED) 0] get /zk-test
hdfdf
cZxid = 0x100000003
ctime = Wed Nov 09 10:55:15 UTC 2022
mZxid = 0x100000003
mtime = Wed Nov 09 10:55:15 UTC 2022
pZxid = 0x100000003
cversion = 0
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 5
numChildren = 0
[zk: localhost:2181(CONNECTED) 1] 	

Deleting pods to simulate failures and verify the cluster recovers

[root@matser /]# kubectl  delete  pods zk-2 zk-0
pod "zk-2" deleted
pod "zk-0" deleted
[root@matser /]# kubectl  get pods -l app=zk
NAME   READY   STATUS    RESTARTS   AGE
zk-0   1/1     Running   0          25s
zk-1   1/1     Running   0          63m
zk-2   0/1     Running   0          4s
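
The StatefulSet controller recreates the deleted pods under the same names and reattaches each one's original PVC, so no data is lost. To watch the recovery as it happens:
kubectl get pods -w -l app=zk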

# Verify the cluster status (healthy; note that a new leader was elected after zk-2 was deleted)
[root@matser /]# for i in 0 1 2;do kubectl exec zk-$i -n default -- zkServer.sh status;done
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: leader
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
[root@matser /]# 

# Verify that the znode we created above, and its data, survived the pod deletions
kubectl exec -it -n default  zk-2 -- bash	# exec into the pod's container
zookeeper@zk-2:/$ zkCli.sh					# log in to the zk cluster
[zk: localhost:2181(CONNECTED) 0] ls /		# the znode is still there
[zk-test, zookeeper]
[zk: localhost:2181(CONNECTED) 1] get /zk-test		# and the data is intact
hdfdf
cZxid = 0x100000003
ctime = Wed Nov 09 10:55:15 UTC 2022
mZxid = 0x100000003
mtime = Wed Nov 09 10:55:15 UTC 2022
pZxid = 0x100000003
cversion = 0
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 5
numChildren = 0
[zk: localhost:2181(CONNECTED) 2] 
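
When you are done, delete the manifest to tear the cluster down. The PVCs (and the data on the NFS share behind them) are deliberately left in place by the controller, so remove them separately if you want a clean slate:
kubectl delete -f zookeeper.yaml
kubectl delete pvc datadir-zk-0 datadir-zk-1 datadir-zk-2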