Deploying a 3-replica Kafka + ZooKeeper cluster on Kubernetes with StatefulSets

1. Deploy the ZooKeeper cluster YAML (kubectl apply -f xxxx.yml)
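
The manifests below assume that the public-midd namespace already exists and that the worker nodes carry the node-role.kubernetes.io/worker=true label (it is required by the nodeAffinity rule in the StatefulSet further down); both names come from this document and can be changed. A minimal preparation step:

kubectl create namespace public-midd
kubectl label node node-1 node-2 node-3 node-role.kubernetes.io/worker=true   # use your own node names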



# The namespace and resource names can be changed to your liking

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer  # delayed binding; important so a local PVC binds only after its pod is scheduled

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-zookeeper-1      
spec:
  capacity:
    storage: 50Gi                                # storage size; adjust as needed
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  local:
    path: /data/public-midd/zookeeper-data      # local directory on the node; adjust as needed
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - node-1                        # worker node name; adjust

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-zookeeper-2
spec:
  capacity:
    storage: 50Gi                                # storage size; adjust as needed
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  local:
    path: /data/public-midd/zookeeper-data      # local directory on the node; adjust as needed
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - node-2                        # worker node name; adjust
                
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-zookeeper-3
spec:
  capacity:
    storage: 50Gi                                # storage size; adjust as needed
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  local:
    path: /data/public-midd/zookeeper-data      # local directory on the node; adjust as needed
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - node-3                        # worker node name; adjust
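
# NOTE: Kubernetes does not create the directory behind a local PV. Before the
# pods can schedule, create the path on each node listed above, e.g.:
#   mkdir -p /data/public-midd/zookeeper-data && chown 1001:1001 /data/public-midd/zookeeper-data
# (1001 is the UID/fsGroup the Bitnami ZooKeeper container runs with)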
---

apiVersion: v1
kind: Service
metadata:
  name: zookeeper-headless
  namespace: public-midd
  labels:
    app: zookeeper
spec:
  type: ClusterIP
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
    - name: tcp-client
      port: 2181
      targetPort: client
    - name: tcp-follower
      port: 2888
      targetPort: follower
    - name: tcp-election
      port: 3888
      targetPort: election
  selector:
    app: zookeeper
---

apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  namespace: public-midd
  labels:
    app: zookeeper
spec:
  type: ClusterIP
  sessionAffinity: None
  ports:
    - name: tcp-client
      port: 2181
      targetPort: client
    - name: tcp-follower
      port: 2888
      targetPort: follower
    - name: tcp-election
      port: 3888
      targetPort: election
  selector:
    app: zookeeper

---

apiVersion: v1
kind: ConfigMap
metadata:
  name: zookeeper-scripts
  namespace: public-midd
  labels:
    app: zookeeper
data:
  init-certs.sh: |-
    #!/bin/bash
    # intentionally empty: TLS/auth is disabled in this deployment
  setup.sh: |-
    #!/bin/bash
    # Reuse the id from a previous run if the data dir already has one;
    # otherwise derive it from the StatefulSet ordinal in the pod name (+1).
    if [[ -f "/bitnami/zookeeper/data/myid" ]]; then
        export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)"
    else
        HOSTNAME="$(hostname -s)"
        if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
            ORD=${BASH_REMATCH[2]}
            export ZOO_SERVER_ID="$((ORD + 1))"
        else
            echo "Failed to get index from hostname $HOSTNAME"
            exit 1
        fi
    fi
    exec /entrypoint.sh /run.sh

---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper
  namespace: public-midd
  labels:
    app: zookeeper
spec:
  replicas: 3
  podManagementPolicy: Parallel
  selector:
    matchLabels:
      app: zookeeper
  serviceName: zookeeper-headless
  updateStrategy:
    rollingUpdate: {}
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: zookeeper
    spec:
      serviceAccountName: default
      affinity:
        nodeAffinity:                                      # node affinity
          requiredDuringSchedulingIgnoredDuringExecution:  # hard requirement
            nodeSelectorTerms:
            - matchExpressions:
              - key: node-role.kubernetes.io/worker        # nodes must be labeled node-role.kubernetes.io/worker=true
                operator: In
                values:
                  - "true"
        podAntiAffinity:                                    # pod anti-affinity
          preferredDuringSchedulingIgnoredDuringExecution:  # soft rule: prefer spreading pods across nodes
          - weight: 49                                      # weight used to rank multiple preferred rules
            podAffinityTerm:
              topologyKey: kubernetes.io/hostname           # topologyKey must be a node label; hostname spreads per node
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - zookeeper
      securityContext:
        fsGroup: 1001
      containers:
        - name: zookeeper
          image: bitnami/zookeeper:3.8.0-debian-10-r0
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsNonRoot: true
            runAsUser: 1001
          command:
            - /scripts/setup.sh
          resources:                                       # requests only -> Burstable QoS; set matching limits for Guaranteed
            #limits:
              #cpu: 500m
              #memory: 500Mi
            requests:
              cpu: 500m
              memory: 500Mi
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: ZOO_DATA_LOG_DIR
              value: ""
            - name: ZOO_PORT_NUMBER
              value: "2181"
            - name: ZOO_TICK_TIME
              value: "2000"
            - name: ZOO_INIT_LIMIT
              value: "10"
            - name: ZOO_SYNC_LIMIT
              value: "5"
            - name: ZOO_PRE_ALLOC_SIZE
              value: "65536"
            - name: ZOO_SNAPCOUNT
              value: "100000"
            - name: ZOO_MAX_CLIENT_CNXNS
              value: "60"
            - name: ZOO_4LW_COMMANDS_WHITELIST
              value: "srvr, mntr, ruok"
            - name: ZOO_LISTEN_ALLIPS_ENABLED
              value: "no"
            - name: ZOO_AUTOPURGE_INTERVAL
              value: "0"
            - name: ZOO_AUTOPURGE_RETAIN_COUNT
              value: "3"
            - name: ZOO_MAX_SESSION_TIMEOUT
              value: "40000"
            - name: ZOO_SERVERS
              value: zookeeper-0.zookeeper-headless.public-midd.svc.cluster.local:2888:3888::1 zookeeper-1.zookeeper-headless.public-midd.svc.cluster.local:2888:3888::2 zookeeper-2.zookeeper-headless.public-midd.svc.cluster.local:2888:3888::3   # ZooKeeper ensemble members; adjust if the namespace or Service name changes
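              # Each entry is <host>:<quorumPort>:<electionPort>::<serverId>
              # (Bitnami syntax); the trailing id must match the ZOO_SERVER_ID
              # that setup.sh derives from the pod ordinal.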
            - name: ZOO_ENABLE_AUTH
              value: "no"
            - name: ZOO_HEAP_SIZE
              value: "1024"
            - name: ZOO_LOG_LEVEL
              value: "ERROR"
            - name: ALLOW_ANONYMOUS_LOGIN
              value: "yes"
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
          ports:
            - name: client
              containerPort: 2181
            - name: follower
              containerPort: 2888
            - name: election
              containerPort: 3888
          livenessProbe:
            failureThreshold: 6
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok']
          readinessProbe:
            failureThreshold: 6
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok']
          volumeMounts:
            - name: scripts
              mountPath: /scripts/setup.sh
              subPath: setup.sh
            - name: zookeeper-data
              mountPath: /bitnami/zookeeper
      volumes:
        - name: scripts
          configMap:
            name: zookeeper-scripts
            defaultMode: 0755
  volumeClaimTemplates:
  - metadata:
      name: zookeeper-data
    spec:
      storageClassName: local-path
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 50Gi


---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zookeeper-pdb
  namespace: public-midd
spec:
  selector:
    matchLabels:
      app: zookeeper
  minAvailable: 2     # keep at least 2 pods up during voluntary disruptions (rolling updates, node drains)
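
Once all three pods are Running, a quick sanity check (these commands assume the pod names zookeeper-0/1/2 that the StatefulSet generates; nc ships in the Bitnami image, and srvr is whitelisted in ZOO_4LW_COMMANDS_WHITELIST above):

kubectl -n public-midd get pods -l app=zookeeper
# one pod should report Mode: leader, the other two Mode: follower
kubectl -n public-midd exec zookeeper-0 -- bash -c 'echo srvr | nc -w 2 localhost 2181'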

2. Deploy the Kafka cluster




#  local-sc.yml — the same StorageClass as in step 1; skip this document if it already exists
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer  # delayed binding; important so a local PVC binds only after its pod is scheduled
---

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-kafka-1      # adjust
spec:
  capacity:
    storage: 30Gi       # confirm size
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  local:
    path: /data/kafka-1     # adjust
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - k8s-worker1      # adjust

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-kafka-2  # adjust
spec:
  capacity:
    storage: 30Gi  # confirm size
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  local:
    path: /data/kafka-2   # adjust
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - k8s-worker2  # adjust
                
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-kafka-3          # adjust
spec:
  capacity:
    storage: 30Gi   # confirm size
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-path
  local:
    path: /data/kafka-3 # adjust
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - k8s-worker3  # adjust
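
# NOTE: as with the ZooKeeper PVs, Kubernetes will not create the backing
# directory for a local PV; pre-create each path on its node, e.g. on
# k8s-worker1: mkdir -p /data/kafka-1 && chown 1001:1001 /data/kafka-1
# (1001 is the UID the Bitnami Kafka container runs with)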

---
# Headless Service, used for inter-broker communication and stable per-pod DNS
apiVersion: v1
kind: Service
metadata:
  name: kafka-headless
  namespace: public-midd
  labels:
    app: kafka
spec:
  type: ClusterIP
  clusterIP: None
  ports:
  - name: kafka
    port: 9092
    targetPort: kafka
  selector:
    app: kafka
---
# NodePort Service, intended for access to Kafka from outside the cluster
apiVersion: v1
kind: Service
metadata:
  name: kafka
  namespace: public-midd
  labels:
    app: kafka
spec:
  type: NodePort
  ports:
  - name: kafka
    port: 9092
    targetPort: kafka
    nodePort: 30092
  selector:
    app: kafka
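# NOTE: the brokers advertise <pod>.kafka-headless.public-midd (see
# KAFKA_CFG_ADVERTISED_LISTENERS below), so clients bootstrapping through this
# NodePort get redirected to in-cluster DNS names they may not resolve. True
# external access needs an extra EXTERNAL listener advertised with a reachable
# node address and port.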
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  namespace: public-midd
  labels:
    app: kafka
spec:
  selector:
    matchLabels:
      app: kafka
  serviceName: kafka-headless
  podManagementPolicy: "Parallel"
  replicas: 3
  updateStrategy:
    type: "RollingUpdate"
  template:
    metadata:
      name: kafka
      labels:
        app: kafka
    spec: 
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:  
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - kafka
            topologyKey: kubernetes.io/hostname    
      securityContext:
        fsGroup: 1001
        runAsUser: 1001
      containers:
      - name: kafka
        image: bitnami/kafka:3.4.1          # alternative: bitnami/kafka:2.8.1-debian-11-r7
        imagePullPolicy: "IfNotPresent"
        resources:
          #limits:
            #cpu: 500m
            #memory: 512Mi
          requests:
            cpu: 250m
            memory: 256Mi
        env:
        - name: MY_POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: MY_POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: KAFKA_CFG_ZOOKEEPER_CONNECT
          value: zookeeper-0.zookeeper-headless.public-midd:2181,zookeeper-1.zookeeper-headless.public-midd:2181,zookeeper-2.zookeeper-headless.public-midd:2181           # adjust to match the ZooKeeper Service name and namespace
        - name: KAFKA_PORT_NUMBER
          value: "9092"
        - name: KAFKA_CFG_LISTENERS
          value: "PLAINTEXT://:$(KAFKA_PORT_NUMBER)"
        - name: KAFKA_CFG_ADVERTISED_LISTENERS
          value: 'PLAINTEXT://$(MY_POD_NAME).kafka-headless.public-midd:$(KAFKA_PORT_NUMBER)'
        - name: ALLOW_PLAINTEXT_LISTENER
          value: "yes"
        - name: KAFKA_HEAP_OPTS
          value: "-Xmx512m -Xms512m"
        - name: KAFKA_CFG_LOG_DIRS             # maps to Kafka's log.dirs
          value: /bitnami/kafka/data           # keep data on the PV mounted at /bitnami/kafka below

        ports:
        - name: kafka
          containerPort: 9092
        livenessProbe:
          tcpSocket:
            port: kafka
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 2
        readinessProbe:
          tcpSocket:
            port: kafka
          initialDelaySeconds: 5
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 6
        volumeMounts:
        - name: kafka-data
          mountPath: /bitnami/kafka
      restartPolicy: Always
      terminationGracePeriodSeconds: 300
  volumeClaimTemplates:
    - metadata:
        name: kafka-data
      spec:
        storageClassName: local-path    # the StorageClass created above
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: 30Gi        # must not exceed the PV size; adjust as needed

---

apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: kafka-pdb
  namespace: public-midd
spec:
  selector:
    matchLabels:
      app: kafka
  minAvailable: 2
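
Once the brokers are Running, an end-to-end smoke test from inside the cluster (kafka-0 is the first pod the StatefulSet creates; the Bitnami image ships the Kafka CLI tools on its PATH):

kubectl -n public-midd exec -it kafka-0 -- kafka-topics.sh --create --topic smoke-test --partitions 3 --replication-factor 3 --bootstrap-server kafka-headless:9092
kubectl -n public-midd exec -it kafka-0 -- kafka-topics.sh --describe --topic smoke-test --bootstrap-server kafka-headless:9092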
