k8s 部署zookeeper-kafka集群 —— 筑梦之路

119 篇文章 7 订阅
108 篇文章 3 订阅
规划:

三个zookeeper
三个kafka

添加亲和性规则


#直接上yaml

#zookeeper-deploy.yaml

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper
  namespace: merry
  labels: {app.kubernetes.io/name: zookeeper, helm.sh/chart: zookeeper-5.22.2, app.kubernetes.io/instance: zookeeper,
    app.kubernetes.io/managed-by: Helm, app.kubernetes.io/component: zookeeper, role: zookeeper}
spec:
  serviceName: zookeeper-headless
  replicas: 3
  podManagementPolicy: Parallel
  updateStrategy: {type: RollingUpdate}
  selector:
    matchLabels: {app.kubernetes.io/name: zookeeper, app.kubernetes.io/instance: zookeeper,
      app.kubernetes.io/component: zookeeper}
  template:
    metadata:
      name: zookeeper
      labels: {app.kubernetes.io/name: zookeeper, helm.sh/chart: zookeeper-5.22.2,
        app.kubernetes.io/instance: zookeeper, app.kubernetes.io/managed-by: Helm,
        app.kubernetes.io/component: zookeeper}
    spec:
      serviceAccountName: default
      securityContext: {fsGroup: 1001}
      affinity:
        # FIX: the required anti-affinity originally targeted component
        # "kafka", which (a) did not spread the three zookeeper replicas
        # across nodes, and (b) forbade any node from hosting both a
        # zookeeper and a kafka pod, making the kafka StatefulSet's
        # preferred co-location with zookeeper unsatisfiable. It now
        # targets "zookeeper": no two zookeeper pods share a node.
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app.kubernetes.io/component"
                    operator: In
                    values:
                    - zookeeper
              topologyKey: "kubernetes.io/hostname"
        # Soft preference to land next to kafka brokers (mirror image of
        # the kafka StatefulSet, which prefers nodes running zookeeper).
        podAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
             - weight: 1
               podAffinityTerm:
                 labelSelector:
                    matchExpressions:
                      - key: "app.kubernetes.io/component"
                        operator: In
                        values:
                        - kafka
                 topologyKey: "kubernetes.io/hostname"
      containers:
      - name: zookeeper
        # NOTE(review): tag looks like a bitnami/zookeeper tag but has no
        # registry/repository prefix — confirm where this cluster pulls from.
        image: zookeeper:3.6.2-debian-10-r37
        imagePullPolicy: IfNotPresent
        securityContext: {runAsUser: 1001}
        command:
        - bash
        - -ec
        - |
          # Derive ZOO_SERVER_ID (1-based) from the StatefulSet pod ordinal,
          # then hand off to the image's normal entrypoint.
          HOSTNAME=`hostname -s`
          if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
            ORD=${BASH_REMATCH[2]}
            export ZOO_SERVER_ID=$((ORD+1))
          else
            echo "Failed to get index from hostname $HOSTNAME"
            exit 1
          fi
          exec /entrypoint.sh /run.sh
        resources:
          requests: {cpu: 150m, memory: 256Mi}
        env:
        - {name: ZOO_DATA_LOG_DIR, value: ''}
        - {name: ZOO_PORT_NUMBER, value: '2181'}
        - {name: ZOO_TICK_TIME, value: '2000'}
        - {name: ZOO_INIT_LIMIT, value: '10'}
        - {name: ZOO_SYNC_LIMIT, value: '5'}
        - {name: ZOO_MAX_CLIENT_CNXNS, value: '60'}
        - {name: ZOO_4LW_COMMANDS_WHITELIST, value: 'srvr, mntr, ruok'}
        - {name: ZOO_LISTEN_ALLIPS_ENABLED, value: 'no'}
        - {name: ZOO_AUTOPURGE_INTERVAL, value: '0'}
        - {name: ZOO_AUTOPURGE_RETAIN_COUNT, value: '3'}
        - {name: ZOO_MAX_SESSION_TIMEOUT, value: '40000'}
        # FIX: the ensemble addresses pointed at namespace "trs" while every
        # resource in this file deploys into "merry" — quorum could never
        # form because the peer DNS names did not resolve.
        - {name: ZOO_SERVERS, value: 'zookeeper-0.zookeeper-headless.merry.svc.cluster.local:2888:3888
            zookeeper-1.zookeeper-headless.merry.svc.cluster.local:2888:3888 zookeeper-2.zookeeper-headless.merry.svc.cluster.local:2888:3888'}
        - {name: ZOO_ENABLE_AUTH, value: 'no'}
        # NOTE(review): a 1024 MB heap exceeds the 256Mi memory request and
        # there is no memory limit — revisit before enabling limits/quotas.
        - {name: ZOO_HEAP_SIZE, value: '1024'}
        - {name: ZOO_LOG_LEVEL, value: ERROR}
        - {name: ALLOW_ANONYMOUS_LOGIN, value: 'yes'}
        - name: POD_NAME
          valueFrom:
            fieldRef: {apiVersion: v1, fieldPath: metadata.name}
        ports:
        - {name: client, containerPort: 2181}
        - {name: follower, containerPort: 2888}
        - {name: election, containerPort: 3888}
        livenessProbe:
          exec:
            command: [/bin/bash, -c, echo "ruok" | timeout 2 nc -w 2 localhost 2181
                | grep imok]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 6
        readinessProbe:
          exec:
            command: [/bin/bash, -c, echo "ruok" | timeout 2 nc -w 2 localhost 2181
                | grep imok]
          initialDelaySeconds: 5
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 6
        volumeMounts:
        - {name: data, mountPath: /bitnami/zookeeper}
  volumeClaimTemplates:
  - metadata: {name: data}
    spec:
      accessModes: [ReadWriteOnce]
      resources:
        requests: {storage: 1000Gi}
      storageClassName: managed-nfs-storage

#zookeeper-svc.yaml

# Headless Service: gives each zookeeper pod a stable per-pod DNS name
# (zookeeper-<ordinal>.zookeeper-headless...) used for quorum traffic.
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-headless
  namespace: merry
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/instance: zookeeper
    app.kubernetes.io/component: zookeeper
    app.kubernetes.io/managed-by: Helm
    helm.sh/chart: zookeeper-5.22.2
spec:
  type: ClusterIP
  clusterIP: None  # headless: DNS resolves to the individual pod IPs
  publishNotReadyAddresses: true  # peers must resolve before pods turn Ready
  selector:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/instance: zookeeper
    app.kubernetes.io/component: zookeeper
  ports:
    - name: tcp-client
      port: 2181
      targetPort: client
    - name: follower
      port: 2888
      targetPort: follower
    - name: tcp-election
      port: 3888
      targetPort: election
---
apiVersion: v1
kind: Service
# Client-facing Service with a ClusterIP; clients (e.g. kafka) connect to
# "zookeeper-svc" instead of addressing individual pods.
metadata:
  name: zookeeper-svc
  namespace: merry
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/instance: zookeeper
    app.kubernetes.io/component: zookeeper
    app.kubernetes.io/managed-by: Helm
    helm.sh/chart: zookeeper-5.22.2
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/instance: zookeeper
    app.kubernetes.io/component: zookeeper
  ports:
    - name: tcp-client
      port: 2181
      targetPort: client
    - name: follower
      port: 2888
      targetPort: follower
    - name: tcp-election
      port: 3888
      targetPort: election

#zookeeper-pdb.yaml

---
# FIX: policy/v1beta1 PodDisruptionBudget was removed in Kubernetes 1.25;
# policy/v1 is available since 1.21.
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zookeeper
  namespace: merry
  labels: {app.kubernetes.io/name: zookeeper, helm.sh/chart: zookeeper-5.22.2, app.kubernetes.io/instance: zookeeper,
    app.kubernetes.io/managed-by: Helm, app.kubernetes.io/component: zookeeper}
spec:
  selector:
    matchLabels: {app.kubernetes.io/name: zookeeper, app.kubernetes.io/instance: zookeeper,
      app.kubernetes.io/component: zookeeper}
  # At most one zookeeper pod may be evicted voluntarily at a time,
  # preserving the 2-of-3 quorum.
  maxUnavailable: 1

#kafka-cm.yaml
---
# Source: kafka/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kafka-configuration
  namespace: merry
  labels:
    app.kubernetes.io/name: kafka
    helm.sh/chart: kafka-7.2.0
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
data:
  # Mounted by the kafka StatefulSet at
  # /opt/bitnami/kafka/conf/server.properties (subPath mount).
  # NOTE(review): many of these options are also set through KAFKA_CFG_*
  # env vars on the kafka pods (with different values in some cases, e.g.
  # the zookeeper connection timeout) — confirm which source the image's
  # entrypoint honors and remove the duplicate to avoid divergence.
  # NOTE(review): advertised.listeners with no hostname cannot be correct
  # for all three brokers; the per-pod value appears to come from
  # KAFKA_CFG_ADVERTISED_LISTENERS in the StatefulSet — verify.
  server.properties: |-
    broker.id=-1
    listeners=PLAINTEXT://:9092
    advertised.listeners=PLAINTEXT://:9092
    num.network.threads=3
    num.io.threads=8
    socket.send.buffer.bytes=102400
    socket.receive.buffer.bytes=102400
    socket.request.max.bytes=104857600
    log.dirs=/bitnami/kafka/data
    num.partitions=1
    num.recovery.threads.per.data.dir=1
    offsets.topic.replication.factor=1
    transaction.state.log.replication.factor=1
    transaction.state.log.min.isr=1
    log.flush.interval.messages=10000
    log.flush.interval.ms=1000
    log.retention.hours=168
    log.retention.bytes=1073741824
    log.segment.bytes=1073741824
    log.retention.check.interval.ms=300000
    zookeeper.connect=zookeeper-svc:2181
    zookeeper.connection.timeout.ms=60000
    group.initial.rebalance.delay.ms=0
    message.max.bytes=31457280
    auto.leader.rebalance.enable=true
    unclean.leader.election.enable=true

#kafka-deploy.yaml

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  namespace: merry
  labels: {app.kubernetes.io/name: kafka, helm.sh/chart: kafka-7.2.0, app.kubernetes.io/instance: kafka,
    app.kubernetes.io/managed-by: Helm, app.kubernetes.io/component: kafka, role: kafka}
spec:
  selector:
    matchLabels: {app.kubernetes.io/name: kafka, app.kubernetes.io/instance: kafka,
      app.kubernetes.io/component: kafka}
  serviceName: kafka-headless
  podManagementPolicy: Parallel
  replicas: 3
  updateStrategy: {type: RollingUpdate}
  template:
    metadata:
      labels: {app.kubernetes.io/name: kafka, helm.sh/chart: kafka-7.2.0, app.kubernetes.io/instance: kafka,
        app.kubernetes.io/managed-by: Helm, app.kubernetes.io/component: kafka}
    spec:
      securityContext: {fsGroup: 1001, runAsUser: 1001}
      nodeSelector: {}
      tolerations: []
      affinity:
        # Hard rule: never co-schedule two kafka brokers on the same node.
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app.kubernetes.io/component"
                    operator: In
                    values:
                    - kafka
              topologyKey: "kubernetes.io/hostname"
        # Soft preference: land on nodes that already run a zookeeper pod.
        podAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
             - weight: 1
               podAffinityTerm:
                 labelSelector:
                    matchExpressions:
                      - key: "app.kubernetes.io/component"
                        operator: In
                        values:
                        - zookeeper
                 topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kafka
        # NOTE(review): tag looks like a bitnami/kafka tag but has no
        # registry/repository prefix — confirm where this cluster pulls from.
        image: kafka:2.4.0-debian-10-r0
        imagePullPolicy: IfNotPresent
        resources:
          limits: {cpu: 1000m, memory: 9216Mi}
          requests: {cpu: 170m, memory: 1024Mi}
        env:
        - {name: BITNAMI_DEBUG, value: 'false'}
        - name: MY_POD_IP
          valueFrom:
            fieldRef: {fieldPath: status.podIP}
        - name: MY_POD_NAME
          valueFrom:
            fieldRef: {fieldPath: metadata.name}
        # FIX: this variable had no value at all, handing the broker an
        # empty zookeeper connect string; point it at the zookeeper client
        # Service defined in this file.
        - {name: KAFKA_CFG_ZOOKEEPER_CONNECT, value: 'zookeeper-svc.merry.svc.cluster.local:2181'}
        - {name: KAFKA_PORT_NUMBER, value: '9092'}
        - {name: KAFKA_CFG_LISTENERS, value: 'PLAINTEXT://:$(KAFKA_PORT_NUMBER)'}
        # FIX: advertised address pointed at namespace "trs"; kafka-headless
        # lives in "merry", so clients could never reach the brokers at the
        # address they advertised.
        - {name: KAFKA_CFG_ADVERTISED_LISTENERS, value: 'PLAINTEXT://$(MY_POD_NAME).kafka-headless.merry.svc.cluster.local:$(KAFKA_PORT_NUMBER)'}
        - {name: ALLOW_PLAINTEXT_LISTENER, value: 'yes'}
        - {name: KAFKA_CFG_BROKER_ID, value: '-1'}
        - {name: KAFKA_CFG_DELETE_TOPIC_ENABLE, value: 'false'}
        - {name: KAFKA_HEAP_OPTS, value: -Xmx1024m -Xms1024m}
        - {name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES, value: '10000'}
        - {name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS, value: '1000'}
        - {name: KAFKA_CFG_LOG_RETENTION_BYTES, value: '1073741824'}
        - {name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS, value: '300000'}
        - {name: KAFKA_CFG_LOG_RETENTION_HOURS, value: '168'}
        - {name: KAFKA_CFG_MESSAGE_MAX_BYTES, value: '1000012'}
        - {name: KAFKA_CFG_LOG_SEGMENT_BYTES, value: '1073741824'}
        - {name: KAFKA_CFG_LOG_DIRS, value: /bitnami/kafka/data}
        - {name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR, value: '1'}
        - {name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR, value: '1'}
        - {name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR, value: '1'}
        - {name: KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM, value: https}
        - {name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR, value: '1'}
        - {name: KAFKA_CFG_NUM_IO_THREADS, value: '8'}
        - {name: KAFKA_CFG_NUM_NETWORK_THREADS, value: '3'}
        - {name: KAFKA_CFG_NUM_PARTITIONS, value: '1'}
        - {name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR, value: '1'}
        - {name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES, value: '102400'}
        - {name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES, value: '104857600'}
        - {name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES, value: '102400'}
        # NOTE(review): 6000 ms here vs 60000 ms in the kafka-configuration
        # ConfigMap's server.properties — confirm which value is intended.
        - {name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS, value: '6000'}
        ports:
        - {name: kafka, containerPort: 9092}
        livenessProbe:
          tcpSocket: {port: kafka}
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 2
        readinessProbe:
          tcpSocket: {port: kafka}
          initialDelaySeconds: 5
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 6
        volumeMounts:
        - {name: data, mountPath: /bitnami/kafka}
        - {name: kafka-config, mountPath: /opt/bitnami/kafka/conf/server.properties,
          subPath: server.properties}
      volumes:
      - name: kafka-config
        configMap: {name: kafka-configuration}
  volumeClaimTemplates:
  - metadata: {name: data}
    spec:
      accessModes: [ReadWriteOnce]
      resources:
        requests: {storage: 1000Gi}
      storageClassName: managed-nfs-storage

#kafka-svc.yaml

# Source: kafka/templates/svc-headless.yaml
# Headless Service: gives each broker a stable per-pod DNS name
# (kafka-<ordinal>.kafka-headless...) used in advertised listeners.
apiVersion: v1
kind: Service
metadata:
  name: kafka-headless
  namespace: merry
  labels:
    app.kubernetes.io/name: kafka
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/component: kafka
    app.kubernetes.io/managed-by: Helm
    helm.sh/chart: kafka-7.2.0
spec:
  type: ClusterIP
  clusterIP: None  # headless: DNS resolves to the individual pod IPs
  selector:
    app.kubernetes.io/name: kafka
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/component: kafka
  ports:
    - name: kafka
      port: 9092
      targetPort: kafka
---
# Source: kafka/templates/svc.yaml
apiVersion: v1
kind: Service
# Client-facing Service with a ClusterIP for bootstrap connections.
metadata:
  name: kafka-svc
  namespace: merry
  labels:
    app.kubernetes.io/name: kafka
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/component: kafka
    app.kubernetes.io/managed-by: Helm
    helm.sh/chart: kafka-7.2.0
  annotations: {}
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/name: kafka
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/component: kafka
  ports:
    - name: kafka
      port: 9092
      targetPort: kafka

#kafka-pdb.yaml

---
# Source: kafka/templates/poddisruptionbudget.yaml
# FIX: policy/v1beta1 PodDisruptionBudget was removed in Kubernetes 1.25;
# policy/v1 is available since 1.21.
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: kafka
  namespace: merry
  labels:
    app.kubernetes.io/name: kafka
    helm.sh/chart: kafka-7.2.0
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: kafka
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: kafka
      app.kubernetes.io/instance: kafka
      app.kubernetes.io/component: kafka
  # At most one broker may be evicted voluntarily at a time.
  maxUnavailable: 1

  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值