Creating a Kafka Cluster in Kubernetes and Exposing It for External Access

The YAML resource files are shown directly below.

Create the ZooKeeper cluster resource file (zk-sts.yaml) and apply it:

kubectl apply -f zk-sts.yaml -n namespace

apiVersion: v1
kind: Service
metadata:
  name: zookeeper-hs
  labels:
    app: zookeeper
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zookeeper
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  labels:
    app: zookeeper
spec:
  ports:
  - port: 2181
    name: zookeeper-client
  selector:
    app: zookeeper

---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper
spec:
  selector:
    matchLabels:
      app: zookeeper
  serviceName: zookeeper-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zookeeper
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - zookeeper
              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: zookeeper
        imagePullPolicy: Always
        image: "fastop/zookeeper:3.4.10"
        command:
          - sh
          - -c
          - "start-zookeeper \
                  --servers=3 \
                  --data_dir=/var/lib/zookeeper/data \
                  --data_log_dir=/var/lib/zookeeper/data/log \
                  --conf_dir=/opt/zookeeper/conf \
                  --client_port=2181 \
                  --election_port=3888 \
                  --server_port=2888 \
                  --tick_time=2000 \
                  --init_limit=10 \
                  --sync_limit=5 \
                  --heap=512M \
                  --max_client_cnxns=60 \
                  --snap_retain_count=3 \
                  --purge_interval=12 \
                  --max_session_timeout=40000 \
                  --min_session_timeout=4000 \
                  --log_level=INFO"
        ports:
          - containerPort: 2181
            name: client
          - containerPort: 2888
            name: server
          - containerPort: 3888
            name: leader-election
        volumeMounts:
        - name: zookeeper-data
          mountPath: /var/lib/zookeeper
        - mountPath: /etc/localtime
          readOnly: true 
          name: time-data
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
      volumes: 
      - name: time-data 
        hostPath: 
          path: /usr/share/zoneinfo/Asia/Shanghai
  volumeClaimTemplates:
  - metadata:
      name: zookeeper-data
      labels:
        app: zk
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: glusterfs
      resources:
        requests:
          storage: 10Gi
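
After applying the file, wait until all three pods are Ready and confirm that the ensemble has elected a leader. A minimal check, assuming zkServer.sh is on the PATH inside the fastop/zookeeper container (adjust the path if it is not):

kubectl get pods -n namespace -l app=zookeeper
kubectl exec -n namespace zookeeper-0 -- zkServer.sh status
kubectl exec -n namespace zookeeper-1 -- zkServer.sh status
kubectl exec -n namespace zookeeper-2 -- zkServer.sh status

One pod should report Mode: leader and the other two Mode: follower.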

Create the Kafka cluster resource file (ka-sts.yaml):

apiVersion: v1
kind: Service
metadata:
  name: kafka-svc
  labels:
    app: kafka
spec:
  ports:
  - port: 9092
    name: server
  clusterIP: None
  selector:
    app: kafka

---
apiVersion: v1
kind: Service
metadata:
  name: kafka-0
  labels:
    app: kafka
spec:
  ports:
  - port: 9092
    targetPort: 9092
    nodePort: 30092
    name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: kafka-0
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-1
  labels:
    app: kafka
spec:
  ports:
  - port: 9092
    targetPort: 9092
    nodePort: 30093
    name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: kafka-1
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-2
  labels:
    app: kafka
spec:
  ports:
  - port: 9092
    targetPort: 9092
    nodePort: 30094
    name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: kafka-2
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
spec:
  selector:
    matchLabels:
      app: kafka
  serviceName: kafka-svc
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: kafka
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - kafka
              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kafka
        command:
          - bash
          - -ec
          - |
            HOSTNAME=`hostname -s`
            if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
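              # Derive the pod ordinal from the StatefulSet hostname (e.g. kafka-0 -> 0) and
              # map it to the matching NodePort (30092 + ordinal), so each broker advertises
              # the address external clients actually use. 192.168.1.106 below is a node IP
              # of this example cluster; replace it with an address reachable by your clients.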
              ORD=${BASH_REMATCH[2]}
              PORT=$((ORD + 30092))
              export KAFKA_CFG_ADVERTISED_LISTENERS="PLAINTEXT://192.168.1.106:$PORT"
            else
              echo "Failed to get index from hostname $HOST"
              exit 1
            fi
            exec /entrypoint.sh /run.sh
        image: "bitnami/kafka:2"
        env:
          - name: ALLOW_PLAINTEXT_LISTENER
            value: "yes"
          - name: KAFKA_CFG_ZOOKEEPER_CONNECT
            value: "zookeeper-0.zookeeper-hs:2181,zookeeper-1.zookeeper-hs:2181,zookeeper-2.zookeeper-hs:2181"
          - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR
            value: "3"
          - name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR
            value: "3"
          - name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR
            value: "3"
          - name: KAFKA_HEAP_OPTS
            value: "-Xmx6g -Xms6g"
          - name: KAFKA_CFG_MESSAGE_MAX_BYTES
            value: "100000000"
          - name: KAFKA_CFG_LOG_RETENTION_HOURS
            value: "24"
          - name: KAFKA_CFG_MAX_PARTITION_FETCH_BYTES
            value: "200000000"
        ports:
          - containerPort: 9092
        volumeMounts:
          - name: kafka-data
            mountPath: /bitnami
          - mountPath: /etc/localtime
            readOnly: true 
            name: time-data
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
      volumes: 
      - name: time-data 
        hostPath: 
          path: /usr/share/zoneinfo/Asia/Shanghai
  volumeClaimTemplates:
  - metadata:
      name: kafka-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: glusterfs
      resources:
        requests:
          storage: 40G

Run kubectl apply -f ka-sts.yaml -n namespace
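
Once the pods are Running, you can confirm that each broker picked up the expected external address; the broker prints its effective configuration at startup, so an illustrative check is:

kubectl get pods -n namespace -l app=kafka
kubectl logs -n namespace kafka-0 | grep advertised.listeners

kafka-0 should advertise port 30092, kafka-1 port 30093, and kafka-2 port 30094.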

Access the brokers from outside the cluster at nodeIP:30092, nodeIP:30093, and nodeIP:30094, where nodeIP is the address hard-coded in KAFKA_CFG_ADVERTISED_LISTENERS (192.168.1.106 in this example).
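
As a quick smoke test from a machine outside the cluster that has the Kafka command-line tools installed (the address and topic name below are only examples; with the broker default auto.create.topics.enable=true the topic is created on first write):

kafka-console-producer.sh --broker-list 192.168.1.106:30092 --topic external-test
kafka-console-consumer.sh --bootstrap-server 192.168.1.106:30092 --topic external-test --from-beginning

Messages typed into the producer should appear in the consumer, which confirms that the NodePort services and the advertised listeners are wired up correctly.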
