0. Prerequisites
Basics and cluster setup:
https://gitee.com/moxi159753/LearningNotes/blob/master/K8S/3_%E4%BD%BF%E7%94%A8kubeadm%E6%96%B9%E5%BC%8F%E6%90%AD%E5%BB%BAK8S%E9%9B%86%E7%BE%A4/README.md
My cluster has three nodes:
k8smaster, k8snode1, k8snode2
Since there are not enough nodes, the taint is removed from the master node so that the scheduler can place pods on it as well.
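A minimal sketch of removing the taint, assuming the default kubeadm master taint (adjust the node name to your cluster):
#remove the master taint so regular pods can be scheduled there
kubectl taint nodes k8smaster node-role.kubernetes.io/master-
#verify the taint is gone
kubectl describe node k8smaster | grep Taint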
1. Basic Setup and Persistence
Create the namespaces
kubectl create namespace kafka
kubectl create ns monitoring
Create dynamically provisioned persistent storage (StorageClass)
Set up NFS (server and client)
Background reading on NFS:
https://blog.csdn.net/wangmiaoyan/article/details/102840639
https://blog.csdn.net/qq_36357820/article/details/78488077
Server: 192.168.88.161, exporting the /usr/local/nfs directory
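A rough setup sketch, assuming CentOS-style hosts (package names and service units vary by distribution):
#on the server (192.168.88.161)
yum install -y nfs-utils rpcbind
mkdir -p /usr/local/nfs
echo '/usr/local/nfs *(rw,sync,no_root_squash)' >> /etc/exports
systemctl enable --now rpcbind nfs-server
exportfs -rv
#on every k8s node (the NFS clients), install the mount helper and verify the export is visible
yum install -y nfs-utils
showmount -e 192.168.88.161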
Create the StorageClass
Use RBAC for authorization
vi rbac.yaml
apiVersion: v1              #create the ServiceAccount
kind: ServiceAccount
metadata:
  name: nfs-provisioner
  namespace: monitoring
---
kind: ClusterRole           #create the ClusterRole; it is cluster-scoped, so no namespace
apiVersion: rbac.authorization.k8s.io/v1   #must match what the cluster reports (kubectl api-versions)
metadata:
  name: nfs-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get", "create", "list", "watch", "update"]
  - apiGroups: ["extensions"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["nfs-provisioner"]
    verbs: ["use"]
---
kind: ClusterRoleBinding    #bind the ServiceAccount to the ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: monitoring
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
kubectl apply -f rbac.yaml
Create the provisioner
vi nfs-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:            #directory to mount inside the container
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:                     #provisioner configuration
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER     #set to your NFS server
              value: 192.168.88.161
            - name: NFS_PATH       #set to your exported directory
              value: /usr/local/nfs
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.88.161
            path: /usr/local/nfs
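Apply it and check that the provisioner pod comes up:
kubectl apply -f nfs-deployment.yaml
kubectl get pods -n monitoring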
Create the storageClass
vi storageClass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage            #StorageClass is cluster-scoped, so no namespace is needed
provisioner: fuseim.pri/ifs    #must match PROVISIONER_NAME in the provisioner deployment
reclaimPolicy: Retain
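Apply it and confirm the class is registered:
kubectl apply -f storageClass.yaml
kubectl get sc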
With all of the above in place, the StorageClass provides dynamic persistent storage: there is no need to create static PVs by hand; declaring a PVC when creating pods is enough, as the quick test below shows.
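A throwaway PVC (hypothetical, for verification only) should be bound automatically by the provisioner:
vi test-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
  namespace: monitoring
spec:
  accessModes: [ "ReadWriteMany" ]
  storageClassName: nfs-storage   #the class created above
  resources:
    requests:
      storage: 1Mi
kubectl apply -f test-pvc.yaml
kubectl get pv,pvc -n monitoring   #the PVC should show STATUS Bound within seconds
kubectl delete -f test-pvc.yaml    #clean up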
2. Deploy the ZooKeeper Cluster
https://kubernetes.io/zh/docs/tutorials/stateful-application/zookeeper/ (this follows the upstream tutorial, using a StatefulSet controller)
Create zookeeper.yaml (all-in-one version)
apiVersion: v1              #create the headless service for the ensemble
kind: Service
metadata:
  name: zk-hs
  namespace: kafka
  labels:
    app: zk
spec:
  ports:
    - port: 2888
      name: server
    - port: 3888
      name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  namespace: kafka
  labels:
    app: zk
spec:
  ports:
    - port: 2181
      name: client
  selector:
    app: zk
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
  namespace: kafka
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
  namespace: kafka
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zk
    spec:
      # nodeSelector:
      #   travis.io/schedule-only: "kafka"
      # tolerations:
      #   - key: "travis.io/schedule-only"
      #     operator: "Equal"
      #     value: "kafka"
      #     effect: "NoSchedule"
      #   - key: "travis.io/schedule-only"
      #     operator: "Equal"
      #     value: "kafka"
      #     effect: "NoExecute"
      #     tolerationSeconds: 3600
      #   - key: "travis.io/schedule-only"
      #     operator: "Equal"
      #     value: "kafka"
      #     effect: "PreferNoSchedule"
      affinity:
        podAntiAffinity:    #spread the servers across nodes
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - zk
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: kubernetes-zookeeper
          imagePullPolicy: IfNotPresent
          image: fastop/zookeeper:3.4.10
          resources:
            requests:
              memory: "1G"
              cpu: "0.5"
          ports:
            - containerPort: 2181
              name: client
            - containerPort: 2888
              name: server
            - containerPort: 3888
              name: leader-election
          command:
            - sh
            - -c
            - "start-zookeeper \
              --servers=3 \
              --data_dir=/var/lib/zookeeper/data \
              --data_log_dir=/var/lib/zookeeper/data/log \
              --conf_dir=/opt/zookeeper/conf \
              --client_port=2181 \
              --election_port=3888 \
              --server_port=2888 \
              --tick_time=2000 \
              --init_limit=10 \
              --sync_limit=5 \
              --heap=512M \
              --max_client_cnxns=60 \
              --snap_retain_count=3 \
              --purge_interval=12 \
              --max_session_timeout=40000 \
              --min_session_timeout=4000 \
              --log_level=INFO"
          readinessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          livenessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/zookeeper
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
    - metadata:
        name: datadir
        #these beta annotations still work but will be fully deprecated in a future Kubernetes release; just use storageClassName
        # annotations:
        #   volume.beta.kubernetes.io/storage-class: nfs-storage
        #   volume.beta.kubernetes.io/storage-provisioner: fuseim.pri/ifs
      spec:
        accessModes: [ "ReadWriteMany" ]
        storageClassName: nfs-storage   #use the existing StorageClass
        resources:
          requests:
            storage: 5G
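Apply it and, following the upstream tutorial's verification steps, check that all three servers are up with distinct ids:
kubectl apply -f zookeeper.yaml
kubectl get pods -n kafka -w    #wait for zk-0, zk-1 and zk-2 to reach Running
for i in 0 1 2; do kubectl exec zk-$i -n kafka -- cat /var/lib/zookeeper/data/myid; done    #expect 1, 2, 3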
3. Deploy the Kafka Cluster
Create kafka.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-svc
  namespace: kafka
  labels:
    app: kafka
spec:
  ports:
    - port: 9092
      name: server
  clusterIP: None
  selector:
    app: kafka
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: kafka-pdb
  namespace: kafka
spec:
  selector:
    matchLabels:
      app: kafka
  minAvailable: 2
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  namespace: kafka
spec:
  selector:
    matchLabels:
      app: kafka
  serviceName: kafka-svc
  replicas: 3
  template:
    metadata:
      labels:
        app: kafka
    spec:
      # nodeSelector:
      #   travis.io/schedule-only: "kafka"
      # tolerations:
      #   - key: "travis.io/schedule-only"
      #     operator: "Equal"
      #     value: "kafka"
      #     effect: "NoSchedule"
      #   - key: "travis.io/schedule-only"
      #     operator: "Equal"
      #     value: "kafka"
      #     effect: "NoExecute"
      #     tolerationSeconds: 3600
      #   - key: "travis.io/schedule-only"
      #     operator: "Equal"
      #     value: "kafka"
      #     effect: "PreferNoSchedule"
      affinity:
        podAntiAffinity:    #at most one broker per node
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - kafka
              topologyKey: "kubernetes.io/hostname"
        podAffinity:        #prefer co-locating brokers with zookeeper pods
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: "app"
                      operator: In
                      values:
                        - zk
                topologyKey: "kubernetes.io/hostname"
      terminationGracePeriodSeconds: 300
      containers:
        - name: k8s-kafka
          imagePullPolicy: IfNotPresent
          image: fastop/kafka:2.2.0
          resources:
            requests:
              memory: "1G"
              cpu: 500m
          ports:
            - containerPort: 9092
              name: server
          command:
            - sh
            - -c
            - "exec kafka-server-start.sh /opt/kafka/config/server.properties \
              --override broker.id=${HOSTNAME##*-} \
              --override listeners=PLAINTEXT://:9092 \
              --override zookeeper.connect=zk-0.zk-hs.kafka.svc.cluster.local:2181,zk-1.zk-hs.kafka.svc.cluster.local:2181,zk-2.zk-hs.kafka.svc.cluster.local:2181 \
              --override log.dir=/var/lib/kafka \
              --override auto.create.topics.enable=true \
              --override auto.leader.rebalance.enable=true \
              --override background.threads=10 \
              --override compression.type=producer \
              --override delete.topic.enable=false \
              --override leader.imbalance.check.interval.seconds=300 \
              --override leader.imbalance.per.broker.percentage=10 \
              --override log.flush.interval.messages=9223372036854775807 \
              --override log.flush.offset.checkpoint.interval.ms=60000 \
              --override log.flush.scheduler.interval.ms=9223372036854775807 \
              --override log.retention.bytes=-1 \
              --override log.retention.hours=168 \
              --override log.roll.hours=168 \
              --override log.roll.jitter.hours=0 \
              --override log.segment.bytes=1073741824 \
              --override log.segment.delete.delay.ms=60000 \
              --override message.max.bytes=1000012 \
              --override min.insync.replicas=1 \
              --override num.io.threads=8 \
              --override num.network.threads=3 \
              --override num.recovery.threads.per.data.dir=1 \
              --override num.replica.fetchers=1 \
              --override offset.metadata.max.bytes=4096 \
              --override offsets.commit.required.acks=-1 \
              --override offsets.commit.timeout.ms=5000 \
              --override offsets.load.buffer.size=5242880 \
              --override offsets.retention.check.interval.ms=600000 \
              --override offsets.retention.minutes=1440 \
              --override offsets.topic.compression.codec=0 \
              --override offsets.topic.num.partitions=50 \
              --override offsets.topic.replication.factor=3 \
              --override offsets.topic.segment.bytes=104857600 \
              --override queued.max.requests=500 \
              --override quota.consumer.default=9223372036854775807 \
              --override quota.producer.default=9223372036854775807 \
              --override replica.fetch.min.bytes=1 \
              --override replica.fetch.wait.max.ms=500 \
              --override replica.high.watermark.checkpoint.interval.ms=5000 \
              --override replica.lag.time.max.ms=10000 \
              --override replica.socket.receive.buffer.bytes=65536 \
              --override replica.socket.timeout.ms=30000 \
              --override request.timeout.ms=30000 \
              --override socket.receive.buffer.bytes=102400 \
              --override socket.request.max.bytes=104857600 \
              --override socket.send.buffer.bytes=102400 \
              --override unclean.leader.election.enable=true \
              --override zookeeper.session.timeout.ms=6000 \
              --override zookeeper.set.acl=false \
              --override broker.id.generation.enable=true \
              --override connections.max.idle.ms=600000 \
              --override controlled.shutdown.enable=true \
              --override controlled.shutdown.max.retries=3 \
              --override controlled.shutdown.retry.backoff.ms=5000 \
              --override controller.socket.timeout.ms=30000 \
              --override default.replication.factor=1 \
              --override fetch.purgatory.purge.interval.requests=1000 \
              --override group.max.session.timeout.ms=300000 \
              --override group.min.session.timeout.ms=6000 \
              --override inter.broker.protocol.version=2.2.0 \
              --override log.cleaner.backoff.ms=15000 \
              --override log.cleaner.dedupe.buffer.size=134217728 \
              --override log.cleaner.delete.retention.ms=86400000 \
              --override log.cleaner.enable=true \
              --override log.cleaner.io.buffer.load.factor=0.9 \
              --override log.cleaner.io.buffer.size=524288 \
              --override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308 \
              --override log.cleaner.min.cleanable.ratio=0.5 \
              --override log.cleaner.min.compaction.lag.ms=0 \
              --override log.cleaner.threads=1 \
              --override log.cleanup.policy=delete \
              --override log.index.interval.bytes=4096 \
              --override log.index.size.max.bytes=10485760 \
              --override log.message.timestamp.difference.max.ms=9223372036854775807 \
              --override log.message.timestamp.type=CreateTime \
              --override log.preallocate=false \
              --override log.retention.check.interval.ms=300000 \
              --override max.connections.per.ip=2147483647 \
              --override num.partitions=4 \
              --override producer.purgatory.purge.interval.requests=1000 \
              --override replica.fetch.backoff.ms=1000 \
              --override replica.fetch.max.bytes=1048576 \
              --override replica.fetch.response.max.bytes=10485760 \
              --override reserved.broker.max.id=1000"
          env:
            - name: KAFKA_HEAP_OPTS
              value: "-Xmx512M -Xms512M"
            - name: KAFKA_OPTS
              value: "-Dlogging.level=INFO"
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/kafka
          readinessProbe:
            tcpSocket:
              port: 9092
            timeoutSeconds: 1
            initialDelaySeconds: 5
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
    - metadata:
        name: datadir
        #these beta annotations still work but will be fully deprecated in a future Kubernetes release; just use storageClassName
        # annotations:
        #   volume.beta.kubernetes.io/storage-class: nfs-storage
        #   volume.beta.kubernetes.io/storage-provisioner: fuseim.pri/ifs
      spec:
        accessModes: [ "ReadWriteMany" ]
        storageClassName: nfs-storage
        resources:
          requests:
            storage: 1G
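Apply it and verify the brokers and their claims:
kubectl apply -f kafka.yaml
kubectl get pods -n kafka    #kafka-0, kafka-1 and kafka-2 should reach Running
kubectl get pvc -n kafka     #each broker gets its own PVC from nfs-storage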
4. External Access
Modify the ZooKeeper service zk-cs
kubectl edit svc zk-cs -n kafka
##modify the spec section:
spec:
  clusterIP: 10.97.9.47
  externalTrafficPolicy: Cluster
  ports:
    - name: client
      nodePort: 30004        #! added
      port: 2181
      protocol: TCP
      targetPort: 2181
  selector:
    app: zk
  sessionAffinity: None
  type: NodePort             #! changed
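An optional reachability check from outside the cluster (assuming 192.168.88.150 is one of the node IPs, as used in section 5):
nc -zv 192.168.88.150 30004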
Add a Kafka NodePort
Create kafka-read.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-read
  namespace: kafka
  labels:
    app: kafka
spec:
  selector:
    app: kafka
  ports:
    - port: 9092
      name: kafka-read
      targetPort: 9092
      nodePort: 30003
  type: NodePort
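Apply it and confirm both services are exposed:
kubectl apply -f kafka-read.yaml
kubectl get svc -n kafka    #zk-cs and kafka-read should both show TYPE NodePort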
5. Command-line Test
#start two separate terminals
#exec into a broker container in each:
kubectl exec -it kafka-0 -n kafka -- bash
kubectl exec -it kafka-1 -n kafka -- bash
#create a topic
kafka-topics.sh --create --topic test --zookeeper 192.168.88.150:30004 --partitions 3 --replication-factor 2
#list topics
kafka-topics.sh --list --zookeeper 192.168.88.150:30004
#start a consumer
kafka-console-consumer.sh --topic test --bootstrap-server 192.168.88.150:30003
#start a producer
kafka-console-producer.sh --topic test --broker-list 192.168.88.150:30003
#type messages into the producer and check that the consumer receives them
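A sample exchange (the message text is illustrative):
#in the producer terminal:
>hello kafka
#the consumer terminal should immediately print:
hello kafka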