KubeSphere + Kubernetes: Building a Production-Grade High-Availability Cluster (Part 4-2)

Deploying Common Kubernetes Middleware (Part 2)

Deploying Big Data Middleware

Gitlab

  1. Create the PVC manifests for GitLab storage
[root@k8s-master-1 gitlab]# cat pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitlab-pvc-data				## stores GitLab data
  namespace: ops
  labels:
    app: gitlab
spec:
  storageClassName: glusterfs
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 200Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitlab-pvc-log				## stores GitLab logs
  namespace: ops
  labels:
    app: gitlab
spec:
  storageClassName: glusterfs
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gitlab-pvc-conf				## stores GitLab configuration
  namespace: ops
  labels:
    app: gitlab
spec:
  storageClassName: glusterfs
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
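These claims rely on the glusterfs StorageClass set up earlier; a quick pre-flight check before applying (a sketch; adjust the name if your class differs):

# Confirm the dynamic-provisioning StorageClass referenced above exists
kubectl get storageclass glusterfs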
  2. Create the GitLab Deployment manifest
[root@k8s-master-1 gitlab]# cat gitlab.yaml

---

# ------------------------ GitLab Deployment definition -----------------------

apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: gitlab
  namespace: ops
spec:
  selector:
    matchLabels:
      app: gitlab
  revisionHistoryLimit: 2
  template:
    metadata:
      labels:
        app: gitlab
    spec:
      containers:
      # Application image
      - image: dockerhub.dsj.com:18443/library/gitlab-ce			## the upstream image pulled from Docker Hub is gitlab/gitlab-ce
        name: gitlab
        imagePullPolicy: IfNotPresent
        env:
        - name: "GITLAB_OMNIBUS_CONFIG"
          value: "external_url 'https://gitlab.dsj.com'"			##git外部访问域名
        # 应用的内部端口
        ports:
        - containerPort: 443
          name: gitlab443
        - containerPort: 80
          name: gitlab80
        - containerPort: 22
          name: gitlab22
        volumeMounts:
        - name: gitlab-persistent-data
          mountPath: /var/opt/gitlab
        - name: gitlab-persistent-log
          mountPath: /var/log/gitlab
        - name: gitlab-persistent-conf
          mountPath: /etc/gitlab
        - name: localtime
          mountPath: /etc/localtime
      volumes:
      - name: gitlab-persistent-data
        persistentVolumeClaim:
          claimName: gitlab-pvc-data
      - name: gitlab-persistent-log
        persistentVolumeClaim:
          claimName: gitlab-pvc-log
      - name: gitlab-persistent-conf
        persistentVolumeClaim:
          claimName: gitlab-pvc-conf
      - name: localtime							## mount the host's localtime file to keep the container clock in sync
        hostPath:
          path: /etc/localtime
  3. Create the Service manifest for GitLab access
[root@k8s-master-1 gitlab]# cat service.yaml
apiVersion: v1
kind: Service
metadata:
  name: gitlab
  namespace: ops
spec:
  type: NodePort
  ports:
  # Port mappings for the Service
  - port: 443
    targetPort: 443
    nodePort: 31443				## NodePort exposed outside the k8s cluster
    name: gitlab443
#  - port: 80
#    targetPort: 80
#    name: gitlab80
#    nodePort: 30080
  - port: 22
    targetPort: 22
    name: gitlab22
  selector:
    app: gitlab
  4. Deploy GitLab
[root@k8s-master-1 gitlab]# kubectl apply -f pvc.yaml
[root@k8s-master-1 gitlab]# kubectl apply -f gitlab.yaml
[root@k8s-master-1 gitlab]# kubectl apply -f service.yaml
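After applying, you can wait for the rollout and the volume binding to finish; a small sketch using the names from the manifests above:

# Block until the gitlab Deployment reports its pod ready
kubectl -n ops rollout status deployment/gitlab
# Confirm the three PVCs were bound by the glusterfs provisioner
kubectl -n ops get pvc -l app=gitlab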
  5. Verify the deployment
  • After creating GitLab, the pod may fail and restart several times while it initializes; just give it some extra time.
[root@k8s-master-1 gitlab]# kubectl get pod -n ops
NAME                      READY   STATUS    RESTARTS      AGE
gitlab-8668fc99b8-bs74c   1/1     Running   7 (53d ago)   53d
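If the pod keeps restarting, the logs of the previous container instance usually show how far the first-boot reconfigure got; a sketch using the pod name from the listing above:

# Inspect restart reasons and the logs of the failed start
kubectl -n ops describe pod gitlab-8668fc99b8-bs74c
kubectl -n ops logs gitlab-8668fc99b8-bs74c --previous
# Follow the current boot
kubectl -n ops logs -f gitlab-8668fc99b8-bs74c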
  • View the initial password of the GitLab root account
[root@k8s-master-1 gitlab]# kubectl exec -it gitlab-8668fc99b8-bs74c -n ops /bin/bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@gitlab-8668fc99b8-bs74c:/# cat /etc/gitlab/initial_root_password
# WARNING: This value is valid only in the following conditions
#          1. If provided manually (either via `GITLAB_ROOT_PASSWORD` environment variable or via `gitlab_rails['initial_root_password']` setting in `gitlab.rb`, it was provided before database was seeded for the first time (usually, the first reconfigure run).
#          2. Password hasn't been changed manually, either via UI or via command line.
#
#          If the password shown here doesn't work, you must reset the admin password following https://docs.gitlab.com/ee/security/reset_user_password.html#reset-your-root-password.

Password: JWZo7uqNTxISEqJRaSGqTYsRIoodUYOeD+G4yMM+Las=

# NOTE: This file will be automatically deleted in the first reconfigure run after 24 hours.

  • Log in to GitLab

Open https://<any-node-ip>:31443 in a browser and log in (account: root, password: the initial password shown above).
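Because external_url is set to https://gitlab.dsj.com, clone URLs will use that domain; clients that should resolve it can point it at any node IP. A sketch (the IP below is only an example):

# On the client machine, map the GitLab domain to any cluster node (example IP)
echo "192.168.1.101  gitlab.dsj.com" >> /etc/hosts
# The web UI is then reachable at https://gitlab.dsj.com:31443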

Flink

Flink is deployed with Helm. Reference: https://artifacthub.io/packages/helm/riskfocus/flink

  1. On an Internet-connected machine, add the Helm repo and download the offline chart package
[root@master-2 ~]# helm repo add riskfocus https://riskfocus.github.io/helm-charts-public/
"riskfocus" has been added to your repositories

[root@master-2 ~]# helm pull riskfocus/flink --version 0.2.0
flink-0.2.0.tgz
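Step 2 below assumes the chart has already been unpacked; the tar command is not shown in the transcript, so here is a minimal sketch:

# Unpack the chart archive (extracts into a directory named flink)
tar xf flink-0.2.0.tgz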
  2. Extract the chart and modify the default configuration
  • Modify values.yaml as follows
[root@k8s-master-1 yaml]# cd flink
[root@k8s-master-1 flink]# vim values.yaml
# Default values for flink.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

nameOverride: ""
fullnameOverride: ""

image:
  repository: dockerhub.dsj.com:18443/library/flink							### image changed to the private registry tag
  tag: 1.14.4-scala_2.12
  pullPolicy: Always														### pull policy: always pull on start
imagePullSecrets: []

# Pod Security Context
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext: {}
# securityContext:
#   fsGroup: 1000
#   runAsUser: 1000
#   runAsNonRoot: true

# For general configuration
flink:
  # logging, log4j configuration copied from Flink distribution
  logging:
    log4j_properties: |+
      # This affects logging for both user code and Flink
      rootLogger.level = INFO
      rootLogger.appenderRef.console.ref = ConsoleAppender
      rootLogger.appenderRef.rolling.ref = RollingFileAppender

      # Uncomment this if you want to _only_ change Flink's logging
      #logger.flink.name = org.apache.flink
      #logger.flink.level = INFO

      # The following lines keep the log level of common libraries/connectors on
      # log level INFO. The root logger does not override this. You have to manually
      # change the log levels here.
      logger.akka.name = akka
      logger.akka.level = INFO
      logger.kafka.name= org.apache.kafka
      logger.kafka.level = INFO
      logger.hadoop.name = org.apache.hadoop
      logger.hadoop.level = INFO
      logger.zookeeper.name = org.apache.zookeeper
      logger.zookeeper.level = INFO

      # Log all infos to the console
      appender.console.name = ConsoleAppender
      appender.console.type = CONSOLE
      appender.console.layout.type = PatternLayout
      appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n

      # Log all infos in the given rolling file
      appender.rolling.name = RollingFileAppender
      appender.rolling.type = RollingFile
      appender.rolling.append = false
      appender.rolling.fileName = ${sys:log.file}
      appender.rolling.filePattern = ${sys:log.file}.%i
      appender.rolling.layout.type = PatternLayout
      appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
      appender.rolling.policies.type = Policies
      appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
      appender.rolling.policies.size.size=100MB
      appender.rolling.strategy.type = DefaultRolloverStrategy
      appender.rolling.strategy.max = 10

      # Suppress the irrelevant (wrong) warnings from the Netty channel handler
      logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
      logger.netty.level = OFF

  # monitoring is exporting metrics in Prometheus format
  monitoring:
    enabled: true
    # port for metrics
    port: 9999
    # latency monitoring
    latency:
      enabled: false
      probingInterval: 1000
    # system is additional system metrics
    system:
      enabled: true
      probingInterval: 5000
    rocksdb:
      enabled: false
  workDir: /opt/flink
  # In case of issue - Metaspace OOM increase this param according to your memory limits
  # params: |+
  #   taskmanager.memory.jvm-metaspace.size: 256mb
  params: ""
  state:
    # backend for state. Available options: filesystem, rocksdb, memory; empty - for default(memory)
    backend:
    # These values are default excludes file pathes
    # https://ci.apache.org/projects/flink/flink-docs-stable/dev/stream/state/checkpointing.html#related-config-options
    params: |+
      state.checkpoints.dir: file:///flink_state/checkpoints
      state.savepoints.dir: file:///flink_state/savepoints
      state.backend.async: true
      state.backend.fs.memory-threshold: 1024
      state.backend.fs.write-buffer-size: 4096
      state.backend.incremental: false
      state.backend.local-recovery: false
      state.checkpoints.num-retained: 1
      taskmanager.state.local.root-dirs: file:///flink_state/local-recovery
    # https://ci.apache.org/projects/flink/flink-docs-stable/ops/state/state_backends.html#rocksdb-state-backend-config-options
    # * state.backend.rocksdb.localdir doesn't have a prefix - file://
    rocksdb: |+
      state.backend.rocksdb.checkpoint.transfer.thread.num: 1
      state.backend.rocksdb.localdir: /flink_state/rocksdb
      state.backend.rocksdb.options-factory: org.apache.flink.contrib.streaming.state.DefaultConfigurableOptionsFactory
      state.backend.rocksdb.predefined-options: DEFAULT
      state.backend.rocksdb.timer-service.factory: HEAP
      state.backend.rocksdb.ttl.compaction.filter.enabled: false

# extraEnvs passes envs to both Jobmanagers and Taskmanager
# for example
# extraEnvs:
#  - name: KAFKA_BOOTSTRAP_SERVERS
#    value: dest-kafka-bootstrap:9092
#
extraEnvs: []

jobmanager:
  # Statefulset option will create Jobmanager as a StatefulSet
  statefulset: true															#### run the jobmanager as a StatefulSet for easier management
  # Init containers
  initContainers: {}
  # Example
  #  test:
  #    image: busybox:1.28
  #    command:
  #      - /bin/sh
  #      - -c
  #      - "echo test"
  # highAvailability configuration based on zookeeper
  highAvailability:
    # enabled also will enable zookeeper Dependency
    enabled: true															### enable HA mode; requires a ZooKeeper connection
    zookeeperConnect: "{{ .Release.Name }}-zookeeper:{{ .Values.zookeeper.env.ZOO_PORT }}"											#### ZooKeeper connection address; adjust to your environment
    zookeeperRootPath: /flink
    clusterId: /flink
    # storageDir for Jobmanagers. DFS expected.
    # Docs - Storage directory (required): JobManager metadata is persisted in the file system storageDir and only a pointer to this state is stored in ZooKeeper
    storageDir: /flink/ha_data												### working/HA storage directory for the JobManager
    # syncPort is a rpc port in HA configuration
    syncPort: 6150
    # command for HA configuration
    # this trick with sed required because taskmanagers read jobmanager.rpc.address from Zookeeper.
    # For configuration with one jobmanager (it's enough stable because Kubernetes will restart Jobmanager on falures)
    # 'sed' can be changed to use flink service name, e.g. {{ include "flink.name" . }}-jobmanager
    command: >-
      sed 's/REPLACE_HOSTNAME/'${FLINK_POD_IP}'/'
      $FLINK_HOME/conf/flink-conf.yaml.tpl > $FLINK_HOME/conf/flink-conf.yaml &&
      $FLINK_HOME/bin/jobmanager.sh start-foreground;
  # Additional param for JVM to support security.properties override
  # check configMap for more information
  jvmArgs: "-Djava.security.properties={{ .Values.flink.workDir }}/conf/security.properties"
  # extraEnvs passes envs to Jobmanagers
  extraEnvs: []
  ports:
    rpc: 6123
    # blob port uses for Liveness probe
    blob: 6124
    ui: 8081
    nodeport: 30081													### added NodePort setting for external access
  replicaCount: 2													### number of jobmanager replicas
  # heapSize params for Jobmanager
  # keep in mind that Flink can use offheap memory
  # e.g. in case of checkpoint usage
  heapSize: 1g
  resources: {}
  # Example
  #    limits:
  #      cpu: 3800m
  #      memory: 8000Mi
  additionalCommand: >-
    cp /opt/flink/opt/flink-s3-fs-presto-*.jar
    /opt/flink/lib/													#### the wget commands below are commented out because this environment is offline and they would fail; on a machine with Internet access, start the image, download the jars, then import the image into the offline environment
#    wget https://repo1.maven.org/maven2/com/github/oshi/oshi-core/3.4.0/oshi-core-3.4.0.jar
#    -O /opt/flink/lib/oshi-core-3.4.0.jar &&
#    wget https://repo1.maven.org/maven2/net/java/dev/jna/jna/5.4.0/jna-5.4.0.jar
#    -O /opt/flink/lib/jna-5.4.0.jar &&
#    wget https://repo1.maven.org/maven2/net/java/dev/jna/jna-platform/5.4.0/jna-platform-5.4.0.jar
#    -O /opt/flink/lib/jna-platform-5.4.0.jar
  command: >-
    cp $FLINK_HOME/conf/flink-conf.yaml.tpl $FLINK_HOME/conf/flink-conf.yaml &&
    $FLINK_HOME/bin/jobmanager.sh start-foreground;
  service:
    type: ClusterIP
    annotations: {}
    # rest is additional service which exposes only HTTP port
    # can be using for cases of using exposeController
    rest:
      enabled: true
      annotations: {}
    headless:
      annotations: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  persistent:
    enabled: true
    # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
    storageClass: glusterfs										### use the glusterfs dynamic StorageClass
    size: 100Gi													### PVC size set to 100Gi
    mountPath: "/flink_state"
  podManagementPolicy: Parallel
  annotations: {}
  # Example
  #  "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
  serviceAccount:
    # Specifies whether a ServiceAccount should be created
    create: true
    # The name of the ServiceAccount to use.
    # If not set and create is true, a name is generated using the fullname template
    name:
    annotations: {}
  #livenessProbe will conduct checks for rpc port as tcpSocket probe
  livenessProbe:
    initialDelaySeconds: 10
    periodSeconds: 15
  readinessProbe:
    periodSeconds: 10
    initialDelaySeconds: 20
  podAnnotations: {}

taskmanager:
  # Statefulset option will create Taskmanager as a StatefulSet
  # A necessary option for Persistent
  statefulset: true
  # Additional param for JVM to support security.properties override
  # check configMap for more information
  jvmArgs: "-Djava.security.properties={{ .Values.flink.workDir }}/conf/security.properties"
  # extraEnvs passes envs to Taskmanagers
  extraEnvs: []
  ports:
    rpc: 6122
  replicaCount: 4
  numberOfTaskSlots: 1
  memoryProcessSize: 1g
  memoryFlinkSize:
  resources: {}
  # Example
  #    limits:
  #      cpu: 3800m
  #      memory: 8000Mi
  additionalCommand: >-
    cp /opt/flink/opt/flink-s3-fs-presto-*.jar
    /opt/flink/lib/											#### the wget commands below are commented out because this environment is offline and they would fail; on a machine with Internet access, start the image, download the jars, then import the image into the offline environment
#    wget https://repo1.maven.org/maven2/com/github/oshi/oshi-core/3.4.0/oshi-core-3.4.0.jar
#    -O /opt/flink/lib/oshi-core-3.4.0.jar &&
#    wget https://repo1.maven.org/maven2/net/java/dev/jna/jna/5.4.0/jna-5.4.0.jar
#    -O /opt/flink/lib/jna-5.4.0.jar &&
#    wget https://repo1.maven.org/maven2/net/java/dev/jna/jna-platform/5.4.0/jna-platform-5.4.0.jar
#    -O /opt/flink/lib/jna-platform-5.4.0.jar
  command: >-
    $FLINK_HOME/bin/taskmanager.sh start-foreground;
  service:
    type: ClusterIP
  nodeSelector: {}
  affinity: {}
  tolerations: []
  persistent:
    enabled: true
    # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
    storageClass: glusterfs									### use the glusterfs dynamic StorageClass
    size: 100Gi												### PVC size set to 100Gi
    mountPath: "/flink_state"
  podManagementPolicy: Parallel
  annotations:
    "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
  serviceAccount:
    # Specifies whether a ServiceAccount should be created
    create: true
    # The name of the ServiceAccount to use.
    # If not set and create is true, a name is generated using the fullname template
    name:
    annotations: {}
  #livenessProbe will conduct checks for rpc port as tcpSocket probe
  livenessProbe:
    initialDelaySeconds: 30
    periodSeconds: 60
  podAnnotations: {}

ingress:
  enabled: false
  annotations: {}
  path: /
  hosts: []
  tls: []

prometheus:
  # serviceMonitor provides service discovery for prometheus operatored installations
  serviceMonitor:
    enabled: false
    namespace:
    interval: 5s
    selector:
      # According to default selector for prometheus operator
      prometheus: kube-prometheus

zookeeper:
  enabled: true													### enable zookeeper to provide the HA backend
  replicaCount: 3
  env:
    ZK_HEAP_SIZE: "1G"
    ZOO_PORT: 2181
  resources:
    limits:
      cpu: 400m
      memory: 1256Mi
  persistence:
    enabled: true

secrets:
#  Plain predefined secrets example
#  kubernetesSecrets:
#    - name: kerberos
#      mountPath: /kerberos
  bitnamiSealedSecrets:
    enabled: false
    # The encrypted raw file sealed secrets generated for example with
    # kubeseal --raw --from-file=... --controller-name sealed-secrets --scope namespace-wide
    sealedSecretFiles: {}
    # file1: encypted_file1
    # file2: encypted_file2
    sealedSecretFilesPath: /etc/sealed 
    sealedSecretFilesAnnotations:
      sealedsecrets.bitnami.com/namespace-wide: true
    # The encrypted raw env sealed secrets generated for example with
    # echo -n password | kubeseal --raw --from-file=/dev/stdin --controller-name sealed-secrets --scope namespace-wide
    sealedSecretEnvs: {}
    # env1: encypted_env1
    # env2: encypted_env2
    sealedSecretEnvsAnnotations:
      sealedsecrets.bitnami.com/namespace-wide: true


  • Change the jobmanager-rest Service type to NodePort so it can be reached from outside the cluster
[root@k8s-master-1 flink]# cd templates/
[root@k8s-master-1 templates]# vim jobmanager-rest-service.yaml
{{ if .Values.jobmanager.service.rest.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "flink.fullname" . }}-jobmanager-rest
  labels:
{{ include "flink.labels" . | indent 4 }}
    component: jobmanager
{{- if .Values.jobmanager.service.rest.annotations }}
  annotations:
{{ toYaml .Values.jobmanager.service.rest.annotations | indent 4 }}
{{- end }}
spec:
  type: NodePort
  ports:
    - port: {{ .Values.jobmanager.ports.ui }}
      targetPort: {{ .Values.jobmanager.ports.ui }}
      protocol: TCP
      nodePort: {{ .Values.jobmanager.ports.nodeport }}
      name: ui
  selector:
    app.kubernetes.io/name: {{ include "flink.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    component: jobmanager
{{- end }}
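The edited chart can be rendered locally to confirm the rest Service now comes out as a NodePort on 30081; a sketch, assuming Helm 3 and that it is run from the chart root:

# Render the templates without installing and inspect the jobmanager-rest Service
helm template flink . | grep -A 12 "jobmanager-rest"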


  • Modify the zookeeper subchart configuration
[root@k8s-master-1 zookeeper]# vim values.yaml
.........
image:
  repository: dockerhub.dsj.com:18443/library/zookeeper     # Container image repository for zookeeper container.
  tag: 3.5.5                # Container image tag for zookeeper container.
  pullPolicy: IfNotPresent  # Image pull criteria for zookeeper container.
..........

  3. Build the offline images
  • Log in to a machine with Internet access
[root@k8s-master-2 flink]# vim dockerfile
FROM flink:1.14.4-scala_2.12
# Bake in the jars that the chart's additionalCommand would otherwise download at startup; add any other jars you need here as well
RUN wget https://repo1.maven.org/maven2/com/github/oshi/oshi-core/3.4.0/oshi-core-3.4.0.jar -O /opt/flink/lib/oshi-core-3.4.0.jar && \
    wget https://repo1.maven.org/maven2/net/java/dev/jna/jna/5.4.0/jna-5.4.0.jar -O /opt/flink/lib/jna-5.4.0.jar && \
    wget https://repo1.maven.org/maven2/net/java/dev/jna/jna-platform/5.4.0/jna-platform-5.4.0.jar -O /opt/flink/lib/jna-platform-5.4.0.jar
[root@k8s-master-2 flink]# docker build -t dockerhub.dsj.com:18443/library/flink:1.14.4-scala_2.12 .
[root@k8s-master-2 flink]# docker save -o flink.tar dockerhub.dsj.com:18443/library/flink:1.14.4-scala_2.12
[root@k8s-master-2 flink]# docker pull zookeeper:3.5.5
[root@k8s-master-2 flink]# docker save -o zookeeper.tar zookeeper:3.5.5

  • Upload to the offline environment
# Load the images
[root@k8s-master-1 flink]# docker load -i flink.tar
[root@k8s-master-1 flink]# docker load -i zookeeper.tar

# Push the images to the private registry
[root@k8s-master-1 flink]# docker push dockerhub.dsj.com:18443/library/flink:1.14.4-scala_2.12
[root@k8s-master-1 flink]# docker tag zookeeper:3.5.5 dockerhub.dsj.com:18443/library/zookeeper:3.5.5
[root@k8s-master-1 flink]# docker push dockerhub.dsj.com:18443/library/zookeeper:3.5.5

  4. Install
# Install into the flink namespace; run this from inside the chart directory
[root@k8s-master-1 flink]# ls
charts  Chart.yaml  OWNERS  README.md  requirements.lock  requirements.yaml  templates  values.yaml
[root@k8s-master-1 flink]# pwd
/root/yaml/flink

[root@k8s-master-1 flink]# helm install --name flink --namespace flink .

# List the installed Helm releases
[root@k8s-master-1 trino-1.14.0]# helm list
NAME                    NAMESPACE       REVISION        UPDATED                                 STATUS          CHART                   APP VERSION
dolphinscheduler        default         1               2022-03-25 13:48:14.477718902 +0530 IST deployed        dolphinscheduler-2.0.3  2.0.5
dremio                  default         1               2022-03-25 13:17:22.182284837 +0530 IST deployed        dremio-2.0.2
flink                   default         1               2022-03-25 13:29:42.943273464 +0530 IST deployed        flink-0.2.0             1.11.2
trino                   default         1               2022-04-21 08:33:53.005101636 +0530 IST deployed        trino-1.14.0            375

# Uninstall flink
[root@k8s-master-1 flink]# helm uninstall flink

# Upgrade the release after changing the chart
[root@k8s-master-1 flink]# helm upgrade flink .
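Note that the --name flag used above is Helm 2 syntax; in Helm 3 it was removed and the release name is positional. A sketch of the equivalent Helm 3 commands (assuming Helm 3.2 or newer):

# Helm 3 equivalents of the commands above
helm install flink . --namespace flink --create-namespace
helm list -n flink
helm upgrade flink . -n flink
helm uninstall flink -n flink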

  5. Check the deployment
# Check the deployed pods
[root@k8s-master-1 zookeeper]# kubectl get pod
flink-jobmanager-0                       1/1     Running   1 (60d ago)   60d
flink-jobmanager-1                       1/1     Running   1 (60d ago)   60d
flink-taskmanager-0                      1/1     Running   1 (60d ago)   60d
flink-taskmanager-1                      1/1     Running   1 (60d ago)   60d
flink-taskmanager-2                      1/1     Running   1 (60d ago)   60d
flink-taskmanager-3                      1/1     Running   1 (60d ago)   60d
flink-zookeeper-0                        1/1     Running   0             60d
flink-zookeeper-1                        1/1     Running   0             60d
flink-zookeeper-2                        1/1     Running   0             60d
# Check the PVCs
[root@k8s-master-1 zookeeper]# kubectl get pvc
data-flink-zookeeper-0                                   Bound    pvc-5bfa1dae-c190-493d-b186-6221ae4defd5   100Gi      RWO            glusterfs      60d
data-flink-zookeeper-1                                   Bound    pvc-10281647-676c-42b5-b89b-9f127b412a7d   100Gi      RWO            glusterfs      60d
data-flink-zookeeper-2                                   Bound    pvc-462dac00-0376-4da2-8d7b-ae59f72a34b7   100Gi      RWO            glusterfs      60d
jobmanager-data-flink-jobmanager-0                       Bound    pvc-055a0f3f-0fd1-42b5-9ea8-7f384e5bbba3   100Gi      RWO            glusterfs      60d
jobmanager-data-flink-jobmanager-1                       Bound    pvc-8a16a71a-058b-4529-899c-12eb06fee4aa   100Gi      RWO            glusterfs      60d
taskmanager-data-flink-taskmanager-0                     Bound    pvc-3d34b91d-d72e-4359-a200-68f5ff0cd241   100Gi      RWO            glusterfs      60d
taskmanager-data-flink-taskmanager-1                     Bound    pvc-51820972-1d03-4a14-93eb-b2a3637629ef   100Gi      RWO            glusterfs      60d
taskmanager-data-flink-taskmanager-2                     Bound    pvc-e449b2f4-fbd2-4c4e-8e8a-2c5a2a2e515c   100Gi      RWO            glusterfs      60d
taskmanager-data-flink-taskmanager-3                     Bound    pvc-4a523a26-efb4-4756-bd93-f8074ec244f1   100Gi      RWO            glusterfs      60d
# Check the Services
[root@k8s-master-1 zookeeper]# kubectl get svc
flink-jobmanager                                         ClusterIP      10.233.28.152   <none>        6124/TCP,30081/TCP,6123/TCP,8081/TCP,6150/TCP,9999/TCP   60d
flink-jobmanager-headless                                ClusterIP      None            <none>        6124/TCP,30081/TCP,6123/TCP,8081/TCP,9999/TCP            60d
flink-jobmanager-rest                                    NodePort       10.233.4.209    <none>        8081:30081/TCP                                           60d
flink-taskmanager                                        ClusterIP      10.233.30.129   <none>        6122/TCP,9999/TCP                                        60d
flink-zookeeper                                          ClusterIP      10.233.34.82    <none>        2181/TCP                                                 60d
flink-zookeeper-headless                                 ClusterIP      None            <none>        2181/TCP,3888/TCP,2888/TCP                               60d
# Check the ConfigMaps
[root@k8s-master-1 dolphinscheduler]# kubectl get cm
flink-config                 5      61d
flink-zookeeper              3      61d


Trino

Trino is deployed with Helm. Reference: https://artifacthub.io/packages/helm/statcan/trino

  1. On an Internet-connected machine, add the Helm repo and download the offline chart package
[root@master-2 ~]# helm repo add valeriano-manassero https://valeriano-manassero.github.io/helm-charts
"valeriano-manassero" has been added to your repositories
[root@master-2 ~]# helm pull valeriano-manassero/trino --version 1.15.0
trino-1.15.0.tgz

  2. Extract the chart and modify the default configuration
  • Modify values.yaml as follows
[root@master-2 ~]# tar xf trino-1.15.0.tgz
[root@master-2 trino]# ls
Chart.yaml  LICENSE  README.md  templates  values.yaml
[root@master-2 trino]# vim values.yaml
image:
  repository: dockerhub.dsj.com:18443/library/trino					### image changed to the private registry tag
  tag: 375
  pullPolicy: Always												### pull policy: always pull on start
  securityContext:
    runAsUser: 1000
    runAsGroup: 1000

imagePullSecrets: []
# For example:
# imagePullSecrets:
#   - name: registry-credentials

env: []
# Both worker & coordinator can use environment variables to expose information about itself to Containers running in the Pod

ingress:
  enabled: false
  annotations: {}
  host: ""
  tls:
    secretName: ""

server:
  workers: 5
  node:
    environment: production
    dataDir: /data/trino
    pluginDir: /usr/lib/trino/plugin
  log:
    trino:
      level: INFO
  config:
    path: /etc/trino
    http:
      port: 8080
    processForwarded: false
    # Trino supports multiple authentication types: PASSWORD, CERTIFICATE, OAUTH2, JWT, KERBEROS
    # For more info: https://trino.io/docs/current/security/authentication-types.html
    # authenticationType: "PASSWORD"
    httpsServer:
      enabled: false
      port: 8443
      keystore:
        path: "/usr/local/certs/clustercoord.pem"
        # JKS keystores always require a password, while PEM format certificates can optionally require a password
        key: ""
    query:
      maxMemory: "3GB"
      maxMemoryPerNode: "1GB"
      maxTotalMemory: "6GB"
      maxTotalMemoryPerNode: "2GB"
    prestoCompatibleHeader: false
  workerExtraConfig: ""
  coordinatorExtraConfig: ""
  jvm:
    maxHeapSize: "7G"
    gcMethod:
      type: "UseG1GC"
      g1:
        heapRegionSize: "32M"
  autoscaler:
    enabled: false
    maxReplicas: 5
    targetCPUUtilizationPercentage: 50

initContainers: {}
  # coordinator:
  #   - name: init-coordinator
  #     image: busybox:1.28
  #     imagePullPolicy: IfNotPresent
  #     command: ['sh', '-c', "until nslookup myservice.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"]
  # worker:
  #   - name: init-worker
  #     image: busybox:1.28
  #     command: ['sh', '-c', 'echo The worker is running! && sleep 3600']

auth: {}
  # Set username and password
  # https://trino.io/docs/current/security/password-file.html#file-format
  # passwordAuth: "username:encrypted-password-with-htpasswd"

accessControl:
  # # Supported types: pvc or configmap
  #type: pvc
  # refreshPeriod: 1s
  # # Rules file is mounted to /etc/trino/access-control
  # configFile: "/access-control/rules.json"
  # # If you use pvc as the type, you have to specify the pvcName field:
  #pvcName: access-control-pvc
  #pvcStorage: 50G
  #pvcStorageClassName: glusterfs
  # # If you use pvc as the type, you can specify the name of the volume with the pvcVolumeName:
  # pvcVolumeName: access-control-pvc-volume
  # # If you use configmap as the type, you have to specify the rules field:
  # rules:
  #   rules.json: |-
  #     {
  #       "catalogs": [
  #         {
  #           "user": "admin",
  #           "catalog": "(mysql|system)",
  #           "allow": "all"
  #         },
  #         {
  #           "group": "finance|human_resources",
  #           "catalog": "postgres",
  #           "allow": true
  #         },
  #         {
  #           "catalog": "hive",
  #           "allow": "all"
  #         },
  #         {
  #           "user": "alice",
  #           "catalog": "postgresql",
  #           "allow": "read-only"
  #         },
  #         {
  #           "catalog": "system",
  #           "allow": "none"
  #         }
  #       ],
  #       "schemas": [
  #         {
  #           "user": "admin",
  #           "schema": ".*",
  #           "owner": true
  #         },
  #         {
  #           "user": "guest",
  #           "owner": false
  #         },
  #         {
  #           "catalog": "default",
  #           "schema": "default",
  #           "owner": true
  #         }
  #       ]
  #     }

resourceGroups:
  # # Supported types: pvc or configmap
  # type: pvc
  # # Rules file is mounted to /etc/trino/resource-groups
  # configFile: "/resource-groups/resource-groups.json"
  # # If you use pvc as the type, you have to specify the pvcName field:
  # pvcName: resource-groups-pvc
  # pvcStorage: 50G
  #pvcStorageClassName: glusterfs
  # # If you use pvc as the type, you can specify the name of the volume with the pvcVolumeName:
  # pvcVolumeName:
  # # If you use configmap as the type, you have to specify the rules field:
  # rules:
  #   resource-groups.json: |-
  #     {
  #       "rootGroups": [
  #         {
  #           "name": "global",
  #           "softMemoryLimit": "80%",
  #           "hardConcurrencyLimit": 100,
  #           "maxQueued": 1000,
  #           "schedulingPolicy": "weighted",
  #           "jmxExport": true,
  #           "subGroups": [
  #             {
  #               "name": "data_definition",
  #               "softMemoryLimit": "10%",
  #               "hardConcurrencyLimit": 5,
  #               "maxQueued": 100,
  #               "schedulingWeight": 1
  #             },
  #             {
  #               "name": "adhoc",
  #               "softMemoryLimit": "10%",
  #               "hardConcurrencyLimit": 50,
  #               "maxQueued": 1,
  #               "schedulingWeight": 10,
  #               "subGroups": [
  #                 {
  #                   "name": "other",
  #                   "softMemoryLimit": "10%",
  #                   "hardConcurrencyLimit": 2,
  #                   "maxQueued": 1,
  #                   "schedulingWeight": 10,
  #                   "schedulingPolicy": "weighted_fair",
  #                   "subGroups": [
  #                     {
  #                       "name": "${USER}",
  #                       "softMemoryLimit": "10%",
  #                       "hardConcurrencyLimit": 1,
  #                       "maxQueued": 100
  #                     }
  #                   ]
  #                 },
  #                 {
  #                   "name": "bi-${toolname}",
  #                   "softMemoryLimit": "10%",
  #                   "hardConcurrencyLimit": 10,
  #                   "maxQueued": 100,
  #                   "schedulingWeight": 10,
  #                   "schedulingPolicy": "weighted_fair",
  #                   "subGroups": [
  #                     {
  #                       "name": "${USER}",
  #                       "softMemoryLimit": "10%",
  #                       "hardConcurrencyLimit": 3,
  #                       "maxQueued": 10
  #                     }
  #                   ]
  #                 }
  #               ]
  #             },
  #             {
  #               "name": "pipeline",
  #               "softMemoryLimit": "80%",
  #               "hardConcurrencyLimit": 45,
  #               "maxQueued": 100,
  #               "schedulingWeight": 1,
  #               "jmxExport": true,
  #               "subGroups": [
  #                 {
  #                   "name": "pipeline_${USER}",
  #                   "softMemoryLimit": "50%",
  #                   "hardConcurrencyLimit": 5,
  #                   "maxQueued": 100
  #                 }
  #               ]
  #             }
  #           ]
  #         },
  #         {
  #           "name": "admin",
  #           "softMemoryLimit": "100%",
  #           "hardConcurrencyLimit": 50,
  #           "maxQueued": 100,
  #           "schedulingPolicy": "query_priority",
  #           "jmxExport": true
  #         }
  #       ],
  #       "selectors": [
  #         {
  #           "user": "bob",
  #           "group": "admin"
  #         },
  #         {
  #           "userGroup": "admin",
  #           "group": "admin"
  #         },
  #         {
  #           "source": ".*pipeline.*",
  #           "queryType": "DATA_DEFINITION",
  #           "group": "global.data_definition"
  #         },
  #         {
  #           "source": ".*pipeline.*",
  #           "group": "global.pipeline.pipeline_${USER}"
  #         },
  #         {
  #           "source": "jdbc#(?<toolname>.*)",
  #           "clientTags": ["hipri"],
  #           "group": "global.adhoc.bi-${toolname}.${USER}"
  #         },
  #         {
  #           "group": "global.adhoc.other.${USER}"
  #         }
  #       ],
  #       "cpuQuotaPeriod": "1h"
  #     }

# If you want to provide your own secrets resource, you can use this field:
# connectorsSecret:

connectors: {}
  # Connectors configuration usually contains sensitive data (like passwords, usernames, ...)
  # so data is stored in a secret
  # mysql.properties: |-
  #   connector.name=mysql
  #   connection-url=jdbc:mysql://mysqlserver:3306
  #   connection-user=mysqluser
  #   connection-password=mysqlpassword
  # elk.properties: |-
  #   connector.name=elasticsearch
  #   elasticsearch.host=elasticsearchserver
  #   elasticsearch.port=9200
  #   elasticsearch.default-schema-name=default
  #   elasticsearch.security=PASSWORD
  #   elasticsearch.auth.user=elastiuser
  #   elasticsearch.auth.password=elasticpassword
  #   elasticsearch.tls.enabled=true

schemas: {}
  # Custom schemas that will be mounted in /etc/trino/schemas
  # testschema.json: |-
  #   {
  #     "tableName": "testtable",
  #     "schemaName": "testschema",
  #     "topicName": "testtopic",
  #     "key": {
  #         "dataFormat": "json",
  #         "fields": [
  #             {
  #                 "name": "_key",
  #                 "dataFormat": "VARCHAR",
  #                 "type": "VARCHAR",
  #                 "hidden": "false"
  #             }
  #         ]
  #     },
  #     "message": {
  #         "dataFormat": "json",
  #         "fields": [
  #             {
  #                 "name": "id",
  #                 "mapping": "id",
  #                 "type": "BIGINT"
  #             },
  #             {
  #                 "name": "test_field",
  #                 "mapping": "test_field",
  #                 "type": "VARCHAR"
  #             }
  #         ]
  #     }
  #   }

service:
  type: NodePort							#### change the Service type to NodePort

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #  cpu: 100m
  #  memory: 128Mi
  # requests:
  #  cpu: 100m
  #  memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

secretMounts: []
  # - name: ssl-cert
  #   secretName: ssl-cert
  #   path: /usr/local/certs/

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations: {}


  3. Build the offline image
# Download on an Internet-connected machine
[root@master-2 ~]# docker pull trinodb/trino:378
# Save the image to a tar archive
[root@master-2 ~]# docker save -o trino.tar trinodb/trino:378


  • Upload the archive to the offline environment
# Load the image
[root@k8s-master-1 ~]# docker load -i trino.tar
# Push the image to the private registry
[root@k8s-master-1 ~]# docker tag trinodb/trino:378 dockerhub.dsj.com:18443/library/trino:375
[root@k8s-master-1 ~]# docker push dockerhub.dsj.com:18443/library/trino:375

  4. Install
[root@k8s-master-1 trino-1.14.0]# pwd
/root/yaml/trino-1.14.0
[root@k8s-master-1 trino-1.14.0]# ls
Chart.yaml  README.md  templates  values.yaml
[root@k8s-master-1 trino-1.14.0]# helm install --name trino --namespace trino . 

# List the installed Helm releases
[root@k8s-master-1 trino-1.14.0]# helm list
NAME                    NAMESPACE       REVISION        UPDATED                                 STATUS          CHART                   APP VERSION
dolphinscheduler        default         1               2022-03-25 13:48:14.477718902 +0530 IST deployed        dolphinscheduler-2.0.3  2.0.5
dremio                  default         1               2022-03-25 13:17:22.182284837 +0530 IST deployed        dremio-2.0.2
flink                   default         2               2022-05-25 06:07:20.941169711 +0530 IST deployed        flink-0.2.0             1.11.2
trino                   default         1               2022-04-21 08:33:53.005101636 +0530 IST deployed        trino-1.14.0            375

# Uninstall trino
[root@k8s-master-1 trino-1.14.0]# helm uninstall trino

# Upgrade the release after changing the chart
[root@k8s-master-1 trino-1.14.0]# helm upgrade trino .


  5. Check the deployment
# Check the deployed pods
[root@k8s-master-1 trino-1.14.0]# kubectl get pod
NAME                                     READY   STATUS    RESTARTS      AGE
trino-coordinator-7fbbcf646c-kxjqx       1/1     Running   0             34d
trino-worker-777c758c6b-2lq2j            1/1     Running   0             7d5h
trino-worker-777c758c6b-2tw6c            1/1     Running   0             7d5h
trino-worker-777c758c6b-g55gh            1/1     Running   0             7d5h
trino-worker-777c758c6b-gj5jc            1/1     Running   0             7d5h
trino-worker-777c758c6b-rnxw4            1/1     Running   0             7d5h
# Check the Services
[root@k8s-master-1 trino-1.14.0]# kubectl get svc
trino                                                    NodePort       10.233.43.226   <none>        8080:30653/TCP                                           34d

# Check the ConfigMaps
[root@k8s-master-1 dolphinscheduler]# kubectl get cm
trino-coordinator            4      34d
trino-worker                 4      34d
trino-worker-health-check    1      34d
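With the NodePort shown above (30653 in this environment), any machine that can reach a cluster node can run a smoke test; a sketch assuming the Trino CLI is installed locally:

# Replace <node-ip> with any Kubernetes node address
trino --server http://<node-ip>:30653 --execute "SELECT 1"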


dremio

Dremio is deployed with Helm. Reference: https://artifacthub.io/packages/helm/statcan/dremio

  1. On an Internet-connected machine, add the Helm repo and download the offline chart package
# Add the Helm repo
[root@master-2 ~]# helm repo add statcan https://statcan.github.io/charts
# Download the dremio offline chart package
[root@master-2 ~]# helm pull statcan/dremio --version 2.0.2
dremio-2.0.2.tgz
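Step 2 below edits files inside the dremio directory; the unpack step is not shown in the transcript, so here is a minimal sketch:

# Unpack the chart archive (extracts into a directory named dremio)
tar xf dremio-2.0.2.tgz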

  2. Modify the configuration
[root@master-2 ~]# cd dremio
[root@master-2 dremio]# ls
Chart.yaml  config  docs  LICENSE.md  README.md  templates  values.yaml

[root@master-2 dremio]# vi values.yaml
# The Dremio image used in the cluster.
#
# It is *highly* recommended to update the version tag to
# the version that you are using. This will ensure that all
# the pods are using the same version of the software.
#
# Using latest will cause Dremio to potentially upgrade versions
# automatically during redeployments and may negatively impact
# the cluster.
image: dockerhub.dsj.com:18443/library/dremio-oss								### image changed to the private registry
imageTag: latest

# Annotations, labels, node selectors, and tolerations
#
# annotations: Annotations are applied to the StatefulSets that are deployed.
# podAnnotations: Pod annotations are applied to the pods that are deployed.
# labels: Labels operate much like annotations.
# podLabels: Labels that are applied to the pods that are deployed.
# nodeSelector: Target pods to nodes based on labels set on the nodes. For more
#   information, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodese
# tolerations: Tolerations allow the negation of taints that have been applied to some set of
#   in the Kubernetes cluster so that pods can be scheduled on those tainted nodes.
annotations: {}
podAnnotations: {}
labels: {}
podLabels: {}
nodeSelector: {}
tolerations: []

# Dremio Coordinator
coordinator:
  # CPU & Memory
  # Memory allocated to each coordinator, expressed in MB.
  # CPU allocated to each coordinator, expressed in CPU cores.
  cpu: 15
  memory: 122800

  # This count is used for slave coordinators only.
  # The total number of coordinators will always be count + 1.
  count: 0

  # Coordinator data volume size (applies to the master coordinator only).
  # In most managed Kubernetes environments (AKS, GKE, etc.), the size of the disk has a direc
  # the provisioned and maximum performance of the disk.
  volumeSize: 128Gi

  # Kubernetes Service Account
  # Uncomment below to use a custom Kubernetes service account for the coordinator.
  #serviceAccount: ""

  # Uncomment the lines below to use a custom set of extra startup parameters for the coordina
  #extraStartParams: >-
  #  -DsomeKey=someValue

  # Extra Init Containers
  # Uncomment the below lines to use a custom set of extra init containers for the coordinator
  #extraInitContainers: |
  #  - name: extra-init-container
  #    image: {{ $.Values.image }}:{{ $.Values.imageTag }}
  #    command: ["echo", "Hello World"]

  # Extra Volumes
  # Uncomment below to use a custom set of extra volumes for the coordinator.
  #extraVolumes: []

  # Extra Volume Mounts
  # Uncomment below to use a custom set of extra volume mounts for the coordinator.
  #extraVolumeMounts: []

  # Uncomment this value to use a different storage class for the coordinator.
  #storageClass:

  # These values, when defined, override the provided shared annotations, labels, node selecto
  # Uncomment only if you are trying to override the chart's shared values.
  #annotations: {}
  #podAnnotations: {}
  #labels: {}
  #podLabels: {}
  #nodeSelector: {}
  #tolerations: []

  # Web UI
  web:
    port: 9047
    tls:
      # To enable TLS for the web UI, set the enabled flag to true and provide
      # the appropriate Kubernetes TLS secret.
      enabled: false

      # To create a TLS secret, use the following command:
      # kubectl create secret tls ${TLS_SECRET_NAME} --key ${KEY_FILE} --cert ${CERT_FILE}
      secret: dremio-tls-secret-ui

  # ODBC/JDBC Client
  client:
    port: 31010
    tls:
      # To enable TLS for the client endpoints, set the enabled flag to
      # true and provide the appropriate Kubernetes TLS secret. Client
      # endpoint encryption is available only on Dremio Enterprise
      # Edition and should not be enabled otherwise.
      enabled: false

      # To create a TLS secret, use the following command:
      # kubectl create secret tls ${TLS_SECRET_NAME} --key ${KEY_FILE} --cert ${CERT_FILE}
      secret: dremio-tls-secret-client

  # Flight Client
  flight:
    port: 32010
    tls:
      # To enable TLS for the Flight endpoints, set the enabled flag to
      # true and provide the appropriate Kubernetes TLS secret.
      enabled: false

      # To create a TLS secret, use the following command:
      # kubectl create secret tls ${TLS_SECRET_NAME} --key ${KEY_FILE} --cert ${CERT_FILE}
      secret: dremio-tls-secret-flight

# Dremio Executor
executor:
  # CPU & Memory
  # Memory allocated to each executor, expressed in MB.
  # CPU allocated to each executor, expressed in CPU cores.
  cpu: 15
  memory: 122800

  # Engines
  # Engine names be 47 characters or less and be lowercase alphanumber characters or '-'.
  # Note: The number of executor pods will be the length of the array below * count.
  engines: ["default"]
  count: 3

  # Executor volume size.
  volumeSize: 128Gi

  # Kubernetes Service Account
  # Uncomment below to use a custom Kubernetes service account for executors.
  #serviceAccount: ""

  # Uncomment the lines below to use a custom set of extra startup parameters for executors.
  #extraStartParams: >-
  #  -DsomeKey=someValue

  # Extra Init Containers
  # Uncomment the below lines to use a custom set of extra init containers for executors.
  #extraInitContainers: |
  #  - name: extra-init-container
  #    image: {{ $.Values.image }}:{{ $.Values.imageTag }}
  #    command: ["echo", "Hello World"]

  # Extra Volumes
  # Uncomment below to use a custom set of extra volumes for executors.
  #extraVolumes: []

  # Extra Volume Mounts
  # Uncomment below to use a custom set of extra volume mounts for executors.
  #extraVolumeMounts: []

  # Uncomment this value to use a different storage class for executors.
  #storageClass:

  # Dremio C3
  # Designed for use with NVMe storage devices, performance may be impacted when using
  # persistent volume storage that resides far from the physical node.
  cloudCache:
    enabled: true

    # Uncomment this value to use a different storage class for C3.
    #storageClass:

    # Volumes to use for C3, specify multiple volumes if there are more than one local
    # NVMe disk that you would like to use for C3.
    #
    # The below example shows all valid options that can be provided for a volume.
    # volumes:
    # - name: "dremio-default-c3"
    #   size: 100Gi
    #   storageClass: "local-nvme"
    volumes:
    - size: 100Gi

  # These values, when defined and not empty, override the provided shared annotations, labels
  # Uncomment only if you are trying to override the chart's shared values.
  #annotations: {}
  #podAnnotations: {}
  #labels: {}
  #podLabels: {}
  #nodeSelector: {}
  #tolerations: []

  # Engine Overrides
  #
  # The settings above are overridable on a per-engine basis. These
  # values here will take precedence and *override* the configured values
  # on a per-engine basis. Engine overrides are matched with the name in the above
  # list of engines.
  #
  # Special per-engine parameters:
  # volumeClaimName: For each engine, you can optionally specify a value for the volume claim
  #   this value must be unique to each engine or may cause unintended consequences. This valu
  #   primarily intended for transitioning an existing single engine to a multi-engine configu
  #   where there may already have been existing persistent volumes.
  #
  # The below example shows all valid options that can be overridden on a per-engine basis.
  # engineOverride:
  #   engineNameHere:
  #     cpu: 1
  #     memory: 122800
  #
  #     count: 1
  #
  #     annotations: {}
  #     podAnnotations: {}
  #     labels: {}
  #     podLabels: {}
  #     nodeSelector: {}
  #     tolerations: []
  #
  #     serviceAccount: ""
  #
  #     extraStartParams: >-
  #       -DsomeCustomKey=someCustomValue
  #
  #     extraInitContainers: |
  #       - name: extra-init-container
  #         image: {{ $.Values.image }}:{{ $.Values.imageTag }}
  #         command: ["echo", "Hello World"]
  #
  #
  #     extraVolumes: []
  #     extraVolumeMounts: []
  #
  #     volumeSize: 50Gi
  #     storageClass: managed-premium
  #     volumeClaimName: dremio-default-executor-volume
  #
  #     cloudCache:
  #       enabled: true
  #
  #       storageClass: ""
  #
  #       volumes:
  #       - name: "default-c3"
  #         size: 100Gi
  #         storageClass: ""

# Zookeeper
zookeeper:
  # The Zookeeper image used in the cluster.
  image: dockerhub.dsj.com:18443/library/kubernetes-zookeeper				### image changed to the private registry
  imageTag: 1.0-3.4.10

  # CPU & Memory
  # Memory allocated to each zookeeper, expressed in MB.
  # CPU allocated to each zookeeper, expressed in CPU cores.
  cpu: 0.5
  memory: 1024
  count: 3

  volumeSize: 50Gi									### ZooKeeper volume size changed to 50Gi

  # Kubernetes Service Account
  # Uncomment below to use a custom Kubernetes service account for Zookeeper.
  #serviceAccount: ""

  # Uncomment this value to use a different storage class for Zookeeper.
  #storageClass:

  # These values, when defined, override the provided shared annotations, labels, node selecto
  # Uncomment only if you are trying to override the chart's shared values.
  #annotations: {}
  #podAnnotations: {}
  #labels: {}
  #podLabels: {}
  #nodeSelector: {}
  #tolerations: []

# Control where uploaded files are stored for Dremio.
# For more information, see https://docs.dremio.com/deployment/distributed-storage.html
distStorage:
  # The supported distributed storage types are: local, aws, azure, gcp, or azureStorage.
  #
  # local: Not recommended for production use. When using local, dist-caching is disabled.
  # aws: AWS S3, additional parameters required, see "aws" section.
  # azure: ADLS Gen 1, additional parameters required, see "azure" section.
  # azureStorage: Azure Storage Gen2, additional paramters required, see "azureStorage" sectio
  # gcp: Google Cloud Storage, additional parameters required, see "gcp" section.
  type: "local"

  # Google Cloud Storage
  #
  # bucketName: The name of the GCS bucket for distributed storage.
  # path: The path, relative to the bucket, to create Dremio's directories.
  # authentication: Valid types are: serviceAccountKeys or auto.
  #   - When using "auto" authentication, Dremio uses Google Application Default Credentials t
  #     authenticate. This is platform dependent and may not be available in all Kubernetes cl
  #   - Note: When using a GCS bucket on GKE, we recommend enabling Workload Identity and conf
  #       a Kubernetes Service Accountfor Dremio with an associated workload identity that
  #       has access to the GCS bucket.
  # credentials: If using serviceAccountKeys authentication, uncomment the credentials section
  gcp:
    bucketName: "GCS Bucket Name"
    path: "/"
    authentication: "auto"

    # If using serviceAccountKeys, uncomment the section below, referencing the values from
    # the service account credentials JSON file that you generated:
    #
    #credentials:
    #  projectId: GCP Project ID that the Google Cloud Storage bucket belongs to.
    #  clientId: Client ID for the service account that has access to Google Cloud Storage buc
    #  clientEmail: Email for the service account that has access to Google Cloud Storage buck
    #  privateKeyId: Private key ID for the service account that has access to Google Cloud St
    #  privateKey: |-
    #    -----BEGIN PRIVATE KEY-----\n Replace me with full private key value. \n-----END PRIV

    # Extra Properties
    # Use the extra properties block to provide additional parameters to configure the distrib
    # storage in the generated core-site.xml file.
    #
    #extraProperties: |
    #  <property>
    #    <name></name>
    #    <value></value>
    #  </property>

  # AWS S3
  # For more details of S3 configuration, see https://docs.dremio.com/deployment/dist-store-co
  #
  # bucketName: The name of the S3 bucket for distributed storage.
  # path: The path, relative to the bucket, to create Dremio's directories.
  # authentication: Valid types are: accessKeySecret, instanceMetadata, or awsProfile.
  #   - Note: Instance metadata is only supported in AWS EKS and requires that the
  #       EKS worker node IAM role is configured with sufficient access rights. At this time,
  #       Dremio does not support using an K8s service account based IAM role.
  # credentials: If using accessKeySecret authentication, uncomment the credentials section be
  aws:
    bucketName: "AWS Bucket Name"
    path: "/"
    authentication: "metadata"
    # If using accessKeySecret for authentication against S3, uncomment the lines below and us
    # to configure the appropriate credentials.
    #
    #credentials:
    #  accessKey: "AWS Access Key"
    #  secret: "AWS Secret"
    #
    # If using awsProfile for authentication against S3, uncomment the lines below and use the
    # to choose the appropriate profile.
    #
    #credentials:
    #  awsProfileName: "default"
    #
    # Extra Properties
    # Use the extra properties block to provide additional parameters to configure the distrib
    # storage in the generated core-site.xml file.
    #
    #extraProperties: |
    #  <property>
    #    <name></name>
    #    <value></value>
    #  </property>

  # Azure ADLS Gen 1
  # For more details of Azure ADLS Gen 1 storage configuration, see
  # https://docs.dremio.com/deployment/dist-store-config.html#azure-data-lake-storage-gen1
  #
  # datalakeStoreName: The ADLS Gen 1
  azure:
    datalakeStoreName: "Azure DataLake Store Name"
    path: "/"
    credentials:
      applicationId: "Azure Application ID"
      secret: "Azure Application Secret"
      oauth2Endpoint: "Azure OAuth2 Endpoint"

    # Extra Properties
    # Use the extra properties block to provide additional parameters to configure the distrib
    # storage in the generated core-site.xml file.
    #
    #extraProperties: |
    #  <property>
    #    <name></name>
    #    <value></value>
    #  </property>

  # Azure Storage Gen2
  # For more details of Azure Storage Gen2 storage configuration, see
  # https://docs.dremio.com/deployment/dist-store-config.html#azure-storage
  #
  # accountName: The name of the storage account.
  # filesystem: The name of the blob container to use within the storage account.
  # path: The path, relative to the filesystem, to create Dremio's directories.
  # credentials:
  azureStorage:
    accountName: "Azure Storage Account Name"
    filesystem: "Azure Storage Account Blob Container"
    path: "/"
    credentials:
      accessKey: "Azure Storage Account Access Key"

    # Extra Properties
    # Use the extra properties block to provide additional parameters to configure the distrib
    # storage in the generated core-site.xml file.
    #
    #extraProperties: |
    #  <property>
    #    <name></name>
    #    <value></value>
    #  </property>

# Dremio Start Parameters
# Uncomment the below lines to provide extra start paramaters to be passed directly to Dremio
#extraStartParams: >-
#  -DsomeKey=someValue

# Extra Init Containers
# Uncomment the below lines to provide extra init containers to be run first.
#extraInitContainers: |
#  - name: extra-init-container
#    image: {{ $.Values.image }}:{{ $.Values.imageTag }}
#    command: ["echo", "Hello World"]

# Kubernetes Service Account
# Uncomment the below line to provide a Kubernetes service account that Dremio should run with
#serviceAccount: ""

# Extra Volumes
# Array to add extra volumes to all Dremio resources.
extraVolumes: []

# Extra Volume Mounts
# Array to add extra volume mounts to all Dremio resources, normally used in conjunction wtih
extraVolumeMounts: []

# Dremio Service
# The dremio-client service exposes the service for access outside of the Kubernetes cluster.
service:
  type: LoadBalancer

  # These values, when defined and not empty, override the provided shared annotations and lab
  # Uncomment only if you are trying to override the chart's shared values.
  #annotations: {}
  #labels: {}

  # If the loadBalancer supports sessionAffinity and you have more than one coordinator,
  # uncomment the below line to enable session affinity.
  #sessionAffinity: ClientIP

  # Enable the following flag if you wish to route traffic through a shared VPC
  # for the LoadBalancer's external IP.
  # The chart is setup for internal IP support for AKS, EKS, GKE.
  # For more information, see https://kubernetes.io/docs/concepts/services-networking/service/
  #internalLoadBalancer: true

  # If you have a static IP allocated for your load balancer, uncomment the following
  # line and set the IP to provide the static IP used for the load balancer.
  # Note: The service type must be set to LoadBalancer for this value to be used.
  #loadBalancerIP: 0.0.0.0

# To use custom storage class, uncomment below.
# Otherwise the default storage class configured for your K8s cluster is used.
#storageClass: managed-premium

# For private and protected docker image repository, you should store
# the credentials in a kubernetes secret and provide the secret name
# here.  For more information, see
# https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# imagePullSecrets:
#  - secretname


  3. Build the offline images
# Download on an Internet-connected machine
[root@master-2 ~]# docker pull dremio/dremio-oss
[root@master-2 ~]# docker pull k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10
# Save the images to tar archives
[root@master-2 ~]# docker save -o dremio.tar dremio/dremio-oss:latest
[root@master-2 ~]# docker save -o zookeeper-1.0-3.4.10.tar k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10

  • Upload the archives to the offline environment
# Load the images
[root@k8s-master-1 ~]# docker load -i dremio.tar
[root@k8s-master-1 ~]# docker load -i zookeeper-1.0-3.4.10.tar
# Push the images to the private registry
[root@k8s-master-1 ~]# docker tag dremio/dremio-oss:latest dockerhub.dsj.com:18443/library/dremio-oss:latest
[root@k8s-master-1 ~]# docker push dockerhub.dsj.com:18443/library/dremio-oss:latest
[root@k8s-master-1 ~]# docker tag k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10 dockerhub.dsj.com:18443/library/kubernetes-zookeeper:1.0-3.4.10
[root@k8s-master-1 ~]# docker push dockerhub.dsj.com:18443/library/kubernetes-zookeeper:1.0-3.4.10

  1. Deploy and install
[root@k8s-master-1 dremio]# pwd
/root/yaml/dremio
[root@k8s-master-1 dremio]# ls
Chart.yaml  config  docs  LICENSE.md  README.md  templates  values.yaml
[root@k8s-master-1 dremio]# helm install --name dremio --namespace dremio . 

# List the installed releases
[root@k8s-master-1 dremio]# helm list
NAME                    NAMESPACE       REVISION        UPDATED                                 STATUS          CHART                   APP VERSION
dolphinscheduler        default         1               2022-03-25 13:48:14.477718902 +0530 IST deployed        dolphinscheduler-2.0.3  2.0.5
dremio                  default         1               2022-03-25 13:17:22.182284837 +0530 IST deployed        dremio-2.0.2
flink                   default         2               2022-05-25 06:07:20.941169711 +0530 IST deployed        flink-0.2.0             1.11.2
trino                   default         1               2022-04-21 08:33:53.005101636 +0530 IST deployed        trino-1.14.0            375

# Uninstall dremio
[root@k8s-master-1 dremio]# helm uninstall dremio

# Upgrade the helm release
[root@k8s-master-1 dremio]# helm upgrade dremio .


  1. Check the installation result
[root@k8s-master-1 dremio]# kubectl get pod
NAME                                     READY   STATUS    RESTARTS      AGE
dremio-executor-0                        1/1     Running   0             16h
dremio-executor-1                        1/1     Running   0             16h
dremio-executor-2                        1/1     Running   0             16h
dremio-master-0                          1/1     Running   0             16h

[root@k8s-master-1 dremio]# kubectl get pvc
dremio-default-executor-c3-0-dremio-executor-0           Bound    pvc-fc0afdf3-a80f-4492-b3bd-04e9b1afe4ea   100Gi      RWO            glusterfs      61d
dremio-default-executor-c3-0-dremio-executor-1           Bound    pvc-193c7a04-655d-4ccb-9f43-0790a15ef825   100Gi      RWO            glusterfs      61d
dremio-default-executor-c3-0-dremio-executor-2           Bound    pvc-5766159d-6d33-4f65-ae54-3d862273a34f   100Gi      RWO            glusterfs      61d
dremio-default-executor-volume-dremio-executor-0         Bound    pvc-f9e4da84-21b1-445f-9ba0-2785fbc074e3   128Gi      RWO            glusterfs      61d
dremio-default-executor-volume-dremio-executor-1         Bound    pvc-73cbdbf7-3523-4aa5-bf76-5331990dbc82   128Gi      RWO            glusterfs      61d
dremio-default-executor-volume-dremio-executor-2         Bound    pvc-299f4ba8-8c87-4648-9137-e14b4a88c119   128Gi      RWO            glusterfs      61d
dremio-master-volume-dremio-master-0                     Bound    pvc-d9ed1bdb-eed2-4963-ae94-739019e5f10b   128Gi      RWO            glusterfs      61d

[root@k8s-master-1 dremio]# kubectl get svc
dremio-client                                            LoadBalancer   10.233.60.72    <pending>     31010:31732/TCP,9047:31867/TCP,32010:30424/TCP           16h
dremio-cluster-pod                                       ClusterIP      None            <none>        9999/TCP                                                 16h

[root@k8s-master-1 dremio]# kubectl get cm
dremio-config                6      16h
dremio-hive2-config          1      16h
dremio-hive3-config          1      16h



DolphinScheduler

DolphinScheduler is deployed with Helm. Reference: https://dolphinscheduler.apache.org/zh-cn/docs/2.0.5/user_doc/guide/installation/kubernetes.html

  1. Download the offline package on an internet-connected machine
# Download the source archive
[root@master-2 ~]# wget https://dlcdn.apache.org/dolphinscheduler/2.0.5/apache-dolphinscheduler-2.0.5-src.tar.gz
[root@master-2 ~]# tar -zxvf apache-dolphinscheduler-2.0.5-src.tar.gz
[root@master-2 ~]# cd apache-dolphinscheduler-2.0.5-src/docker/kubernetes/dolphinscheduler
[root@master-2 ~]# helm repo add bitnami https://charts.bitnami.com/bitnami
[root@master-2 ~]# helm dependency update .
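
Once the dependencies have been pulled into charts/, the chart directory can be bundled into a single archive for transfer to the offline environment; a sketch (a plain tar of the directory works equally well):

[root@master-2 dolphinscheduler]# helm package .
# produces dolphinscheduler-<chart-version>.tgz (2.0.3 for this chart) in the current directory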

  2. Modify the configuration
[root@k8s-master-1 dolphinscheduler]# vim values.yaml
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Default values for dolphinscheduler-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

timezone: "Asia/Shanghai"

image:
  repository: "dockerhub.dsj.com:18443/library/dolphinscheduler"			##修改镜像tag为私有仓库
  tag: "2.0.5"
  pullPolicy: "Always"														##修改镜像拉取策略
  pullSecret: ""

## If not exists external database, by default, Dolphinscheduler's database will use it.
postgresql:
  enabled: true
  postgresqlUsername: "root"
  postgresqlPassword: "root"
  postgresqlDatabase: "dolphinscheduler"
  persistence:
    enabled: true
    size: "100Gi"											##调整pvc存储卷大小
    storageClass: "glusterfs"								##修改动态存储为glusterfs

## If exists external database, and set postgresql.enable value to false.
## external database will be used, otherwise Dolphinscheduler's database will be used.
externalDatabase:
  type: "postgresql"
  driver: "org.postgresql.Driver"
  host: "localhost"
  port: "5432"
  username: "root"
  password: "root"
  database: "dolphinscheduler"
  params: "characterEncoding=utf8"

## If not exists external zookeeper, by default, Dolphinscheduler's zookeeper will use it.
zookeeper:
  enabled: true
  tickTime: 3000
  maxSessionTimeout: 60000
  initLimit: 300
  maxClientCnxns: 2000
  fourlwCommandsWhitelist: "srvr,ruok,wchs,cons"
  persistence:
    enabled: true
    size: "100Gi"								##调整存储卷大小
    storageClass: "glusterfs"					##修改动态存储为glusterfs
  zookeeperRoot: "/dolphinscheduler"

## If exists external zookeeper, and set zookeeper.enable value to false.
## external zookeeper will be used, otherwise Dolphinscheduler's zookeeper will be used.
externalZookeeper:
  zookeeperQuorum: "127.0.0.1:2181"
  zookeeperRoot: "/dolphinscheduler"

common:
  ## Configmap
  configmap:
    DOLPHINSCHEDULER_OPTS: ""
    DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
    RESOURCE_STORAGE_TYPE: "HDFS"
    RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
    FS_DEFAULT_FS: "file:///"
    FS_S3A_ENDPOINT: "s3.xxx.amazonaws.com"
    FS_S3A_ACCESS_KEY: "xxxxxxx"
    FS_S3A_SECRET_KEY: "xxxxxxx"
    HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE: "false"
    JAVA_SECURITY_KRB5_CONF_PATH: "/opt/krb5.conf"
    LOGIN_USER_KEYTAB_USERNAME: "hdfs@HADOOP.COM"
    LOGIN_USER_KEYTAB_PATH: "/opt/hdfs.keytab"
    KERBEROS_EXPIRE_TIME: "2"
    HDFS_ROOT_USER: "hdfs"
    RESOURCE_MANAGER_HTTPADDRESS_PORT: "8088"
    YARN_RESOURCEMANAGER_HA_RM_IDS: ""
    YARN_APPLICATION_STATUS_ADDRESS: "http://ds1:%s/ws/v1/cluster/apps/%s"
    YARN_JOB_HISTORY_STATUS_ADDRESS: "http://ds1:19888/ws/v1/history/mapreduce/jobs/%s"
    DATASOURCE_ENCRYPTION_ENABLE: "false"
    DATASOURCE_ENCRYPTION_SALT: "!@#$%^&*"
    SUDO_ENABLE: "true"
    # dolphinscheduler env
    HADOOP_HOME: "/opt/soft/hadoop"
    HADOOP_CONF_DIR: "/opt/soft/hadoop/etc/hadoop"
    SPARK_HOME1: "/opt/soft/spark1"
    SPARK_HOME2: "/opt/soft/spark2"
    PYTHON_HOME: "/usr/bin/python"
    JAVA_HOME: "/usr/local/openjdk-8"
    HIVE_HOME: "/opt/soft/hive"
    FLINK_HOME: "/opt/soft/flink"
    DATAX_HOME: "/opt/soft/datax"
    SESSION_TIMEOUT_MS: 60000
    ORG_QUARTZ_THREADPOOL_THREADCOUNT: "25"
    ORG_QUARTZ_SCHEDULER_BATCHTRIGGERACQUISTITIONMAXCOUNT: "1"
  ## Shared storage persistence mounted into api, master and worker, such as Hadoop, Spark, Flink and DataX binary package
  sharedStoragePersistence:
    enabled: true
    mountPath: "/opt/soft"
    accessModes:
    - "ReadWriteMany"
    ## storageClassName must support the access mode: ReadWriteMany
    storageClassName: "glusterfs"						###修改动态存储为glusterfs
    storage: "100Gi"									###调整pvc存储卷大小
  ## If RESOURCE_STORAGE_TYPE is HDFS and FS_DEFAULT_FS is file:///, fsFileResourcePersistence should be enabled for resource storage
  fsFileResourcePersistence:
    enabled: true										### enable
    accessModes:
    - "ReadWriteMany"
    ## storageClassName must support the access mode: ReadWriteMany
    storageClassName: "glusterfs"						###修改动态存储为glusterfs
    storage: "100Gi"									###调整pvc存储卷大小

master:
  ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  podManagementPolicy: "Parallel"
  ## Replicas is the desired number of replicas of the given Template.
  replicas: "3"
  ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  ## Clients such as tools and libraries can retrieve this metadata.
  annotations: {}
  ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  affinity: {}
  ## NodeSelector is a selector which must be true for the pod to fit on a node.
  ## Selector which must match a node's labels for the pod to be scheduled on that node.
  ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  nodeSelector: {}
  ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  tolerations: []
  ## Compute Resources required by this container. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  resources: {}
  # resources:
  #   limits:
  #     memory: "8Gi"
  #     cpu: "4"
  #   requests:
  #     memory: "2Gi"
  #     cpu: "500m"
  ## Configmap
  configmap:
    LOGGER_SERVER_OPTS: "-Xms512m -Xmx512m -Xmn256m"
    MASTER_SERVER_OPTS: "-Xms1g -Xmx1g -Xmn512m"
    MASTER_EXEC_THREADS: "100"
    MASTER_EXEC_TASK_NUM: "20"
    MASTER_DISPATCH_TASK_NUM: "3"
    MASTER_HOST_SELECTOR: "LowerWeight"
    MASTER_HEARTBEAT_INTERVAL: "10"
    MASTER_TASK_COMMIT_RETRYTIMES: "5"
    MASTER_TASK_COMMIT_INTERVAL: "1000"
    MASTER_MAX_CPULOAD_AVG: "-1"
    MASTER_RESERVED_MEMORY: "0.3"
    MASTER_FAILOVER_INTERVAL: 10
    MASTER_KILL_YARN_JOB_WHEN_HANDLE_FAILOVER: "true"
    ORG_QUARTZ_THREADPOOL_THREADCOUNT: "25"
    ORG_QUARTZ_SCHEDULER_BATCHTRIGGERACQUISTITIONMAXCOUNT: "1"
    MASTER_PERSIST_EVENT_STATE_THREADS: 10
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  ## A claim in this list takes precedence over any volumes in the template, with the same name.
  persistentVolumeClaim:
    enabled: true
    accessModes:
    - "ReadWriteOnce"
    storageClassName: "glusterfs"					##修改动态存储为glusterfs
    storage: "100Gi"								##修改存储卷pvc

worker:
  ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
  podManagementPolicy: "Parallel"
  ## Replicas is the desired number of replicas of the given Template.
  replicas: "3"
  ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  ## Clients such as tools and libraries can retrieve this metadata.
  annotations: {}
  ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  affinity: {}
  ## NodeSelector is a selector which must be true for the pod to fit on a node.
  ## Selector which must match a node's labels for the pod to be scheduled on that node.
  ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  nodeSelector: {}
  ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  tolerations: []
  ## Compute Resources required by this container. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  resources: {}
  # resources:
  #   limits:
  #     memory: "8Gi"
  #     cpu: "4"
  #   requests:
  #     memory: "2Gi"
  #     cpu: "500m"
  ## Configmap
  configmap:
    LOGGER_SERVER_OPTS: "-Xms512m -Xmx512m -Xmn256m"
    WORKER_SERVER_OPTS: "-Xms1g -Xmx1g -Xmn512m"
    WORKER_EXEC_THREADS: "100"
    WORKER_HEARTBEAT_INTERVAL: "10"
    WORKER_HOST_WEIGHT: "100"
    WORKER_MAX_CPULOAD_AVG: "-1"
    WORKER_RESERVED_MEMORY: "0.3"
    WORKER_GROUPS: "default"
    WORKER_RETRY_REPORT_TASK_STATUS_INTERVAL: 600
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
  ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
  ## A claim in this list takes precedence over any volumes in the template, with the same name.
  persistentVolumeClaim:
    enabled: true									### enable
    ## dolphinscheduler data volume
    dataPersistentVolume:
      enabled: true									### enable
      accessModes:
      - "ReadWriteOnce"
      storageClassName: "glusterfs"					##修改动态存储为glusterfs
      storage: "100Gi"								##修改pvc大小
    ## dolphinscheduler logs volume
    logsPersistentVolume:
      enabled: true									### enable
      accessModes:
      - "ReadWriteOnce"
      storageClassName: "glusterfs"					##修改动态存储为glusterfs
      storage: "100Gi"								##修改pvc大小

alert:
  ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  replicas: "1"
  ## The deployment strategy to use to replace existing pods with new ones.
  strategy:
    type: "RollingUpdate"
    rollingUpdate:
      maxSurge: "25%"
      maxUnavailable: "25%"
  ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  ## Clients such as tools and libraries can retrieve this metadata.
  annotations: {}
  ## NodeSelector is a selector which must be true for the pod to fit on a node.
  ## Selector which must match a node's labels for the pod to be scheduled on that node.
  ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  affinity: {}
  ## Compute Resources required by this container. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  nodeSelector: {}
  ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  tolerations: []
  ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  resources: {}
  # resources:
  #   limits:
  #     memory: "2Gi"
  #     cpu: "1"
  #   requests:
  #     memory: "1Gi"
  #     cpu: "500m"
  ## Configmap
  configmap:
    ALERT_SERVER_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  persistentVolumeClaim:
    enabled: true										## enable
    accessModes:
    - "ReadWriteOnce"
    storageClassName: "glusterfs"						###修改动态存储为glusterfs
    storage: "100Gi"									###调整pvc大小

api:
  ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
  replicas: "1"
  ## The deployment strategy to use to replace existing pods with new ones.
  strategy:
    type: "RollingUpdate"
    rollingUpdate:
      maxSurge: "25%"
      maxUnavailable: "25%"
  ## You can use annotations to attach arbitrary non-identifying metadata to objects.
  ## Clients such as tools and libraries can retrieve this metadata.
  annotations: {}
  ## NodeSelector is a selector which must be true for the pod to fit on a node.
  ## Selector which must match a node's labels for the pod to be scheduled on that node.
  ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  affinity: {}
  ## Compute Resources required by this container. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
  nodeSelector: {}
  ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
  ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
  tolerations: []
  ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
  ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
  resources: {}
  # resources:
  #   limits:
  #     memory: "2Gi"
  #     cpu: "1"
  #   requests:
  #     memory: "1Gi"
  #     cpu: "500m"
  ## Configmap
  configmap:
    API_SERVER_OPTS: "-Xms512m -Xmx512m -Xmn256m"
  ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  livenessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
  ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: "30"
    periodSeconds: "30"
    timeoutSeconds: "5"
    failureThreshold: "3"
    successThreshold: "1"
  ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
  ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
  persistentVolumeClaim:
    enabled: true								### enable
    accessModes:
    - "ReadWriteOnce"
    storageClassName: "glusterfs"				###修改动态存储为glusterfs
    storage: "100Gi"							###调整pvc卷大小
  service:
    ## type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer
    type: "NodePort"							##修改service类型为nodeport
    ## clusterIP is the IP address of the service and is usually assigned randomly by the master
    clusterIP: ""
    ## nodePort is the port on each node on which this service is exposed when type=NodePort
    nodePort: "30045"							##自定义端口
    ## externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service
    externalIPs: []
    ## externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service, requires Type to be ExternalName
    externalName: ""
    ## loadBalancerIP when service.type is LoadBalancer. LoadBalancer will get created with the IP specified in this field
    loadBalancerIP: ""
    ## annotations may need to be set when service.type is LoadBalancer
    ## service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT
    annotations: {}

ingress:
  enabled: true
  host: "dolphinscheduler.org"
  path: "/dolphinscheduler"
  tls:
    enabled: false
    secretName: "dolphinscheduler-tls"


  • Enable the zookeeper dependency (unpack its chart)
[root@master-2 dolphinscheduler]# cd charts/
[root@master-2 charts]# tar xf zookeeper-6.5.3.tgz
[root@master-2 charts]# ls
postgresql-10.3.18.tgz  zookeeper  zookeeper-6.5.3.tgz
[root@master-2 charts]# cd zookeeper

  • Modify its configuration file
[root@master-2 zookeeper]# vim values.yaml
image:
  registry: dockerhub.dsj.com:18443
  repository: library/zookeeper
  tag: 3.6.2-debian-10-r185

  ## Specify a imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: Always

-----------------------------------------------------------


  • Enable the postgresql dependency (unpack its chart)
[root@master-2 dolphinscheduler]# cd charts/
[root@master-2 charts]# tar xf postgresql-10.3.18.tgz
[root@master-2 charts]# ls
postgresql  postgresql-10.3.18.tgz  zookeeper  zookeeper-6.5.3.tgz
[root@master-2 charts]# cd postgresql
[root@master-2 postgresql]# ls
Chart.lock  charts  Chart.yaml  ci  files  README.md  templates  values.schema.json  values.yaml

  • Modify its configuration file
[root@k8s-master-1 postgresql]# vim values.yaml

image:
  registry: dockerhub.dsj.com:18443
  repository: library/postgresql
  tag: 11.11.0-debian-10-r71
  ## Specify a imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: Always

------------------------------------------------------
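
Unpacking the dependency .tgz files is one way to do this; with standard Helm sub-chart overrides, the same image settings can usually be set from the parent chart's values.yaml under the existing zookeeper: and postgresql: keys instead. A sketch (hypothetical addition, following the Bitnami image value layout shown above):

zookeeper:
  image:
    registry: dockerhub.dsj.com:18443
    repository: library/zookeeper
    tag: 3.6.2-debian-10-r185
    pullPolicy: Always
postgresql:
  image:
    registry: dockerhub.dsj.com:18443
    repository: library/postgresql
    tag: 11.11.0-debian-10-r71
    pullPolicy: Always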


  3. Build the offline images
# Download on an internet-connected machine
[root@master-2 ~]# docker pull apache/dolphinscheduler:2.0.5
[root@master-2 ~]# docker pull docker.io/bitnami/zookeeper:3.6.2-debian-10-r185
[root@master-2 ~]# docker pull docker.io/bitnami/postgresql:11.11.0-debian-10-r71
# Save the images to tar archives
[root@master-2 ~]# docker save -o dolphinscheduler.tar apache/dolphinscheduler:2.0.5
[root@master-2 ~]# docker save -o zookeeper-3.6.2-debian-10-r185.tar docker.io/bitnami/zookeeper:3.6.2-debian-10-r185
[root@master-2 ~]# docker save -o postgresql-11.11.0-debian-10-r71.tar docker.io/bitnami/postgresql:11.11.0-debian-10-r71

  • Upload the offline archives to the machine in the offline environment
# Load the images
[root@k8s-master-1 ~]# docker load -i dolphinscheduler.tar 
[root@k8s-master-1 ~]# docker load -i zookeeper-3.6.2-debian-10-r185.tar
[root@k8s-master-1 ~]# docker load -i postgresql-11.11.0-debian-10-r71.tar
# Tag and push the images to the private registry
[root@k8s-master-1 ~]# docker tag apache/dolphinscheduler:2.0.5 dockerhub.dsj.com:18443/library/dolphinscheduler:2.0.5
[root@k8s-master-1 ~]# docker tag docker.io/bitnami/zookeeper:3.6.2-debian-10-r185 dockerhub.dsj.com:18443/library/zookeeper:3.6.2-debian-10-r185
[root@k8s-master-1 ~]# docker tag docker.io/bitnami/postgresql:11.11.0-debian-10-r71 dockerhub.dsj.com:18443/library/postgresql:11.11.0-debian-10-r71
[root@k8s-master-1 ~]# docker push dockerhub.dsj.com:18443/library/dolphinscheduler:2.0.5
[root@k8s-master-1 ~]# docker push dockerhub.dsj.com:18443/library/zookeeper:3.6.2-debian-10-r185
[root@k8s-master-1 ~]# docker push dockerhub.dsj.com:18443/library/postgresql:11.11.0-debian-10-r71

  4. Deploy and install
[root@k8s-master-1 dolphinscheduler]# pwd
/root/yaml/dolphinscheduler

[root@k8s-master-1 dolphinscheduler]# ls
Chart.lock  charts  Chart.yaml  templates  values.yaml

[root@k8s-master-1 dolphinscheduler]# helm install --name dolphinscheduler --namespace dolphinscheduler . 

# List the installed releases
[root@k8s-master-1 dolphinscheduler]# helm list
NAME                    NAMESPACE       REVISION        UPDATED                                 STATUS          CHART                   APP VERSION
dolphinscheduler        default         1               2022-03-25 13:48:14.477718902 +0530 IST deployed        dolphinscheduler-2.0.3  2.0.5
dremio                  default         1               2022-03-25 13:17:22.182284837 +0530 IST deployed        dremio-2.0.2
flink                   default         2               2022-05-25 06:07:20.941169711 +0530 IST deployed        flink-0.2.0             1.11.2
trino                   default         1               2022-04-21 08:33:53.005101636 +0530 IST deployed        trino-1.14.0            375

# Uninstall dolphinscheduler
[root@k8s-master-1 dolphinscheduler]# helm uninstall dolphinscheduler

# Upgrade the helm release
[root@k8s-master-1 dolphinscheduler]# helm upgrade dolphinscheduler .


  5. Check the deployment result
[root@k8s-master-1 dolphinscheduler]# kubectl get po
dolphinscheduler-alert-56684c96f-cvvhf   1/1     Running   0             61d
dolphinscheduler-api-75c94dcf57-24jmk    1/1     Running   0             61d
dolphinscheduler-master-0                1/1     Running   0             61d
dolphinscheduler-master-1                1/1     Running   0             61d
dolphinscheduler-master-2                1/1     Running   0             61d
dolphinscheduler-postgresql-0            1/1     Running   0             61d
dolphinscheduler-worker-0                1/1     Running   0             61d
dolphinscheduler-worker-1                1/1     Running   0             61d
dolphinscheduler-worker-2                1/1     Running   0             61d
dolphinscheduler-zookeeper-0             1/1     Running   0             61d

[root@k8s-master-1 dolphinscheduler]# kubectl get svc
dolphinscheduler-alert                                   ClusterIP      10.233.15.6     <none>        50052/TCP                                                61d
dolphinscheduler-api                                     NodePort       10.233.53.169   <none>        12345:30045/TCP                                          61d
dolphinscheduler-master-headless                         ClusterIP      None            <none>        5678/TCP                                                 61d
dolphinscheduler-postgresql                              ClusterIP      10.233.14.21    <none>        5432/TCP                                                 61d
dolphinscheduler-postgresql-headless                     ClusterIP      None            <none>        5432/TCP                                                 61d
dolphinscheduler-worker-headless                         ClusterIP      None            <none>        1234/TCP,50051/TCP                                       61d
dolphinscheduler-zookeeper                               ClusterIP      10.233.61.75    <none>        2181/TCP,2888/TCP,3888/TCP                               61d
dolphinscheduler-zookeeper-headless                      ClusterIP      None            <none>        2181/TCP,2888/TCP,3888/TCP                               61d

[root@k8s-master-1 dolphinscheduler]# kubectl get pvc
dolphinscheduler-alert                                   Bound    pvc-cafb6191-5912-4785-8c10-0e55e2a90ecb   100Gi      RWO            glusterfs      61d
dolphinscheduler-api                                     Bound    pvc-eaadaf3c-c9bd-469a-9156-54ceea31b0b7   100Gi      RWO            glusterfs      61d
dolphinscheduler-fs-file                                 Bound    pvc-ae98e14f-b1df-467b-95cc-95f614860e60   100Gi      RWX            glusterfs      61d
dolphinscheduler-master-dolphinscheduler-master-0        Bound    pvc-fe14bdaf-5ff5-4c72-a57d-b8718239a7b6   100Gi      RWO            glusterfs      61d
dolphinscheduler-master-dolphinscheduler-master-1        Bound    pvc-f550d128-ccc0-4684-9068-5fdf1ed4c165   100Gi      RWO            glusterfs      61d
dolphinscheduler-master-dolphinscheduler-master-2        Bound    pvc-4a98f191-9325-4340-84a3-9cfd484a67db   100Gi      RWO            glusterfs      61d
dolphinscheduler-shared                                  Bound    pvc-d5ceb050-ef7e-43fe-be2d-b3b56351587f   100Gi      RWX            glusterfs      61d
dolphinscheduler-worker-data-dolphinscheduler-worker-0   Bound    pvc-42188bc4-84dc-40a5-a476-922b31aa7b4b   100Gi      RWO            glusterfs      61d
dolphinscheduler-worker-data-dolphinscheduler-worker-1   Bound    pvc-636163d8-9a61-46f7-a1c3-f19d1a9309f2   100Gi      RWO            glusterfs      61d
dolphinscheduler-worker-data-dolphinscheduler-worker-2   Bound    pvc-b2ade0f1-b594-4017-b522-c513b2fba04b   100Gi      RWO            glusterfs      61d
dolphinscheduler-worker-logs-dolphinscheduler-worker-0   Bound    pvc-226ac643-be64-4429-8561-76605a6c9a9b   100Gi      RWO            glusterfs      61d
dolphinscheduler-worker-logs-dolphinscheduler-worker-1   Bound    pvc-210484a0-f72a-4e3c-a8f5-0b2a092ba414   100Gi      RWO            glusterfs      61d
dolphinscheduler-worker-logs-dolphinscheduler-worker-2   Bound    pvc-36cd9148-491f-4ccb-bfc9-dd7a104dab75   100Gi      RWO            glusterfs      61d

[root@k8s-master-1 dolphinscheduler]# kubectl get cm
dolphinscheduler-alert       1      61d
dolphinscheduler-api         1      61d
dolphinscheduler-common      32     61d
dolphinscheduler-master      16     61d
dolphinscheduler-worker      9      61d
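
With the api Service exposed on NodePort 30045 (see the values.yaml and the svc output above), the web UI should be reachable at http://<any-node-ip>:30045/dolphinscheduler. The default login documented for DolphinScheduler 2.0.x is admin / dolphinscheduler123; change it after the first sign-in. A quick reachability check (replace the placeholder with a real node IP):

# hedged check that the NodePort answers
curl -I http://<any-node-ip>:30045/dolphinscheduler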


Minio

MinIO is deployed with the Operator. Reference: https://docs.min.io/minio/k8s/deployment/deploy-minio-operator.html

  1. Download the offline resources on an internet-connected machine
# Download kubectl-minio
[root@master-2 ~]# wget https://github.com/minio/operator/releases/download/v4.4.16/kubectl-minio_4.4.16_linux_amd64 -O kubectl-minio

  • Upload kubectl-minio to the offline machine
[root@k8s-master-1 ~]# chmod +x kubectl-minio
[root@k8s-master-1 ~]# mv kubectl-minio /usr/local/bin/

  • Verify the version
[root@k8s-master-1 ~]# kubectl-minio version
v4.4.13

  2. Build the offline images (run on the internet-connected machine)
# Pull the images
[root@k8s-master-1 ~]# docker pull minio/operator:v4.4.13
[root@k8s-master-1 ~]# docker pull minio/minio:RELEASE.2022-03-26T06-49-28Z
[root@k8s-master-1 ~]# docker pull minio/console:v0.15.6
[root@k8s-master-1 ~]# docker pull busybox:1.33.1
[root@k8s-master-1 ~]# docker pull minio/kes:v0.17.6
[root@k8s-master-1 ~]# docker pull minio/operator:v4.4.10
[root@k8s-master-1 ~]# docker pull postgres:13
[root@k8s-master-1 ~]# docker pull quay.io/prometheus/prometheus:latest
# Save the images
[root@master-2 ~]# mkdir minio-images && cd minio-images
[root@master-2 minio-images]# docker save -o operator-4.4.13.tar minio/operator:v4.4.13
[root@master-2 minio-images]# docker save -o minio-RELEASE.tar minio/minio:RELEASE.2022-03-26T06-49-28Z
[root@master-2 minio-images]# docker save -o minio-console.tar minio/console:v0.15.6
[root@master-2 minio-images]# docker save -o kes.tar minio/kes:v0.17.6
[root@master-2 minio-images]# docker save -o operator.tar minio/operator:v4.4.10
[root@master-2 minio-images]# docker save -o postgres.tar postgres:13
[root@master-2 minio-images]# docker save -o busybox.tar busybox:1.33.1
[root@master-2 minio-images]# docker save -o prometheus.tar quay.io/prometheus/prometheus:latest
# Upload to the offline environment and load the images
[root@k8s-master-1 minio-images]# docker load -i operator-4.4.13.tar 
[root@k8s-master-1 minio-images]# docker load -i minio-RELEASE.tar
[root@k8s-master-1 minio-images]# docker load -i minio-console.tar 
[root@k8s-master-1 minio-images]# docker load -i busybox.tar
Loaded image: busybox:1.33.1
[root@k8s-master-1 minio-images]# docker load -i kes.tar
Loaded image: minio/kes:v0.17.6
[root@k8s-master-1 minio-images]# docker load -i operator.tar
Loaded image: minio/operator:v4.4.10
[root@k8s-master-1 minio-images]# docker load -i postgres.tar
Loaded image: postgres:13
[root@k8s-master-1 minio-images]# docker load -i prometheus.tar
Loaded image: quay.io/prometheus/prometheus:latest
# Retag the images
[root@k8s-master-1 minio-images]# docker tag minio/operator:v4.4.13 dockerhub.dsj.com:18443/library/minio-operator:v4.4.13
[root@k8s-master-1 minio-images]# docker tag minio/minio:RELEASE.2022-03-26T06-49-28Z dockerhub.dsj.com:18443/library/minio:RELEASE.2022-03-26T06-49-28Z
[root@k8s-master-1 minio-images]# docker tag minio/console:v0.15.6 dockerhub.dsj.com:18443/library/minio-console:v0.15.6
[root@k8s-master-1 minio-images]# docker tag minio/kes:v0.17.6 dockerhub.dsj.com:18443/library/minio-kes:v0.17.6
[root@k8s-master-1 minio-images]# docker tag busybox:1.33.1 dockerhub.dsj.com:18443/library/minio-busybox:1.33.1
[root@k8s-master-1 minio-images]# docker tag minio/operator:v4.4.10 dockerhub.dsj.com:18443/library/minio-operator:v4.4.10
[root@k8s-master-1 minio-images]# docker tag postgres:13 dockerhub.dsj.com:18443/library/minio-postgres:13
[root@k8s-master-1 minio-images]# docker tag quay.io/prometheus/prometheus:latest dockerhub.dsj.com:18443/library/minio-prometheus:latest
# Push the images to the private registry
[root@k8s-master-1 minio-images]# docker push dockerhub.dsj.com:18443/library/minio-operator:v4.4.13
[root@k8s-master-1 minio-images]# docker push dockerhub.dsj.com:18443/library/minio:RELEASE.2022-03-26T06-49-28Z
[root@k8s-master-1 minio-images]# docker push dockerhub.dsj.com:18443/library/minio-console:v0.15.6
[root@k8s-master-1 minio-images]# docker push dockerhub.dsj.com:18443/library/minio-kes:v0.17.6
[root@k8s-master-1 minio-images]# docker push dockerhub.dsj.com:18443/library/minio-busybox:1.33.1
[root@k8s-master-1 minio-images]# docker push dockerhub.dsj.com:18443/library/minio-operator:v4.4.10
[root@k8s-master-1 minio-images]# docker push dockerhub.dsj.com:18443/library/minio-postgres:13
[root@k8s-master-1 minio-images]# docker push dockerhub.dsj.com:18443/library/minio-prometheus:latest

  3. Initialize the MinIO Operator
[root@k8s-master-1 ~]# kubectl minio init

namespace/minio-operator created
serviceaccount/minio-operator created
clusterrole.rbac.authorization.k8s.io/minio-operator-role created
clusterrolebinding.rbac.authorization.k8s.io/minio-operator-binding created
customresourcedefinition.apiextensions.k8s.io/tenants.minio.min.io created
service/operator created
deployment.apps/minio-operator created
serviceaccount/console-sa created
clusterrole.rbac.authorization.k8s.io/console-sa-role created
clusterrolebinding.rbac.authorization.k8s.io/console-sa-binding created
configmap/console-env created
service/console created
deployment.apps/console created
-----------------

To open Operator UI, start a port forward using this command:

kubectl minio proxy -n minio-operator

-----------------


This command initializes the MinIO Operator with the following defaults:

  • Deploys the Operator into the minio-operator namespace. Pass a namespace to deploy it elsewhere. [kubectl minio init --namespace]
  • Uses cluster.local as the cluster domain when configuring the Operator's DNS hostnames. Pass a different value to change it. [kubectl minio init --cluster-domain] (see the sketch below)
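
A sketch of the non-default form, spelling out the two flags described above (the values shown are the defaults):

[root@k8s-master-1 ~]# kubectl minio init --namespace minio-operator --cluster-domain cluster.local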
  4. Verify the installation
[root@k8s-master-1 ~]# kubectl get all -n minio-operator
NAME                                 READY   STATUS    RESTARTS   AGE
pod/console-785968f468-4929k         1/1     Running   0          20s
pod/minio-operator-9fb5c6cc5-hw2g6   1/1     Running   0          20s
pod/minio-operator-9fb5c6cc5-lch2g   1/1     Running   0          20s

NAME               TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)             AGE
service/console    ClusterIP   10.233.7.108    <none>        9090/TCP,9443/TCP   14s
service/operator   ClusterIP   10.233.52.252   <none>        4222/TCP,4221/TCP   14s

NAME                             READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/console          1/1     1            1           14s
deployment.apps/minio-operator   2/2     2            2           14s

NAME                                       DESIRED   CURRENT   READY   AGE
replicaset.apps/console-785968f468         1         1         1       20s
replicaset.apps/minio-operator-9fb5c6cc5   2         2         2       20s


  5. Log in to the minio-operator console
[root@k8s-master-1 ~]# kubectl minio proxy
Starting port forward of the Console UI.

To connect open a browser and go to http://localhost:9090

Current JWT to login: eyJhbGciOiJSUzI1NiIsImtpZCI6InlmMjhfNndXcVJjdW5nVVk5Sm80YmsxX1g1OXdZdkZRb1BCSXotcXp3NncifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJtaW5pby1vcGVyYXRvciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJjb25zb2xlLXNhLXRva2VuLW1ra3Y0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImNvbnNvbGUtc2EiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI5NjZmN2UwNS0zMWMyLTRlMzItYjQzYy00N2MxNWU2MjNmNTEiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6bWluaW8tb3BlcmF0b3I6Y29uc29sZS1zYSJ9.YVTYp-x03NAFpUyAJUkJ3kOTEYQw84ERLmRZkPgA_W_d_dQMwsN_7DYdIHwARK7WRq9-rPShJoxNcqn4uFmpe6dAYQFQkgffOWMWqwIbdx-ncjoYLqaL_vHl12u8fUf8WZzv9ferB0EHz5W-alrKazA0NRFR6k5pgIywC4y8_O6D949l3EygZZ5MekBQsGvZid3aWVXe9NPnvFn8x861NSk_-hl5HofgjXsGotTQdxiS0kOIRwMRKXzGLdoARQQTkXw1CaLm4HLg7h9pV5PeFrPyT8Gk8tsl-hWfv9oQ0Etg_1K2lRHNt94PXh_fLtf2baqJ-6T6kTP9iYyWrsm0sg

Forwarding from 0.0.0.0:9090 -> 9090

# Record the JWT token above
# Press Ctrl+C to exit

# Edit the console Service to expose it outside the cluster
[root@k8s-master-1 ~]# kubectl edit svc/console -n minio-operator
spec:
  clusterIP: 10.233.7.108
  clusterIPs:
  - 10.233.7.108
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - name: http
    nodePort: 30990										### add the nodePort
    port: 9090
    protocol: TCP
    targetPort: 9090
  - name: https
    port: 9443
    protocol: TCP
    targetPort: 9443
  selector:
    app: console
  sessionAffinity: None
  type: NodePort										### change the Service type to NodePort

# Save and exit
service/console edited

# Check the result
[root@k8s-master-1 ~]# kubectl get svc -n minio-operator
NAME       TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                         AGE
console    NodePort    10.233.7.108    <none>        9090:30990/TCP,9443:30619/TCP   6m5s
operator   ClusterIP   10.233.52.252   <none>        4222/TCP,4221/TCP               6m5s
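
An equivalent non-interactive alternative to the kubectl edit step above is a JSON patch that switches the Service type and pins the http nodePort; a sketch:

[root@k8s-master-1 ~]# kubectl -n minio-operator patch svc console --type='json' \
    -p='[{"op":"replace","path":"/spec/type","value":"NodePort"},{"op":"add","path":"/spec/ports/0/nodePort","value":30990}]'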



  6. Open http://<any-node-ip>:30990 to reach the minio-operator console and log in with the JWT token

[Screenshot: MinIO Operator console login page (./images/minio_1.jpg)]

  • Click Create Tenant to create, delete, and manage MinIO tenants with custom settings (a CLI sketch follows below)
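
Tenants can also be created from the command line instead of the console; a sketch, assuming the kubectl-minio 4.4.x tenant subcommand flags and a dedicated namespace created first (the tenant name, sizes, and storage class below are illustrative):

# create the namespace for the tenant, then the tenant itself
[root@k8s-master-1 ~]# kubectl create namespace minio-tenant
[root@k8s-master-1 ~]# kubectl minio tenant create minio-tenant-1 \
    --servers 4 --volumes 8 --capacity 1Ti \
    --namespace minio-tenant --storage-class glusterfs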