6. Deploying MongoDB Clusters on k8s

I. Deploying a highly available MongoDB cluster with StorageClass + headless Service

Reference: https://github.com/cvallance/mongo-k8s-sidecar

storageclass.yaml

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: kube-system
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 192.168.86.81
            - name: NFS_PATH
              value: /data-nfs
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.86.81
            path: /data-nfs
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
provisioner: fuseim.pri/ifs
parameters:
  archiveOnDelete: "false"
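
Apply the manifest and confirm the provisioner Pod and the StorageClass exist (a quick check, assuming the file is saved as storageclass.yaml):

kubectl apply -f storageclass.yaml
kubectl -n kube-system get pods -l app=nfs-client-provisioner
kubectl get storageclass nfs-storage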

mongo-ha.yaml

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: mongo-account
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: mongo-role
rules:
- apiGroups:
  - '*'
  resources:
  - configmaps
  verbs:
  - '*'
- apiGroups:
  - '*'
  resources:
  - deployments
  verbs:
  - list
  - watch
- apiGroups:
  - '*'
  resources:
  - services
  verbs:
  - '*'
- apiGroups:
  - '*'
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: mongo-role-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: mongo-role
subjects:
- kind: ServiceAccount
  name: mongo-account
  namespace: default
---
apiVersion: v1
data:
  mongo-user.sh: |
    mongo admin -u ${MONGO_INITDB_ROOT_USERNAME} -p ${MONGO_INITDB_ROOT_PASSWORD} <<EOF
        use infra_db
        db.createUser({user: "infra", pwd: "${SECOND_USER_DB_PASSWORD}", roles: [
            { role: "readWrite", db: "infra_db" }
        ]});
    EOF
kind: ConfigMap
metadata:
  name: mongo-init
  namespace: default
---
apiVersion: v1
data:
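  # placeholder key material below; a real key would typically be generated with
  # something like: openssl rand -base64 745 (as in the static-pod section below)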
  mongo.key: |
    ahaksdnqsakdqnajhvckqaafnxasxaxaxmaskdadadsasfsdsdfsf
    schcacnctcacncuadasdadadfbsasddfbadadwsioweewvaas
    dfasasakjsvnaa
kind: ConfigMap
metadata:
  name: mongo-key
  namespace: default
---
apiVersion: v1
data:
  mongo-data-dir-permission.sh: |
    chown -R mongodb:mongodb ${MONGO_DATA_DIR}
    cp -r /var/lib/mongoKeyTemp /var/lib/mongoKey
    chown -R mongodb:mongodb /var/lib/mongoKey
    chmod 400 /var/lib/mongoKey/mongo.key
    chown -R mongodb:mongodb /var/lib/mongoKey/mongo.key
kind: ConfigMap
metadata:
  name: mongo-scripts
  namespace: default
---
apiVersion: v1
data:
  mongoRootPassword: c2hhbnRhbnViYW5zYWw=
  infraDbPassword: aW5mcmEK
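  # values are base64-encoded plaintext, generated e.g. with: echo -n 'mypassword' | base64
  # (note the -n: without it a trailing newline becomes part of the password)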
kind: Secret
metadata:
  name: mongosecret
  namespace: default
type: Opaque
---
apiVersion: v1
kind: Service
metadata:
  labels:
    name: mongo
  name: mongo
  namespace: default
spec:
  clusterIP: None
  ports:
  - port: 27017
    targetPort: 27017
  selector:
    role: mongo
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mongo
  namespace: default
spec:
  podManagementPolicy: Parallel
  replicas: 3
  selector:
    matchLabels:
      role: mongo
  serviceName: mongo
  template:
    metadata:
      labels:
        role: mongo
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "role"
                operator: In
                values:
                - mongo
            topologyKey: "kubernetes.io/hostname"
      containers:
      - args:
        - /home/mongodb/mongo-data-dir-permission.sh && docker-entrypoint.sh mongod
          --replSet=rs0 --dbpath=/var/lib/mongodb --bind_ip=0.0.0.0 --wiredTigerCacheSizeGB=2 --keyFile=/var/lib/mongoKey/mongo.key
        command:
        - /bin/sh
        - -c
        env:
        - name: MONGO_INITDB_ROOT_USERNAME
          value: root
        - name: MONGO_DATA_DIR
          value: /var/lib/mongodb
        - name: MONGO_INITDB_ROOT_PASSWORD
          valueFrom:
            secretKeyRef:
              key: mongoRootPassword
              name: mongosecret
        - name: SECOND_USER_DB_PASSWORD
          valueFrom:
            secretKeyRef:
              key: infraDbPassword
              name: mongosecret
        image: mongo:4.2.14
        imagePullPolicy: IfNotPresent
        name: mongo
        ports:
        - containerPort: 27017
        volumeMounts:
        - mountPath: /var/lib/mongodb
          name: mongo-data
        - mountPath: /docker-entrypoint-initdb.d
          name: mongoinit
        - mountPath: /home/mongodb
          name: mongopost
        - mountPath: /var/lib/mongoKeyTemp
          name: mongokey
      - env:
        - name: MONGO_SIDECAR_POD_LABELS
          value: role=mongo
        - name: KUBE_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: KUBERNETES_MONGO_SERVICE_NAME
          value: mongo
        - name: MONGODB_USERNAME
          value: root
        - name: MONGODB_DATABASE
          value: admin
        - name: MONGODB_PASSWORD
          valueFrom:
            secretKeyRef:
              key: mongoRootPassword
              name: mongosecret
        image: cvallance/mongo-k8s-sidecar:latest
        imagePullPolicy: IfNotPresent
        name: mongo-sidecar
      serviceAccountName: mongo-account
      terminationGracePeriodSeconds: 30
      volumes:
      - configMap:
          defaultMode: 493
          name: mongo-init
        name: mongoinit
      - configMap:
          defaultMode: 493
          name: mongo-scripts
        name: mongopost
      - configMap:
          defaultMode: 493
          name: mongo-key
        name: mongokey
  volumeClaimTemplates:
  - metadata:
      name: mongo-data
    spec:
      storageClassName: "nfs-storage"
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 20Gi
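
Apply the manifest; the sidecar watches Pods labeled role=mongo and assembles the replica set on its own. A quick check once mongo-0/1/2 are Running (reading the root password back out of the Secret):

kubectl apply -f mongo-ha.yaml
kubectl get pods -l role=mongo
PASS=$(kubectl get secret mongosecret -o jsonpath='{.data.mongoRootPassword}' | base64 --decode)
kubectl exec -it mongo-0 -c mongo -- mongo admin -u root -p "$PASS" \
  --eval 'rs.status().members.forEach(function(m){ print(m.name, m.stateStr) })'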

FAQ:
1. If the cluster's default DNS domain is not cluster.local, the sidecar builds member addresses with the wrong suffix; it can be told the real domain through its KUBERNETES_CLUSTER_DOMAIN environment variable (see the snippet below).
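
A minimal sketch of the extra env entry on the mongo-sidecar container, assuming a custom domain of mycluster.local:

        - name: KUBERNETES_CLUSTER_DOMAIN
          value: mycluster.local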

II. Deploying an HA cluster with static Pods + hostPath

Environment:

Hostname   IP              k8s role       mongo role   mongo name
rook01     192.168.86.36   master,slave   PRIMARY      mongodb-1
rook02     192.168.86.37   master,slave   SECONDARY    mongodb-2
rook03     192.168.86.38   master,slave   SECONDARY    mongodb-3

MongoDB data directory on all three nodes: /data/mongodb-data

mkdir -p /etc/kubernetes/manifests
mkdir -p  /data/mongodb-data/

1. Deploy a static MongoDB Pod on each of the three nodes

Create /etc/kubernetes/manifests/mongodb-1.yaml (mongodb-2 / mongodb-3 on the other nodes) with the following content:

apiVersion: v1
kind: Pod
metadata:
  labels:
    app: mongodb
  name: mongodb-1 # change to mongodb-1/2/3 according to the plan above
spec:
  terminationGracePeriodSeconds: 60
  containers:
  - name: mongo
    image: mongo:4.2.14
    command:
    - mongod
    - --replSet
    - rs0
    - --wiredTigerCacheSizeGB=2
    - --bind_ip_all
    - --port=27017
    resources:
      limits:
        memory: 2Gi
        cpu: 1000m
      requests:
        memory: 1Gi
        cpu: 500m
    volumeMounts:
    - name: data
      mountPath: /data/db
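  # share the node's network namespace: each member is reachable at <node-ip>:27017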
  hostNetwork: true
  volumes:
  - name: data
    hostPath:
      path: /data/mongodb-data
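
The kubelet picks these manifests up automatically and suffixes each static Pod with its node name (a quick check):

kubectl get pods -o wide | grep mongodb
# expected: mongodb-1-rook01, mongodb-2-rook02, mongodb-3-rook03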

2. Initialize the PRIMARY member

kubectl exec -it mongodb-1-rook01 -- bash
mongo
use admin
rs.initiate({ _id: "rs0", members: [ { _id: 0, host : "192.168.86.36:27017" } ] } )

3. Create the administrator and privileged users

use admin
db = db.getSiblingDB("admin");db.createUser({user:"root",pwd:"rootPassw0rd",roles:["root"]});
db.createUser({user:"useradmin",pwd:"adminPassw0rd",roles:["root"]})

4. Add the remaining members

use admin
rs.add('192.168.86.37:27017')
rs.add('192.168.86.38:27017')
rs.status()

5. Configure authentication
1) Generate the keyfile

openssl rand -base64 745 >>/data/mongodb-data/key
chmod 600 /data/mongodb-data/key
scp /data/mongodb-data/key root@192.168.86.37:/data/mongodb-data/
scp /data/mongodb-data/key root@192.168.86.38:/data/mongodb-data/

2) Add the keyFile flag to the mongod startup arguments:

sed -i '/rs0/a\    - --keyFile=/data/db/key' /etc/kubernetes/manifests/mongodb-1.yaml

sed -i '/rs0/a\    - --keyFile=/data/db/key' /etc/kubernetes/manifests/mongodb-2.yaml

sed -i '/rs0/a\    - --keyFile=/data/db/key' /etc/kubernetes/manifests/mongodb-3.yaml
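
The kubelet restarts each static Pod as soon as its manifest changes. Afterwards, unauthenticated access should be refused while the root user still works (a quick check against the PRIMARY):

mongo 192.168.86.36:27017 --eval 'rs.status()'   # expected to fail: requires authentication
mongo 192.168.86.36:27017 -u root -p rootPassw0rd --authenticationDatabase admin --eval 'rs.status().ok'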

6. Add monitoring

Run one mongodb-exporter container per replica-set member and scrape them through a ServiceMonitor:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: mongo-exporter
  namespace: monitoring
  labels:
    k8s-app: mongo-exporter
spec:
  selector:
    matchLabels:
      k8s-app: mongo-exporter
  template:
    metadata:
      labels:
        k8s-app: mongo-exporter
    spec:
      tolerations:
        - effect: NoSchedule
          key: kubernetes.io/os
          value: "linux"
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: "kubernetes.io/os"
                operator: In
                values:
                - "linux"
      containers:
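      # one exporter container per replica-set member, each on its own listen port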
      - name: mongo-node01
        image: registry.baidubce.com/docker-hub/bitnami/mongodb-exporter:0.20.7
        args: ["--web.listen-address=:9104", "--compatible-mode=true", "--mongodb.uri", "mongodb://root:rootPassw0rd@192.168.86.36:27017/admin?replicaSet=rs0"]
        ports:
        - containerPort: 9104
          name: http01
        resources:
          limits:
            cpu: "1"
            memory: 2Gi
          requests:
            cpu: 500m
            memory: 1Gi
      - name: mongo-node02
        image: registry.baidubce.com/docker-hub/bitnami/mongodb-exporter:0.20.7
        args: ["--web.listen-address=:9105", "--compatible-mode=true", "--mongodb.uri", "mongodb://root:rootPassw0rd@192.168.86.37:27017/admin?replicaSet=rs0"]
        ports:
        - containerPort: 9105
          name: http02
        resources:
          limits:
            cpu: "1"
            memory: 2Gi
          requests:
            cpu: 500m
            memory: 1Gi
      - name: mongo-node03
        image: registry.baidubce.com/docker-hub/bitnami/mongodb-exporter:0.20.7
        args: ["--web.listen-address=:9106", "--compatible-mode=true", "--mongodb.uri", "mongodb://root:rootPassw0rd@192.168.86.38:27017/admin?replicaSet=rs0"]
        ports:
        - containerPort: 9106
          name: http03
        resources:
          limits:
            cpu: "1"
            memory: 2Gi
          requests:
            cpu: 500m
            memory: 1Gi
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: mongo-exporter
  name: mongo-exporter
  namespace: monitoring
spec:
  type: ClusterIP
  clusterIP: None
  ports:
  - name: http01
    port: 9104
  - name: http02
    port: 9105
  - name: http03
    port: 9106
  selector:
    k8s-app: mongo-exporter
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    k8s-app: mongo-exporter
    release: prometheus-operator
  name: mongo-exporter
  namespace: monitoring
spec:
  endpoints:
  - interval: 15s
    port: http01
    path: /metrics
    relabelings:
    - action: labeldrop
      regex: "(pod|service)"
  - interval: 15s
    port: http02
    path: /metrics
    relabelings:
    - action: labeldrop
      regex: "(pod|service)"
  - interval: 15s
    port: http03
    path: /metrics
    relabelings:
    - action: labeldrop
      regex: "(pod|service)"
  jobLabel: k8s-app
  selector:
    matchLabels:
      k8s-app: mongo-exporter
---
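
A quick scrape test, assuming the exporters are up in the monitoring namespace:

kubectl -n monitoring port-forward svc/mongo-exporter 9104:9104 &
curl -s localhost:9104/metrics | grep mongodb_up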

More usage (shard mode, monitoring specific collections, etc.):
https://github.com/percona/mongodb_exporter

Changing a MongoDB user's password

show dbs;
use dbname;
show users;
db.changeUserPassword("usertest","changepass");

III. Deploying a sharded MongoDB cluster with Helm

1. Add the Helm repo

helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update

2. Pull the mongodb-sharded chart and install it

helm search repo mongodb
NAME                    CHART VERSION   APP VERSION     DESCRIPTION                                       
bitnami/mongodb         14.13.0         7.0.6           MongoDB(R) is a relational open source NoSQL da...
bitnami/mongodb-sharded 7.9.1           7.0.6           MongoDB(R) is an open source NoSQL database tha...

helm pull bitnami/mongodb-sharded --version=7.9.1 --untar
cp mongodb-sharded/values.yaml ./values.yaml
helm install mongodb ./mongodb-sharded/ -n mongodb --create-namespace -f ./values.yaml
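
Then confirm that the mongos, shard, and config-server Pods come up:

kubectl -n mongodb get pods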

3. Add affinity to values.yaml (soft and hard are both supported)

First label and taint the target node:

kubectl label node node01 platform.chandz.com/infra=true
kubectl taint node node01 platform.chandz.com/infra=true:NoSchedule

Then merge the following into values.yaml:

shards: 1
mongos:
  replicaCount: 1
  useStatefulSet: true
  servicePerReplica:
    enabled: false
  nodeAffinityPreset:
    type: "soft"
    key: "platform.chandz.com/infra"
    values:
      - "true"
  tolerations:
    - key: "platform.chandz.com/infra"
      operator: "Exists"
      effect: "NoSchedule"
shardsvr:
  dataNode:
    replicaCount: 1
    nodeAffinityPreset:
      type: "soft"
      key: "platform.chandz.com/infra"
      values:
        - "true"
    tolerations:
      - key: "platform.chandz.com/infra"
        operator: "Exists"
        effect: "NoSchedule"
  persistence:
    accessModes:
      - ReadWriteOnce
    size: 2Gi
configsvr:
  replicaCount: 1
  persistence:
    accessModes:
      - ReadWriteOnce
    size: 1Gi
  nodeAffinityPreset:
    type: "soft"
    key: "platform.chandz.com/infra"
    values:
      - "true"
  tolerations:
    - key: "platform.chandz.com/infra"
      operator: "Exists"
      effect: "NoSchedule"
volumePermissions:
  enabled: true
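
Before upgrading, it can help to render the chart locally and confirm the affinity settings made it into the generated StatefulSets (a sketch):

helm -n mongodb template mongodb ./mongodb-sharded -f ./values.yaml | grep -B2 -A8 nodeAffinity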

4. Upgrade the release

Pass the existing root password and replica-set key back in; otherwise the chart would generate new random credentials and break authentication:

export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace "mongodb" mongodb-mongodb-sharded -o jsonpath="{.data.mongodb-root-password}" | base64 --decode)

export MONGODB_REPLICA_SET_KEY=$(kubectl get secret --namespace "mongodb" mongodb-mongodb-sharded -o jsonpath="{.data.mongodb-replica-set-key}" | base64 --decode)

helm -n mongodb upgrade mongodb ./mongodb-sharded/ -f ./values.yaml --set auth.rootPassword=$MONGODB_ROOT_PASSWORD --set auth.replicaSetKey=$MONGODB_REPLICA_SET_KEY
or, if the credentials are already kept in values.yaml:
helm -n mongodb upgrade mongodb ./mongodb-sharded -f ./values.yaml
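
A quick post-upgrade check through mongos (the service name mongodb-mongodb-sharded follows the <release>-<chart> pattern):

kubectl -n mongodb get pods
kubectl -n mongodb run mongo-client --rm -it --image=mongo:4.2.14 --restart=Never -- \
  mongo mongodb-mongodb-sharded:27017/admin -u root -p "$MONGODB_ROOT_PASSWORD" --eval 'sh.status()'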

5. FAQ

1. Scaling down a Helm-deployed MongoDB in k8s does not remove the corresponding members from the replica set inside MongoDB; they have to be removed by hand.
Scenario 1: you are lucky enough to notice the problem early; remove the stale members manually:
rs.remove('mongodb-mongodb-sharded-shard0-data-4.mongodb-mongodb-sharded-headless.mongodb.svc.cluster.local:27017')
rs.remove('mongodb-mongodb-sharded-shard0-data-3.mongodb-mongodb-sharded-headless.mongodb.svc.cluster.local:27017')

Scenario 2: luck was not on your side and the whole replica set has dropped to SECONDARY, i.e. it is read-only.
Fix: force-remove the members that are already offline:
rs0:SECONDARY> use admin
rs0:SECONDARY> cfg = rs.conf()
rs0:SECONDARY> printjson(cfg)

# remove the members with _id 3 and 4; check the ids with printjson(cfg) first so you don't remove the wrong ones
rs0:SECONDARY> cfg.members.splice(4,1)
rs0:SECONDARY> rs.reconfig(cfg, {force : true})
rs0:SECONDARY> cfg.members.splice(3,1)
rs0:SECONDARY> rs.reconfig(cfg, {force : true})
rs0:PRIMARY> rs.status()