Dynamically Creating PVCs with NFS in Kubernetes

Introduction

Persisting container data in Kubernetes is essential for stateful applications, and because workloads need to scale horizontally, network storage is usually chosen as the persistence medium. NFS (sometimes called NAS) is one of the most common options. This article walks through two ways of using NFS as backend storage to dynamically provision PVCs; I have verified both methods in a production environment.

 

Option 1: nfs-client-provisioner (older)

Option 2: csi-nfs-driver (newer)

 

Prerequisites

Kubernetes: 1.17.6
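
Before deploying either provisioner, make sure every node can actually mount the export (the nfs-utils / nfs-common package must be installed on the nodes). A quick sanity check from any node, using the server 10.159.1.105 and export /data/nfs_basedir/ that appear throughout this article:

showmount -e 10.159.1.105        # the export list should include /data/nfs_basedir
mount -t nfs -o vers=3 10.159.1.105:/data/nfs_basedir/ /mnt && umount /mnt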

 

Implementation

nfs-client-provisioner

1. Deploy nfs-client-provisioner. Replace the NFS server address and path in the file with your own.

kubectl apply -f nfs-storageclass-allInOne.yaml

nfs-storageclass-allInOne.yaml
kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-client-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: leader-locking-nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
  namespace: kube-system
spec:
  replicas: 2
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              topologyKey: kubernetes.io/hostname
              labelSelector:
                matchLabels:
                  app: nfs-client-provisioner
        # nodeAffinity:
          # requiredDuringSchedulingIgnoredDuringExecution:
            # nodeSelectorTerms:
            # - matchExpressions:
              # - key: dedicated
                # operator: In
                # values:
                # - "cmp"
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:v3.1.0-k8s1.11
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nfs-client-provisioner
            - name: NFS_SERVER
              value: 10.159.1.105  # NFS server address
            - name: NFS_PATH
              value: /data/nfs_basedir/  # NFS export directory
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.159.1.105   # NFS server address
            path: /data/nfs_basedir/  # NFS export directory
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs
  annotations:
     storageclass.kubernetes.io/is-default-class: "true"   # whether this is the default StorageClass; annotation values must be strings
provisioner: nfs-client-provisioner
reclaimPolicy: Delete # two options: with Delete, the files are removed from NFS when the PV is deleted; Retain keeps them
mountOptions:
- noresvport
- vers=3
parameters:
  archiveOnDelete: "false" # whether to archive on delete; "true" is a soft delete that only renames the PV directory, which leaves a lot of stale data behind
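
After applying, confirm that the provisioner pods are running and the StorageClass exists before moving on:

kubectl get po -n kube-system | grep nfs-client-provisioner
kubectl get sc nfs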

2. Use the newly created StorageClass to dynamically provision a PVC.

kubectl apply -f test-nfs-sc.yaml

test-nfs-sc.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx:latest
    ports:
    - containerPort: 80
    volumeMounts:
      - name: www
        mountPath: /usr/share/nginx/html
  volumes:
    - name: www
      persistentVolumeClaim:
        claimName: nginx
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx
spec:
  storageClassName: "nfs"
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi

3. Verify that the PVC was created successfully. Exec into the container and create a file, then mount the NFS export locally and check that the file exists (a sketch of this check follows the output below).

kubectl get pvc

[root@cmp-k8s-prd-10-128-148-230 nfs-ksyun]# kubectl get pvc
NAME                  STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
nginx                 Bound    pvc-b58182db-a9ca-434b-88ae-6d28c9795242   5Gi        RWX            nfs      2s
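
A minimal sketch of that check, using the NFS server from the manifests above. nfs-client-provisioner creates one directory per PV under the export, named <namespace>-<pvcName>-<pvName>, so adjust the wildcard to your actual names:

kubectl exec -it nginx -- sh -c 'echo hello > /usr/share/nginx/html/test.txt'

# on any host that can reach the NFS server
mount -t nfs -o vers=3 10.159.1.105:/data/nfs_basedir/ /mnt
cat /mnt/default-nginx-pvc-*/test.txt   # should print "hello"
umount /mnt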

csi-nfs-driver

1. Deploy csi-nfs-driver.

kubectl apply -f rbac-csi-nfs-controller.yaml

rbac-csi-nfs-controller.yaml

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-nfs-controller-sa
  namespace: kube-system

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-external-provisioner-role
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-csi-provisioner-binding
subjects:
  - kind: ServiceAccount
    name: csi-nfs-controller-sa
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: nfs-external-provisioner-role
  apiGroup: rbac.authorization.k8s.io

kubectl apply -f csi-nfs-driverinfo.yaml
kubectl apply -f csi-nfs-node.yaml
kubectl apply -f csi-nfs-controller.yaml
kubectl apply -f storageclass-nfs.yaml
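
After applying all four manifests, you can wait for the controller Deployment and node DaemonSet (both shown below) to finish rolling out with plain kubectl:

kubectl -n kube-system rollout status deploy/csi-nfs-controller
kubectl -n kube-system rollout status ds/csi-nfs-node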

csi-nfs-driverinfo.yaml

---
apiVersion: storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
  name: nfs.csi.k8s.io
spec:
  attachRequired: false
  volumeLifecycleModes:
    - Persistent
  podInfoOnMount: true
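
attachRequired: false tells Kubernetes that NFS volumes need no attach/detach phase, which is why the controller manifest below runs no external-attacher sidecar. Once applied, confirm the driver object is registered:

kubectl get csidriver nfs.csi.k8s.io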

csi-nfs-node.yaml

---
# This YAML file contains driver-registrar & csi driver nodeplugin API objects
# that are necessary to run CSI nodeplugin for nfs
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-nfs-node
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: csi-nfs-node
  template:
    metadata:
      labels:
        app: csi-nfs-node
    spec:
      hostNetwork: true  # original nfs connection would be broken without hostNetwork setting
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: liveness-probe
          image: registry.cn-hangzhou.aliyuncs.com/imagesfromgoogle/livenessprobe:v2.1.0
          args:
            - --csi-address=/csi/csi.sock
            - --probe-timeout=3s
            - --health-port=29653
            - --v=5
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          resources:
            limits:
              cpu: 100m
              memory: 100Mi
            requests:
              cpu: 10m
              memory: 20Mi
        - name: node-driver-registrar
          image: registry.cn-hangzhou.aliyuncs.com/imagesfromgoogle/csi-node-driver-registrar:v2.0.1
          lifecycle:
            preStop:
              exec:
                command: ["/bin/sh", "-c", "rm -rf /registration/csi-nfsplugin /registration/csi-nfsplugin-reg.sock"]
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
            - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-nfsplugin/csi.sock
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: nfs
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: registry.cn-hangzhou.aliyuncs.com/imagesfromgoogle/nfsplugin:amd64-linux-canary
          args:
            - "-v=5"
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
          env:
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/csi-nfsplugin
            type: DirectoryOrCreate
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: Directory
        - hostPath:
            path: /var/lib/kubelet/plugins_registry
            type: Directory
          name: registration-dir

csi-nfs-controller.yaml

---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: csi-nfs-controller
  namespace: kube-system
spec:
  replicas: 2
  selector:
    matchLabels:
      app: csi-nfs-controller
  template:
    metadata:
      labels:
        app: csi-nfs-controller
    spec:
      serviceAccountName: csi-nfs-controller-sa
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      tolerations:
        - key: "node-role.kubernetes.io/master"
          operator: "Equal"
          value: "true"
          effect: "NoSchedule"
      containers:
        - name: csi-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/imagesfromgoogle/csi-provisioner:v2.0.4
          args:
            - "-v=5"
            - "--csi-address=$(ADDRESS)"
            - "--leader-election"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
          resources:
            limits:
              cpu: 100m
              memory: 100Mi
            requests:
              cpu: 10m
              memory: 20Mi
        - name: liveness-probe
          image: registry.cn-hangzhou.aliyuncs.com/imagesfromgoogle/livenessprobe:v2.1.0
          args:
            - --csi-address=/csi/csi.sock
            - --probe-timeout=3s
            - --health-port=29652
            - --v=5
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          resources:
            limits:
              cpu: 100m
              memory: 100Mi
            requests:
              cpu: 10m
              memory: 20Mi
        - name: nfs
          image: registry.cn-hangzhou.aliyuncs.com/imagesfromgoogle/nfsplugin:amd64-linux-canary
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          imagePullPolicy: IfNotPresent
          args:
            - "-v=5"
            - "--nodeid=$(NODE_ID)"
            - "--endpoint=$(CSI_ENDPOINT)"
          env:
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
          volumeMounts:
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
            - mountPath: /csi
              name: socket-dir
          resources:
            limits:
              cpu: 200m
              memory: 200Mi
            requests:
              cpu: 10m
              memory: 20Mi
      volumes:
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: Directory
        - name: socket-dir
          emptyDir: {}

storageclass-nfs.yaml (replace the NFS server address and directory below with your own)

---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  server: 10.159.1.105  # replace with your own NFS server address
  share: /data/nfs_basedir/  # replace with your own NFS export directory
reclaimPolicy: Retain  # only Retain is currently supported by this driver
volumeBindingMode: Immediate
mountOptions:
  - vers=3
  - noresvport
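
If you want nfs-csi to become the cluster's default StorageClass, the same storageclass.kubernetes.io/is-default-class annotation used in the first section applies here too; kubectl can set it in place:

kubectl annotate storageclass nfs-csi storageclass.kubernetes.io/is-default-class="true"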

2. Check that all the deployed pods have started.

kubectl get po -n kube-system|grep csi-nfs

root@workstation:/data/nfs-csi# kubectl get po -n kube-system|grep csi-nfs
csi-nfs-controller-59697d8b6-lm5v8        3/3     Running     0          5h35m
csi-nfs-controller-59697d8b6-pbv5k        3/3     Running     0          5h35m
csi-nfs-node-5zvfh                        3/3     Running     0          5h35m
csi-nfs-node-8qcw6                        3/3     Running     0          5h35m
csi-nfs-node-sjf8m                        3/3     Running     0          5h35m
csi-nfs-node-tkq4w                        3/3     Running     0          5h35m
csi-nfs-node-xls9l                        3/3     Running     3          5h35m

 

3. Verify that the new StorageClass has been created.

kubectl get sc

root@workstation:/data/rke/mercury_k8s/nfs-storage-class/nfs-csi# kubectl get sc
NAME            PROVISIONER              RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-csi         nfs.csi.k8s.io           Retain          Immediate           false                  154m

4. Use the newly created StorageClass to dynamically provision a PVC.

kubectl apply -f test-csi-nfs-driver.yaml

test-csi-nfs-driver.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx 
  serviceName: "nginx"
  replicas: 1 
  template:
    metadata:
      labels:
        app: nginx 
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "nfs-csi"
      resources:
        requests:
          storage: 1Gi

5. Check the newly created PVC. You could exec into the container, create a file, then mount the NFS export locally to view it, exactly as in step 3 of the nfs-client-provisioner section; that verification is not repeated here.

root@workstation:/data/nfs-csi# kubectl get pvc
NAME        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
www-web-0   Bound    pvc-9c6e135e-8df4-4d50-aad2-ed8e054f5c12   1Gi        RWO            nfs-csi        2m52s
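
Because the volume comes from a volumeClaimTemplates entry, every additional StatefulSet replica gets its own dynamically provisioned PVC; scaling up is a quick way to watch the provisioner at work:

kubectl scale statefulset web --replicas=3
kubectl get pvc   # www-web-1 and www-web-2 should appear and bind shortly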

 
