Environment preparation: four machines, each with two disks attached; one disk on each node is used as the data disk.
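The later commands address the nodes as gfs-001 through gfs-004, so every machine needs consistent name resolution. A minimal /etc/hosts sketch is shown below; the mapping of names to the 192.168.60.x addresses that appear later in the topology is an assumption, so substitute the real addresses of your hosts.

# /etc/hosts on every node (addresses are illustrative)
192.168.60.101  gfs-001
192.168.60.102  gfs-002
192.168.60.103  gfs-003
192.168.60.104  gfs-004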

  1. GlusterFS yum repository

vim /etc/yum.repos.d/CentOS-Gluster-9.repo
[centos-gluster9]
name=CentOS-7 - Gluster 9
baseurl=https://mirrors.huaweicloud.com/centos/7/storage/x86_64/gluster-9/
gpgcheck=0
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-Storage
  2. Clear and rebuild the yum cache
yum clean all
yum makecache
  3. Install GlusterFS on all nodes
yum install --skip-broken -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma glusterfs-geo-replication glusterfs-devel
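If passwordless ssh between the nodes has already been set up, the same installation can be pushed to every node from a single host; a sketch using the host names from this guide:

for n in gfs-001 gfs-002 gfs-003 gfs-004; do
  ssh root@$n "yum install --skip-broken -y glusterfs glusterfs-server glusterfs-fuse glusterfs-rdma glusterfs-geo-replication glusterfs-devel"
done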
  4. Enable and start glusterd on every node
for n in gfs-001 gfs-002 gfs-003 gfs-004; do ssh root@$n "systemctl enable glusterd && systemctl start glusterd"; done
  5. Add the peer nodes (run on one node)
for gfs_host in gfs-001 gfs-002 gfs-003 gfs-004;do gluster peer probe $gfs_host;done
  6. Check the peer status
gluster peer status
  7. Install Heketi
yum install -y heketi heketi-client
  8. Edit the Heketi configuration file /etc/heketi/heketi.json

{
  "_port_comment": "Heketi Server Port Number",
  "port": "8080",

  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": false,

  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "My Secret"
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "My Secret"
    }
  },

  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [
      "Execute plugin. Possible choices: mock, ssh",
      "mock: This setting is used for testing and development.",
      "      It will not send commands to any node.",
      "ssh:  This setting will notify Heketi to ssh to the nodes.",
      "      It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",
      "            Kubernetes exec api."
    ],
    "executor": "ssh",

    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/etc/heketi/heketi_key",
      "user": "root",
      "port": "22",
      "fstab": "/etc/fstab"
    },

    "_kubeexec_comment": "Kubernetes configuration",
    "kubeexec": {
      "host" :"https://kubernetes.host:8443",
      "cert" : "/path/to/crt.file",
      "insecure": false,
      "user": "kubernetes username",
      "password": "password for kubernetes user",
      "namespace": "OpenShift project or Kubernetes namespace",
      "fstab": "Optional: Specify fstab file on node.  Default is /etc/fstab"
    }
  }
}


  9. Generate the ssh key Heketi will use and copy it to the nodes
ssh-keygen -f /etc/heketi/heketi_key -t rsa -N ''
ssh-copy-id -i /etc/heketi/heketi_key.pub root@gfs-001
ssh-copy-id -i /etc/heketi/heketi_key.pub root@gfs-002
ssh-copy-id -i /etc/heketi/heketi_key.pub root@gfs-003
ssh-copy-id -i /etc/heketi/heketi_key.pub root@gfs-004
chown heketi:heketi /etc/heketi/heketi_key*
  10. Start the Heketi service
systemctl start heketi && systemctl enable heketi
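A quick sanity check is Heketi's /hello endpoint; assuming the port 8080 configured above, it should answer with a short greeting ("Hello from Heketi"):

curl http://gfs-001:8080/hello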
  11. Set the environment variable on the cluster's primary node
export HEKETI_CLI_SERVER=http://gfs-001:8080
  12. Write the topology file
# A sample template ships with heketi; just adapt it to your nodes
cat /usr/share/heketi/topology-sample.json
{
    "clusters": [
        {
            "nodes": [
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "192.168.60.102"
                            ],
                            "storage": [
                                "192.168.60.102"
                            ]
                        },
                        "zone": 1
                    },
                    "devices": [
                        {
                            "name": "/dev/sdb",
                            "destroydata": false
                        }
                    ]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "192.168.60.103"
                            ],
                            "storage": [
                                "192.168.60.103"
                            ]
                        },
                        "zone": 1
                    },
                    "devices": [
                        {
                            "name": "/dev/sdb",
                            "destroydata": false
                        }
                    ]
                },
                {
                    "node": {
                        "hostnames": {
                            "manage": [
                                "192.168.60.104"
                            ],
                            "storage": [
                                "192.168.60.104"
                            ]
                        },
                        "zone": 1
                    },
                    "devices": [
                        {
                            "name": "/dev/sdb",
                            "destroydata": false
                        }
                    ]
                }
            ]
        }
    ]
}
  13. Load the topology
heketi-cli topology load --json=/usr/share/heketi/topology-sample.json
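After the load, heketi-cli can print back the full layout it recorded (cluster, nodes, and devices), which is an easy way to confirm that every disk was registered:

heketi-cli topology info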
  14. Check the node status
[root@gfs-001 ~]#  heketi-cli  node list
Id:3c7b84bc9d36ebb7ca39ddd76fbe4ec9	Cluster:89a447b897daf3b31a6fa6c2e7a4a2a8
Id:81e7e3fdf61ba887bf82a8e7273519b3	Cluster:89a447b897daf3b31a6fa6c2e7a4a2a8
Id:b16aec3a25ab904166e77aaf0646f499	Cluster:89a447b897daf3b31a6fa6c2e7a4a2a8
  15. Create a volume
heketi-cli volume create --size=10
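The resulting volume can also be listed and inspected through heketi-cli; a sketch, using the volume ID that appears in the next step:

heketi-cli volume list
heketi-cli volume info 5203c8c764a398b3cd2c0a4f8620ba78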
  16. Query the volume on the GlusterFS side
[root@gfs-001 ~]# gluster volume info

Volume Name: vol_5203c8c764a398b3cd2c0a4f8620ba78
Type: Distributed-Replicate
Volume ID: 0eec576e-4350-46ab-bc70-f760f15fd8ea
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x 3 = 6
Transport-type: tcp
Bricks:
Brick1: 192.168.60.104:/var/lib/heketi/mounts/vg_fb32278e0323dbc6754eef5495ff8d04/brick_fdb0d7fc9513dc71ade5b33eeb9cc2da/brick
Brick2: 192.168.60.103:/var/lib/heketi/mounts/vg_1a8b5d1f2e4b8b571b98f00af09c1307/brick_eb6f289f7a67d2f59458ae1cd416ad9d/brick
Brick3: 192.168.60.102:/var/lib/heketi/mounts/vg_ceb8a79195eb4c0faf8b7b81640e1760/brick_1da8436fd2559aadc98aec4f078d6ab9/brick
Brick4: 192.168.60.104:/var/lib/heketi/mounts/vg_fb32278e0323dbc6754eef5495ff8d04/brick_58582bdc3e05a1e1514974a462a4564a/brick
Brick5: 192.168.60.103:/var/lib/heketi/mounts/vg_1a8b5d1f2e4b8b571b98f00af09c1307/brick_d269dba2b6c5611d4c61c8ba9a26dc97/brick
Brick6: 192.168.60.102:/var/lib/heketi/mounts/vg_ceb8a79195eb4c0faf8b7b81640e1760/brick_72b274b3f83471f6fb2471c68eea927f/brick
Options Reconfigured:
user.heketi.id: 5203c8c764a398b3cd2c0a4f8620ba78
cluster.granular-entry-heal: on
storage.fips-mode-rchecksum: on
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
  17. Expand the volume
heketi-cli volume expand --volume=5203c8c764a398b3cd2c0a4f8620ba78 --expand-size=10
 # --volume is the volume ID from above; --expand-size is the amount to add. Expanding by 10 here brings the total to the original 10 + 10 = 20.
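To confirm the expansion took effect, inspect the volume again; its size should now report the original 10 plus the added 10:

heketi-cli volume info 5203c8c764a398b3cd2c0a4f8620ba78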

The GlusterFS in-tree storage driver was deprecated in Kubernetes v1.25 and removed entirely in v1.26.

Documentation: https://kubernetes.io/zh-cn/docs/concepts/storage/volumes/#glusterfs

From those releases onward, the only way to attach GlusterFS to Kubernetes is through a CSI driver, which is also the approach Kubernetes is promoting.

  1. Create the RBAC for the CSI driver
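The manifests below put everything into a glusterfs namespace; if it does not exist yet, create it first, for example:

kubectl create namespace glusterfs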
# File contents
[root@master-001 glusterfs]# cat rbac-csi-nfs-controller.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-glusterfs-controller-sa
  namespace: glusterfs

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: glusterfs-external-provisioner-role
rules:
- apiGroups:
  - ""
  resources:
  - persistentvolumes
  verbs:
  - get
  - list
  - watch
  - create
  - delete
  - patch
- apiGroups:
  - ""
  resources:
  - persistentvolumeclaims
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - persistentvolumeclaims/status
  verbs:
  - update
  - patch
- apiGroups:
  - storage.k8s.io
  resources:
  - storageclasses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
- apiGroups:
  - storage.k8s.io
  resources:
  - csinodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: glusterfs-csi-provisioner-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: glusterfs-external-provisioner-role
subjects:
- kind: ServiceAccount
  name: csi-glusterfs-controller-sa
  namespace: glusterfs
# Apply it
[root@master-001 glusterfs]# kubectl apply -f rbac-csi-nfs-controller.yaml
  2. Deploy the CSI controller, node plugin, and CSIDriver object
[root@master-001 glusterfs]# cat csi-glusterfs-controller.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: csi-glusterfs-controller
  namespace: glusterfs
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: csi-glusterfs-controller
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: csi-glusterfs-controller
    spec:
      containers:
      - args:
        - -v=2
        - --csi-address=$(ADDRESS)
        - --leader-election
        env:
        - name: ADDRESS
          value: /csi/csi.sock
        image: registry.cn-beijing.aliyuncs.com/public-lib/csi-provisioner:v2.2.2
        imagePullPolicy: IfNotPresent
        name: csi-provisioner
        resources:
          limits:
            memory: 400Mi
          requests:
            cpu: 10m
            memory: 20Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /csi
          name: socket-dir
      #- args:
      #  - --csi-address=$(ADDRESS)
      #  - --v=2
      #  - --timeout=150s
      #  - --leader-election
      #  - --retry-interval-start=500ms
      #  - --handle-volume-inuse-error=false
      #  env:
      #  - name: ADDRESS
      #    value: unix:///csi/csi.sock
      #  image: registry.cn-beijing.aliyuncs.com/public-lib/csi-resizer:v1.0.1
      #  imagePullPolicy: IfNotPresent
      #  name: csi-resizer
      #  resources: {}
      #  terminationMessagePath: /dev/termination-log
      #  terminationMessagePolicy: File
      #  volumeMounts:
      #  - mountPath: /csi
      #    name: socket-dir
      - args:
        - --csi-address=/csi/csi.sock
        - --probe-timeout=3s
        - --health-port=29652
        - --v=2
        image: registry.cn-beijing.aliyuncs.com/juestnow/livenessprobe:v2.10.0
        #image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
        imagePullPolicy: IfNotPresent
        name: liveness-probe
        resources:
          limits:
            memory: 100Mi
          requests:
            cpu: 10m
            memory: 20Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /csi
          name: socket-dir
      - args:
        - -v=2
        - --nodeid=$(NODE_ID)
        - --endpoint=$(CSI_ENDPOINT)
        env:
        - name: NODE_ID
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CSI_ENDPOINT
          value: unix:///csi/csi.sock
        image: registry.cn-beijing.aliyuncs.com/public-lib/glusterfs-csi-driver:latest
        #image: docker.io/levindecaro/csi-driver-glusterfs:v0.1
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthz
            port: healthz
            scheme: HTTP
          initialDelaySeconds: 30
          periodSeconds: 30
          successThreshold: 1
          timeoutSeconds: 10
        name: glusterfs
        ports:
        - containerPort: 29652
          hostPort: 29652
          name: healthz
          protocol: TCP
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 10m
            memory: 20Mi
        securityContext:
          allowPrivilegeEscalation: true
          capabilities:
            add:
            - SYS_ADMIN
          privileged: true
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /var/lib/kubelet/pods
          mountPropagation: Bidirectional
          name: pods-mount-dir
        - mountPath: /csi
          name: socket-dir
      dnsPolicy: ClusterFirstWithHostNet
      hostNetwork: true
      priorityClassName: system-cluster-critical
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: csi-glusterfs-controller-sa
      serviceAccountName: csi-glusterfs-controller-sa
      terminationGracePeriodSeconds: 30
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/controlplane
        operator: Exists
      volumes:
      - hostPath:
          path: /var/lib/kubelet/pods
          type: Directory
        name: pods-mount-dir
      - emptyDir: {}
        name: socket-dir
 
 [root@master-001 glusterfs]# cat csi-glusterfs-node.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: csi-glusterfs-node
  namespace: glusterfs
spec:
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: csi-glusterfs-node
  template:
    metadata:
      labels:
        app: csi-glusterfs-node
    spec:
      containers:
      - args:
        - --csi-address=/csi/csi.sock
        - --probe-timeout=3s
        - --health-port=9653
        - --v=2
        #image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0
        image: registry.cn-beijing.aliyuncs.com/juestnow/livenessprobe:v2.10.0
        imagePullPolicy: IfNotPresent
        name: liveness-probe
        resources:
          limits:
            memory: 100Mi
          requests:
            cpu: 10m
            memory: 20Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /csi
          name: socket-dir
      - args:
        - --v=2
        - --csi-address=/csi/csi.sock
        - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
        env:
        - name: DRIVER_REG_SOCK_PATH
          value: /var/lib/kubelet/plugins/csi-glusterfsplugin/csi.sock
        - name: KUBE_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        image: registry.cn-beijing.aliyuncs.com/public-lib/csi-node-driver-registrar:v2.9.0
        #image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          exec:
            command:
            - /csi-node-driver-registrar
            - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
            - --mode=kubelet-registration-probe
          failureThreshold: 3
          initialDelaySeconds: 30
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 15
        name: node-driver-registrar
        resources:
          limits:
            memory: 100Mi
          requests:
            cpu: 10m
            memory: 20Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /csi
          name: socket-dir
        - mountPath: /registration
          name: registration-dir
      - args:
        - -v=2
        - --nodeid=$(NODE_ID)
        - --endpoint=$(CSI_ENDPOINT)
        env:
        - name: NODE_ID
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CSI_ENDPOINT
          value: unix:///csi/csi.sock
        image: registry.cn-beijing.aliyuncs.com/public-lib/glusterfs-csi-driver:latest
        #image: docker.io/levindecaro/csi-driver-glusterfs:v0.1
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 5
          httpGet:
            path: /healthz
            port: healthz
            scheme: HTTP
          initialDelaySeconds: 30
          periodSeconds: 30
          successThreshold: 1
          timeoutSeconds: 10
        name: glusterfs
        ports:
        - containerPort: 9653
          hostPort: 9653
          name: healthz
          protocol: TCP
        resources:
          limits:
            memory: 300Mi
          requests:
            cpu: 10m
            memory: 20Mi
        securityContext:
          allowPrivilegeEscalation: true
          capabilities:
            add:
            - SYS_ADMIN
          privileged: true
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /csi
          name: socket-dir
        - mountPath: /var/lib/kubelet/pods
          mountPropagation: Bidirectional
          name: pods-mount-dir
      dnsPolicy: ClusterFirstWithHostNet
      hostNetwork: true
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: csi-glusterfs-controller-sa
      serviceAccountName: csi-glusterfs-controller-sa
      terminationGracePeriodSeconds: 30
      volumes:
      - hostPath:
          path: /var/lib/kubelet/plugins/csi-glusterfsplugin
          type: DirectoryOrCreate
        name: socket-dir
      - hostPath:
          path: /var/lib/kubelet/pods
          type: Directory
        name: pods-mount-dir
      - hostPath:
          path: /var/lib/kubelet/plugins_registry
          type: Directory
        name: registration-dir
  updateStrategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
 
 
 [root@master-001 glusterfs]# cat csi-glusterfs-driverinfo.yaml
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: glusterfs.csi.k8s.io
spec:
  attachRequired: false
  volumeLifecycleModes:
    - Persistent
 
 # Apply the three manifests
 [root@master-001 glusterfs]# kubectl apply -f csi-glusterfs-controller.yaml -f csi-glusterfs-driverinfo.yaml -f csi-glusterfs-node.yaml
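Once applied, the controller Deployment, the node DaemonSet, and the CSIDriver object should all show up; a quick check:

kubectl get pods -n glusterfs
kubectl get csidriver glusterfs.csi.k8s.io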
  3. Create the PV and PVC
[root@master-001 glusterfs]# cat static-pv.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: static-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
    - rw
    - backup-volfile-servers=gluster-1.glusterfs.local:gluster-2.glusterfs.local
  csi:
    driver: glusterfs.csi.k8s.io
    readOnly: false
    volumeHandle: 8dd8cb13-dd16-419e-b89c-b6536c0c35d6 # Run uuidgen to generate a unique id
    volumeAttributes:
      server: gluster-0.glusterfs.local
      share: vol_5203c8c764a398b3cd2c0a4f8620ba78

---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: static-0
  namespace: glusterfs
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
  volumeName: static-pv
  storageClassName: ""
  
 # Apply and check
 [root@master-001 glusterfs]# kubectl apply -f static-pv.yaml
 [root@master-001 glusterfs]# kubectl get pvc -n glusterfs
NAME       STATUS   VOLUME      CAPACITY   ACCESS MODES   STORAGECLASS   AGE
static-0   Bound    static-pv   10Gi       RWX                           9s
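To verify the claim is actually usable, a throwaway pod can mount it; a minimal sketch (the pod name gluster-test and the busybox image are only illustrative):

kubectl apply -n glusterfs -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: gluster-test
spec:
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: data
      mountPath: /mnt/gluster
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: static-0
EOF
# Anything written under /mnt/gluster inside the pod should then appear on the GlusterFS volume.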
  4. At the moment, dynamic provisioning of PVs through a StorageClass does not work, probably because of an issue with this CSI driver, so PVs and PVCs can only be created manually as shown above.