Having gotten familiar with PV and PVC, we know that they bind one-to-one: once a PV has been claimed by a PVC, no other PVC can use it. In other words, every PVC we use requires a PV to be created as well. Is there a way to simplify this? Yes: StorageClass (SC). An SC is effectively a storage pool; when a PVC is created against it, the matching PV is provisioned automatically, and one SC can serve many PVCs. So once an SC exists, we can create PVCs directly and no longer have to create PVs by hand.

    From the user's point of view it now looks like this:

(figure: StorageClass – user view)

    But what actually happens is this:

    (figure: StorageClass – actual provisioning flow)

01 Creating the StorageClass

    Because a StorageClass provisions storage automatically, we first need to install the storage driver's provisioner, and that provisioner must have enough permissions to access the Kubernetes cluster (much like the dashboard, which can only manage resources because it is allowed to call the various APIs).

    1) Create the RBAC objects

cat rbac.yaml 
apiVersion: v1
kind: ServiceAccount                  # create a service account
metadata:
  name: nfs-client-provisioner
  namespace: kube-system

---
kind: ClusterRole                     # create a cluster role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:                                # permissions granted to the role
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]

---
kind: ClusterRoleBinding              # bind the service account to the cluster role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

# apply it
[root@host21 sc]# kubectl apply -f rbac.yaml
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
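
    If you want to double-check these objects before moving on, the following optional commands list them (the names come straight from rbac.yaml above):

# optional sanity check of the RBAC objects
kubectl get sa nfs-client-provisioner -n kube-system
kubectl get clusterrole nfs-client-provisioner-runner
kubectl get clusterrolebinding run-nfs-client-provisioner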

    2) Deploy the provisioner -- nfs-provisioner
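
    One note before deploying: the provisioner below mounts the NFS export /data/nfs-volume/sc-pool from host200, so that directory must already exist and be exported. A minimal sketch of preparing it on host200 (assuming a standard nfs-utils setup; adjust the export options to your environment):

# on host200 (NFS server) -- assumed setup, adjust to your environment
mkdir -p /data/nfs-volume/sc-pool
echo '/data/nfs-volume/sc-pool *(rw,no_root_squash)' >> /etc/exports
exportfs -r              # reload the export table
showmount -e host200     # confirm the export is visible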

[root@host21 sc]# cat dp.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-provisioner-01
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-provisioner-01
  template:
    metadata:
      labels:
        app: nfs-provisioner-01
    spec:
      serviceAccountName: nfs-client-provisioner   # service account created above
      containers:
      - name: nfs-client-provisioner
        image: harbor.od.com/public/nfs-client-provisioner:latest
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:                                       # environment variables
        - name: PROVISIONER_NAME
          value: nfs-provisioner-01                # provisioner name, referenced later by the StorageClass
        - name: NFS_SERVER
          value: host200                           # NFS server address
        - name: NFS_PATH
          value: /data/nfs-volume/sc-pool          # exported NFS directory
      volumes:
      - name: nfs-client-root
        nfs:
          server: host200                          # NFS server address
          path: /data/nfs-volume/sc-pool           # exported NFS directory

# apply it
[root@host21 sc]# kubectl apply -f dp.yaml
deployment.apps/nfs-provisioner-01 created

# check that the provisioner pod is running
[root@host21 sc]# kubectl get pod -n kube-system | grep nfs
nfs-provisioner-01-684f9b4798-lzbhv   1/1   Running   0   3m9s
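
    If the pod does not reach Running, or dynamic provisioning fails later on, the provisioner's log is the first place to look, e.g.:

# check the provisioner's log for NFS mount or permission errors
kubectl logs -n kube-system deploy/nfs-provisioner-01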

    3) Configure the StorageClass

[root@host21 sc]# cat sc.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-sc-pool
provisioner: nfs-provisioner-01   # must match PROVISIONER_NAME defined in dp.yaml
# Supported policies: Delete, Retain; the default is Delete
reclaimPolicy: Retain
# apply it
[root@host21 sc]# kubectl apply -f sc.yaml
storageclass.storage.k8s.io/nfs-sc-pool created
# check
[root@host21 sc]# kubectl get sc
NAME          PROVISIONER          AGE
nfs-sc-pool   nfs-provisioner-01   14s
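
    To confirm that the class points at the right provisioner and reclaim policy, kubectl describe works as usual (output omitted here):

# inspect the StorageClass definition
kubectl describe sc nfs-sc-pool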

02 Testing

    1) Create a PVC

# pvc.yaml
[root@host21 sc]# cat pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc01-sc-pool
spec:
  storageClassName: nfs-sc-pool   # the StorageClass to use
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
# apply it
[root@host21 sc]# kubectl apply -f pvc.yaml
persistentvolumeclaim/pvc01-sc-pool created
# check the pvc
[root@host21 sc]# kubectl get pvc
NAME            STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
pvc01-sc-pool   Bound    pvc-e6d1021e-0db8-4dee-8149-4229c272e203   1Mi        RWX            nfs-sc-pool    5s
# check the pv
[root@host21 sc]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                   STORAGECLASS   REASON   AGE
pvc-e6d1021e-0db8-4dee-8149-4229c272e203   1Mi        RWX            Retain           Bound    default/pvc01-sc-pool   nfs-sc-pool             8s

    From the above we can see that once the StorageClass exists, we can create PVCs directly without creating PVs first.

    2) Inspect the PV

[root@host21 sc]# kubectl describe pv pvc-e6d1021e-0db8-4dee-8149-4229c272e203 
Name:            pvc-e6d1021e-0db8-4dee-8149-4229c272e203
Labels:          <none>
Annotations:     pv.kubernetes.io/provisioned-by: nfs-provisioner-01
Finalizers:      [kubernetes.io/pv-protection]
StorageClass:    nfs-sc-pool
Status:          Bound
Claim:           default/pvc01-sc-pool
Reclaim Policy:  Retain
Access Modes:    RWX
VolumeMode:      Filesystem
Capacity:        1Mi
Node Affinity:   <none>
Message:
Source:
    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    Server:    host200
    Path:      /data/nfs-volume/sc-pool/default-pvc01-sc-pool-pvc-e6d1021e-0db8-4dee-8149-4229c272e203
    ReadOnly:  false
Events:          <none>

    As you can see, when the PV was created automatically, the provisioner also created a dedicated directory for it under the NFS export (named after the namespace, PVC and PV), so data from different claims does not get mixed together.
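
    You can see this directly on the NFS server; the directory name follows the pattern <namespace>-<pvc name>-<pv name>. A quick check on host200 (assuming shell access to the NFS server):

# on host200: list the directories the provisioner created under the export
ls /data/nfs-volume/sc-pool/
# should include: default-pvc01-sc-pool-pvc-e6d1021e-0db8-4dee-8149-4229c272e203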

    3) Using it in an application

[root@host21 sc]# cat nginx.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:                    # mount nginx's default web root
        - name: nginx-html
          mountPath: "/usr/share/nginx/html"
      volumes:
      - name: nginx-html
        persistentVolumeClaim:
          claimName: nginx-html-pvc      # must match the PVC name below
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nginx-html-pvc                   # PVC name
spec:
  storageClassName: nfs-sc-pool
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
# apply it
[root@host21 sc]# kubectl apply -f nginx.yaml
deployment.apps/nginx created
# verify
[root@host21 sc]# kubectl get pod
NAME                    READY   STATUS    RESTARTS   AGE
nginx-b7db77fbb-b2mln   1/1     Running   0          6m3s
[root@host21 sc]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                    STORAGECLASS   REASON   AGE
pvc-3755c46c-34ee-452b-971a-d562511199d7   1Mi        RWX            Retain           Bound    default/nfs-nginx-pvc    nfs-sc-pool             6m6s
pvc-60d11564-9713-4d3a-88a5-f9955685bdb5   1Mi        RWX            Retain           Bound    default/nginx-html-pvc   nfs-sc-pool             4m49s
pvc-e6d1021e-0db8-4dee-8149-4229c272e203   1Mi        RWX            Retain           Bound    default/pvc01-sc-pool    nfs-sc-pool             28m
[root@host21 sc]# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
nfs-nginx-pvc    Bound    pvc-3755c46c-34ee-452b-971a-d562511199d7   1Mi        RWX            nfs-sc-pool    6m8s
nginx-html-pvc   Bound    pvc-60d11564-9713-4d3a-88a5-f9955685bdb5   1Mi        RWX            nfs-sc-pool    4m52s
pvc01-sc-pool    Bound    pvc-e6d1021e-0db8-4dee-8149-4229c272e203   1Mi        RWX            nfs-sc-pool    28m
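
    As a final check that nginx really serves content from the NFS-backed volume, you can drop a test page into the directory provisioned for nginx-html-pvc on host200 and read it back through the pod. This is only a sketch; the directory name is inferred from the <namespace>-<pvc>-<pv> pattern shown earlier:

# on host200: write a test page into the directory provisioned for nginx-html-pvc
# (directory name inferred from the <namespace>-<pvc>-<pv> naming pattern)
echo 'hello from nfs' > /data/nfs-volume/sc-pool/default-nginx-html-pvc-pvc-60d11564-9713-4d3a-88a5-f9955685bdb5/index.html
# read it back through the nginx container
kubectl exec deploy/nginx -- cat /usr/share/nginx/html/index.html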

(figure: nginx example result)