1、emptyDir
默认以节点本地磁盘为介质(只有设置 medium: Memory 时才使用内存 tmpfs),存放临时数据;Pod 被删除时,emptyDir 中的数据也随之删除
[root@vms10 chap4-volume]# cat pod1.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod1
  name: pod1
spec:
  # Declare the volume first; an emptyDir volume lives exactly as long as the Pod
  volumes:
  - name: volume1
    emptyDir: {}
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: pod1
    # Multiple containers in the same Pod can share the data in this volume
    volumeMounts:
    - mountPath: /data
      name: volume1
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
[root@vms10 chap4-volume]# kubectl describe pod pod1
Mounts:
/data from volume1 (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-gp57k (ro)
2、hostPath
持久化存储,将pod内部容器文件映射到宿主机上
注意:
如果切换了node,且数据没有同步到要切换的node节点上,将会导致数据丢失。
[root@vms10 chap4-volume]# cat pod-hostPath.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod1
  name: pod1
spec:
  volumes:
  - name: volume1
    hostPath:
      # Directory on the node that happens to run this Pod; data is NOT
      # replicated across nodes — rescheduling to another node loses it
      path: /data
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx1
    volumeMounts:
    - mountPath: /var/www
      name: volume1
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx2
    command: ["sh","-c","sleep 5000" ]
    volumeMounts:
    - mountPath: /var/www
      name: volume1
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
# 验证
[root@vms30 data]# ls /data/
index.html
[root@vms10 chap4-volume]# kubectl exec -it pod1 -c nginx1 -- bash
root@pod1:/# ls /var/www/
index.html
[root@vms10 chap4-volume]# kubectl exec -it pod1 -c nginx2 -- bash
root@pod1:/# ls /var/www/
index.html
3、使用nfs作为存储
弊端:共享空间由谁来创建?(Pod 直接挂载 NFS,存在安全风险)
3.1、创建nfs服务器
[root@nfs ~]# yum -y install nfs-utils
[root@nfs ~]# systemctl enable nfs-server --now
[root@nfs ~]# firewall-cmd --set-default-zone=trusted
Warning: ZONE_ALREADY_SET: trusted
success
# 创建共享目录和配置文件
[root@nfs ~]# mkdir /data
[root@nfs ~]# vim /etc/exports
# no_root_squash:不压缩root权限,客户端以root身份写入时,如果不设置此选项,root会变为nobody
#
#/data *(rw,no_root_squash)
/data 192.168.26.0/24(rw,no_root_squash)
# 重新导出共享目录 -a:处理 /etc/exports 中的所有目录 -r:重新导出(re-export) -v:显示详细信息
[root@nfs ~]# exportfs -avr
exporting 192.168.26.0/24:/data
# 在所有node节点20/30安装nfs客户端
[root@vms20 ~]# yum -y install nfs-utils
[root@vms30 ~]# yum -y install nfs-utils
[root@vms30 ~]# showmount -e 192.168.26.31
Export list for 192.168.26.31:
/data 192.168.26.0/24
# 测试能否挂载
[root@vms30 ~]# mount 192.168.26.31:/data /mnt ;umount /mnt
3.2、使用nfs共享pod存储
[root@vms10 chap4-volume]# cat pod-nfs.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod1
  name: pod1
spec:
  # Pin the Pod to a specific node (bypasses the scheduler)
  nodeName: vms20.rhce.cc
  volumes:
  - name: nfs
    nfs:
      server: 192.168.26.31
      path: /data
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx1
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: nfs
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx2
    command: ["sh","-c","sleep 5000" ]
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: nfs
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
[root@vms10 chap4-volume]# kubectl apply -f pod-nfs.yaml
pod/pod1 created
[root@vms10 chap4-volume]# kubectl get pod -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod1 2/2 Running 0 13s 10.244.71.148 vms20.rhce.cc <none> <none>
4、持久性存储
使用pv和pvc保证pod不直接连接到nfs服务器,保障了数据安全性
pv:持久性卷
pvc:持久性卷声明
一个pv只能和一个pvc关联
PV 的容量是通过 PV 对象的 capacity 属性来设置的。访问模式(accessModes)有:
ReadWriteOnce
卷可以被一个节点以读写方式挂载。 ReadWriteOnce 访问模式也允许运行在同一节点上的多个 Pod 访问卷。
ReadOnlyMany
卷可以被多个节点以只读方式挂载。
ReadWriteMany
卷可以被多个节点以读写方式挂载。
ReadWriteOncePod
卷可以被单个 Pod 以读写方式挂载。 如果你想确保整个集群中只有一个 Pod 可以读取或写入该 PVC, 请使用ReadWriteOncePod 访问模式。这只支持 CSI 卷以及需要 Kubernetes 1.22 以上版本。
创建pv
[root@vms10 chap4-volume]# kubectl get pv
No resources found
[root@vms10 chap4-volume]# vim pod-nfs-pv.yaml
# 创建PV
# Create the PV
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv1
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  # Retain: after the PVC is deleted the PV becomes Released and must be
  # reclaimed manually; data on the backing store is kept
  persistentVolumeReclaimPolicy: Retain
  # Must match the PVC's storageClassName for binding
  storageClassName: xx
  mountOptions:
  - hard
  - nfsvers=4.1
  nfs:
    path: /data
    server: 192.168.26.31
# 查看创建的pv
[root@vms10 chap4-volume]# kubectl apply -f pod-nfs-pv.yaml
persistentvolume/pv1 created
[root@vms10 chap4-volume]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv1 5Gi RWO Retain Available xx 5s
创建pvc
[root@vms10 chap4-volume]# cat pod-nfs-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc1
spec:
  # Must match the PV's accessModes for binding
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      # Requested size must be <= the PV's capacity to bind
      storage: 5Gi
  # Must match the PV's storageClassName
  storageClassName: xx
# 定义pvc
[root@vms10 chap4-volume]# kubectl apply -f pod-nfs-pvc.yaml
persistentvolumeclaim/pvc1 created
[root@vms10 chap4-volume]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc1 Bound pv1 5Gi RWO 4s
pv和pvc如何关联
accessModes必须一致
pvc的storage必须<=pv的storage
pvc和pv的storageClass要匹配
创建pod
[root@vms10 pv-pvc]# cat nfs-pv-pvc.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod1
  name: pod1
spec:
  nodeName: vms30.rhce.cc
  volumes:
  - name: nfs
    # The Pod references only the PVC; the NFS server details stay in the PV,
    # decoupling workloads from the storage backend
    persistentVolumeClaim:
      claimName: pvc1
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx1
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: nfs
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: nginx2
    command: ["sh","-c","sleep 5000" ]
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: nfs
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
[root@vms10 pv-pvc]# kubectl exec -it pod1 -c nginx1 -- bash
root@pod1:/# df -h
Filesystem Size Used Avail Use% Mounted on
overlay 150G 6.7G 144G 5% /
tmpfs 64M 0 64M 0% /dev
tmpfs 489M 0 489M 0% /sys/fs/cgroup
/dev/sda1 150G 6.7G 144G 5% /etc/hosts
shm 64M 0 64M 0% /dev/shm
192.168.26.31:/data 150G 2.4G 148G 2% /usr/share/nginx/html
pvc的回收策略
目前的回收策略有:
- Retain -- 手动回收:删除 pvc 后 pv 状态变为 Released,不会自动释放;要再次使用需先删除并重建该 pv,否则新建的 pvc 会一直处于 Pending 状态(无法与该 pv 关联)
- Recycle -- 删除 pvc 时自动清除卷中所有数据(启动一个 busybox:1.27 清理容器执行 rm -rf /thevolume/*),pv 状态变回 Available,可以再次被新的 pvc 关联
- Delete -- 删除 pvc 时,诸如 AWS EBS、GCE PD、Azure Disk 或 OpenStack Cinder 卷这类关联存储资产也会被删除
5、动态卷供应(解决nfs无法自动创建共享目录,即无法自动提供pv给pvc)
前面讲持久性存储的时候,是要先创建 pv 然后才能创建 pvc。如果不同的命名空间里同时要创建不同的 pvc,那么就需要提前把 pv 创建好,这样才能为 pvc 提供存储。这种操作方式太过于麻烦,所以可以通过 storageClass(简称为 sc)来解决这个问题。最终的效果是,管理员不需要提前创建 pv,只要创建好 storageClass 之后就不用管 pv 了;用户创建 pvc 的时候,storageClass 会自动创建出一个 pv 并与这个 pvc 进行关联。
# 1、修改kube-apiserver.yaml(注意:RemoveSelfLink=false 仅旧版 nfs-client-provisioner 需要,该 feature-gate 在 k8s 1.24+ 已被移除,新版本集群请改用 nfs-subdir-external-provisioner,无需此步骤)
[root@vms10 pv-pvc]# vim /etc/kubernetes/manifests/kube-apiserver.yaml
- --feature-gates=RemoveSelfLink=false
[root@vms10 pv-pvc]# systemctl restart kubelet
[root@vms10 动态供应卷]# unzip external-storage-master.zip
[root@vms10 动态供应卷]# cd external-storage-master/nfs-client/deploy/
[root@vms10 deploy]# vim deployment.yaml
image: quay.io/external_storage/nfs-client-provisioner:latest
imagePullPolicy: IfNotPresent
env:
# 设置分配器名称
- name: PROVISIONER_NAME
value: fuseim.pri/ifs
- name: NFS_SERVER
value: 192.168.26.31
- name: NFS_PATH
value: /data
volumes:
- name: nfs-client-root
nfs:
server: 192.168.26.31
path: /data
# 2、将镜像提前导入到各个节点中
[root@vms20 ~]# docker load -i nfs-client-provisioner.tar
[root@vms30 ~]# docker load -i nfs-client-provisioner.tar
# 3、部署rbac
[root@vms10 deploy]# kubectl apply -f rbac.yaml
# 4、部署分配器
[root@vms10 deploy]# kubectl apply -f deployment.yaml
deployment.apps/nfs-client-provisioner created
[root@vms10 deploy]# kubectl get pods -n default
NAME READY STATUS RESTARTS AGE
nfs-client-provisioner-5b6d867846-44nmn 1/1 Running 0 57s
# 5、创建storageClass
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
# Must match the PROVISIONER_NAME env var set in the provisioner Deployment
provisioner: fuseim.pri/ifs
parameters:
  # "false": data is deleted outright when the PVC is removed;
  # "true" would archive it to an "archived-*" directory instead
  archiveOnDelete: "false"
[root@vms10 deploy]# kubectl apply -f class.yaml
[root@vms10 deploy]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
managed-nfs-storage fuseim.pri/ifs Delete Immediate false 2m10s
# 6、创建pvc
[root@vms10 动态供应卷]# cat pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc1
spec:
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 5Gi
storageClassName: managed-nfs-storage
[root@vms10 动态供应卷]# kubectl apply -f pvc.yaml
persistentvolumeclaim/pvc1 created
[root@vms10 动态供应卷]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc1 Bound pvc-a5f63be6-47a1-4ad3-ba18-9c4aff10f87b 5Gi RWO managed-nfs-storage 7s
# 自动帮助我们创建pv
[root@vms10 动态供应卷]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-a5f63be6-47a1-4ad3-ba18-9c4aff10f87b 5Gi RWO Delete Bound chap4-volume/pvc1 managed-nfs-storage 9s
[root@vms10 动态供应卷]# kubectl describe pv pvc-a5f63be6-47a1-4ad3-ba18-9c4aff10f87b
Name: pvc-a5f63be6-47a1-4ad3-ba18-9c4aff10f87b
Labels: <none>
Annotations: pv.kubernetes.io/provisioned-by: fuseim.pri/ifs
Finalizers: [kubernetes.io/pv-protection]
StorageClass: managed-nfs-storage
Status: Bound
Claim: chap4-volume/pvc1
Reclaim Policy: Delete
Access Modes: RWO
VolumeMode: Filesystem
Capacity: 5Gi
Node Affinity: <none>
Message:
Source:
Type: NFS (an NFS mount that lasts the lifetime of a pod)
Server: 192.168.26.31
Path: /data/chap4-volume-pvc1-pvc-a5f63be6-47a1-4ad3-ba18-9c4aff10f87b
ReadOnly: false
Events: <none>
# 创建pod,验证
[root@vms10 动态供应卷]# kubectl apply -f nfs-pv-pvc.yaml
pod/pod1 created
[root@vms10 动态供应卷]# kubectl get pod
NAME READY STATUS RESTARTS AGE
pod1 2/2 Running 0 5s
[root@nfs data]# ls
chap4-volume-pvc1-pvc-a5f63be6-47a1-4ad3-ba18-9c4aff10f87b