9.1 k8s存储Volumes介绍
Container(容器)中的磁盘文件是短暂的,当容器崩溃时,kubelet会重新启动容器,但最初的文件将丢失,Container会以最干净的状态启动。另外,当一个Pod运行多个Container时,各个容器可能需要共享一些文件。Kubernetes Volume可以解决这两个问题。eg:如一个pod里,容器A和容器B需要共享数据;不同pod间共享数据;
一些需要持久化数据的程序才会用到Volumes,或者一些需要共享数据的容器需要volumes。
不同pod间共享数据也可以通过volumes解决,比如我们nfs,jfs,ceph,公有云的nas等;
日志收集的需求:需要在应用程序的容器里面加一个sidecar,这个容器是一个收集日志的容器,比如filebeat,它通过volumes共享应用程序的日志文件目录。就是同一个pod下不同容器共享数据;
Volumes:官方文档https://kubernetes.io/docs/concepts/storage/volumes/
9.2 Volumes EmptyDir实现数据共享
比较常用的volumes类型有emptyDir、hostPath、NFS等(configmap和secret之前讲过)。
emptyDir主要是用作pod下不同容器间共享数据,不是持久化存储,重启后数据丢失。
和上述volume不同的是,如果删除Pod,emptyDir卷中的数据也将被删除,一般emptyDir卷用于Pod中的不同Container共享数据。它可以被挂载到相同或不同的路径上。
默认情况下,emptyDir卷支持节点上的任何介质,可能是SSD、磁盘或网络存储,具体取决于自身的环境。可以将emptyDir.medium字段设置为Memory,让Kubernetes使用tmpfs(内存支持的文件系统),虽然tmpfs非常快,但是tmpfs在节点重启时,数据同样会被清除,并且设置的大小会被计入到Container的内存限制当中。
[root@k8s-master01 ~]# cat nginx-deploy_1205_emptydir.yaml
# cat nginx-deploy.yaml
# Deployment demonstrating an emptyDir volume shared between two containers
# in the same Pod: "nginx" mounts it at /opt, "nginx2" mounts it at /mnt.
# Data written by one container is visible to the other; it is deleted
# when the Pod is removed (emptyDir is not persistent storage).
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 2  # number of replicas
  selector:
    matchLabels:
      app: nginx
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
    spec:
      containers:
      - image: registry.cn-beijing.aliyuncs.com/dotbalo/nginx:1.15.12
        imagePullPolicy: IfNotPresent
        name: nginx
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        # ports:
        # - containerPort: 8080
        #   name: nginx-port
        #   protocol: TCP
        volumeMounts:
        - mountPath: /opt
          name: share-volume
      - image: registry.cn-beijing.aliyuncs.com/dotbalo/nginx:1.15.12
        imagePullPolicy: IfNotPresent
        name: nginx2
        # Override the image entrypoint so this sidecar just stays alive;
        # it exists only to demonstrate sharing the emptyDir volume.
        command:
        - sh
        - -c
        - sleep 3600
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        # ports:
        # - containerPort: 8090
        #   name: nginx2-port
        #   protocol: TCP
        volumeMounts:
        - mountPath: /mnt
          name: share-volume
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
      - name: share-volume
        emptyDir: {}
        # medium: Memory  # uncomment to back the volume with tmpfs (RAM);
        #                 # fast, but cleared on node reboot and counted
        #                 # against the container memory limit
您在 /var/spool/mail/root 中有新邮件
[root@k8s-master01 ~]#
kubectl create -f nginx-deploy_1205_emptydir.yaml
9.3 Volumes HostPath挂载宿主机路径
ps:一般不推荐使用
作用:把宿主机上的目录挂载到pod里;
[root@k8s-master01 ~]# cat nginx-deploy_1205_hostpath.yaml
# cat nginx-deploy.yaml
# Deployment demonstrating a hostPath volume in addition to an emptyDir:
# the node's /etc/timezone file is mounted read into the "nginx" container
# so the container shares the host's timezone setting.
# NOTE(review): hostPath ties the Pod to node-local state and is generally
# discouraged; /etc/timezone may not exist on all distros (e.g. CentOS) — verify.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 2  # number of replicas
  selector:
    matchLabels:
      app: nginx
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
    spec:
      containers:
      - image: registry.cn-beijing.aliyuncs.com/dotbalo/nginx:1.15.12
        imagePullPolicy: IfNotPresent
        name: nginx
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        # ports:
        # - containerPort: 8080
        #   name: nginx-port
        #   protocol: TCP
        volumeMounts:
        - mountPath: /opt
          name: share-volume
        - mountPath: /etc/timezone
          name: timezone
      - image: registry.cn-beijing.aliyuncs.com/dotbalo/nginx:1.15.12
        imagePullPolicy: IfNotPresent
        name: nginx2
        # Keep-alive sidecar used only to demonstrate volume sharing.
        command:
        - sh
        - -c
        - sleep 1200
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        # ports:
        # - containerPort: 8090
        #   name: nginx2-port
        #   protocol: TCP
        volumeMounts:
        - mountPath: /mnt
          name: share-volume
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
      - name: share-volume
        emptyDir: {}
        # medium: Memory  # tmpfs-backed emptyDir (cleared on reboot)
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: File  # the path must already exist on the node as a regular file
[root@k8s-master01 ~]#
kubectl create -f nginx-deploy_1205_hostpath.yaml
9.4 挂载NFS至容器
宿主机node01(ip:192.168.1.110)安装nfs服务器
yum -y install nfs-utils rpcbind
systemctl restart nfs-server
vi /etc/exports
[root@k8s-node01 ~]# cat /etc/exports
/data/nfs/ 192.168.1.0/24(rw,sync,no_subtree_check,no_root_squash)
[root@k8s-node01 ~]# exportfs -r
[root@k8s-node01 ~]# systemctl reload nfs-server
在master01节点操作
yum install -y nfs-utils;
mount -t nfs 192.168.1.110:/data/nfs /mnt/;
测试完成,我们先卸载/mnt,然后测试volumes挂载nfs;
umount /mnt/;
df -h; #确认挂载已删除
[root@k8s-master01 ~]# cat nginx-deploy_1206_volumes_nfs.yaml
# cat nginx-deploy.yaml
# Deployment demonstrating an in-line NFS volume: the "nginx" container
# mounts the remote export 192.168.1.110:/data/nfs/test-nfs-dp at /opt.
# Nodes must have the NFS client (nfs-utils) installed for the mount to work.
# NOTE(review): the exported subdirectory test-nfs-dp must already exist on
# the NFS server, otherwise the mount (and Pod start) fails — verify.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 2  # number of replicas
  selector:
    matchLabels:
      app: nginx
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
    spec:
      containers:
      - image: registry.cn-beijing.aliyuncs.com/dotbalo/nginx:1.15.12
        imagePullPolicy: IfNotPresent
        name: nginx
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        # ports:
        # - containerPort: 8080
        #   name: nginx-port
        #   protocol: TCP
        volumeMounts:
        # - mountPath: /opt
        #   name: share-volume
        - mountPath: /etc/timezone
          name: timezone
        - mountPath: /opt
          name: nfs-volumes
      - image: registry.cn-beijing.aliyuncs.com/dotbalo/nginx:1.15.12
        imagePullPolicy: IfNotPresent
        name: nginx2
        # Keep-alive sidecar used only to demonstrate volume sharing.
        command:
        - sh
        - -c
        - sleep 1200
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        # ports:
        # - containerPort: 8090
        #   name: nginx2-port
        #   protocol: TCP
        volumeMounts:
        - mountPath: /mnt
          name: share-volume
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
      - name: share-volume
        emptyDir: {}
        # medium: Memory  # tmpfs-backed emptyDir (cleared on reboot)
      - name: timezone
        hostPath:
          path: /etc/timezone
          type: File
      - name: nfs-volumes
        nfs:
          server: 192.168.1.110
          path: /data/nfs/test-nfs-dp
[root@k8s-master01 ~]#
kubectl create -f nginx-deploy_1206_volumes_nfs.yaml
9.5 为什么要引入PV和PVC?
9.6 PV访问和回收策略
9.7 文件存储、块存储、对象存储区别
文件存储推荐:公有云的NAS和CephFS
对象存储:程序直接写入远端存储,不需要挂载操作;
9.8 创建NAS或NFS类型的PV
生产环境不推荐使用nfs(因为是单点),建议使用公有云的nas平台(高可用);
课程里以nfs为演示,nas和nfs两者配置一样;
nfs-server:node01节点;
yum install -y nfs-utils rpcbind
mkdir -p /data/k8s
[root@k8s-node01 k8s]# cat /etc/exports
/data/k8s/ 192.168.1.0/24(rw,sync,no_subtree_check,no_root_squash)
[root@k8s-node01 k8s]# exportfs -r
[root@k8s-node01 k8s]# systemctl restart nfs-server rpcbind
nfs-client:master01-03+node02
yum install -y nfs-utils;
master01节点测试
[root@k8s-master01 ~]# mount -t nfs 192.168.1.110:/data/k8s /mnt/
node01(nfs服务器)测试
测试完成master01节点卸载挂载点
pv和pvc创建成功不代表能用,只有pod挂载成功才算成功;
[root@k8s-master01 pv]# pwd
/root/pv
[root@k8s-master01 pv]# cat pv-nfs.yaml
# PersistentVolume backed by the NFS export 192.168.1.110:/data/k8s.
# A PVC binds to it by matching storageClassName (nfs-slow), access mode,
# and a requested size <= 5Gi.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  # NOTE(review): Recycle is deprecated upstream; prefer Retain (manual
  # cleanup) or Delete for dynamically provisioned volumes.
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs-slow
  nfs:
    path: /data/k8s
    server: 192.168.1.110
[root@k8s-master01 pv]#
kubectl create -f pv-nfs.yaml
9.9 PV的状态
9.10 创建HostPath类型的PV
[root@k8s-master01 pv]# cat pv-hostpath.yaml
# PersistentVolume backed by a directory on the node's local filesystem.
# Suitable for single-node testing only: the data lives on whichever node
# the consuming Pod lands on.
kind: PersistentVolume
apiVersion: v1
metadata:
  name: task-pv-volume
  labels:
    type: local
spec:
  storageClassName: hostpath
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: "/mnt/data"
[root@k8s-master01 pv]#
kubectl create -f pv-hostpath.yaml
9.11 PVC如何绑定到PV?
9.12 PVC挂载示例
[root@k8s-master01 pv]# cat pvc-nfs.yaml
# PersistentVolumeClaim that binds to the pv-nfs PersistentVolume above:
# same storageClassName (nfs-slow), same access mode, and the 3Gi request
# fits within the PV's 5Gi capacity.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-pvc-claim
spec:
  storageClassName: nfs-slow
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
您在 /var/spool/mail/root 中有新邮件
[root@k8s-master01 pv]#
kubectl create -f pvc-nfs.yaml
pv和pvc绑定成功不代表可用,需要挂载到pod里测试;
创建pod挂载pvc类型的volumes
[root@k8s-master01 pv]# cat pvc-nfs-pod.yaml
# Pod that consumes the nfs-pvc-claim PVC and mounts it as the nginx
# web root, so files written to the NFS export are served over HTTP.
# Binding a PV/PVC alone proves nothing — this mount is the real test.
kind: Pod
apiVersion: v1
metadata:
  name: task-pv-pod
spec:
  volumes:
  - name: task-pv-storage
    persistentVolumeClaim:
      claimName: nfs-pvc-claim
  containers:
  - name: nfs-pv-container
    image: registry.cn-beijing.aliyuncs.com/dotbalo/nginx
    ports:
    - containerPort: 80
      name: "http-server"
    volumeMounts:
    - mountPath: "/usr/share/nginx/html"
      name: task-pv-storage
[root@k8s-master01 pv]#
kubectl create -f pvc-nfs-pod.yaml
在nfs服务端node01节点更新数据
在master01节点的pod里查看
步骤:先创建pv,然后创建pvc,最后pod 挂载pvc;
[root@k8s-master01 pv]# cat pvc-nginx-deploy_1207.yaml
# cat nginx-deploy.yaml
# Deployment whose two replicas all mount the same nfs-pvc-claim PVC as
# the nginx web root — demonstrating data shared across Pods via NFS.
# Order of operations: create the PV, then the PVC, then this Deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 2  # number of replicas
  selector:
    matchLabels:
      app: nginx
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
    spec:
      volumes:
      - name: task-pv-storage
        persistentVolumeClaim:
          claimName: nfs-pvc-claim
      containers:
      - image: registry.cn-beijing.aliyuncs.com/dotbalo/nginx:1.15.12
        imagePullPolicy: IfNotPresent
        name: nginx
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: task-pv-storage
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
[root@k8s-master01 pv]#
kubectl create -f pvc-nginx-deploy_1207.yaml
nfs的pvc从而实现了两个pod间数据的共享;
9.13 PVC创建和挂载处于Pending的原因
pvc不能replace/apply时,先delete -f,再create;
---------------教程来源:51cto 杜宽老师k8s课程的学习笔记 -------------