storage

1.empty-dir类型

[root@master storage]# cat empty-dir.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: empty-dir-demo
  namespace: myspace
  labels:
    storage-type: empty-dir
spec:
  volumes:
  - name: html
    emptyDir: {}
  containers:
  - name: nginx
    image: nginx:1.12-alpine
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html
  - name: pagen
    image: alpine
    volumeMounts: 
    - name: html
      mountPath: /html
    command: ["/bin/sh","-c"]
    args:
    - while true; 
      do 
        echo $(hostname)-$(date) >> /html/index.html;
        sleep 10;
      done
[root@master storage]# kubectl get pods -n myspace  -o wide
NAME             READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
empty-dir-demo   2/2     Running   0          32s   10.244.2.128   node02   <none>           <none>

[root@master storage]# kubectl describe pods empty-dir-demo -n myspace
Name:               empty-dir-demo
Namespace:          myspace
Priority:           0
PriorityClassName:  <none>
Node:               node02/192.168.100.4
Start Time:         Tue, 09 Jun 2020 14:44:44 +0800
Labels:             storage-type=empty-dir
Annotations:        kubectl.kubernetes.io/last-applied-configuration:
                      {"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{},"labels":{"storage-type":"empty-dir"},"name":"empty-dir-demo","namespace":"my...
Status:             Running
IP:                 10.244.2.128
Containers:
  nginx:
    Container ID:   docker://a7470cb99e45b84ba4578c5b47316e1fabcd6f4d444cea1b02a20041d578f421
    Image:          nginx:1.12-alpine
    Image ID:       docker-pullable://docker.io/nginx@sha256:db5acc22920799fe387a903437eb89387607e5b3f63cf0f4472ac182d7bad644
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Tue, 09 Jun 2020 14:44:48 +0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /usr/share/nginx/html from html (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-kznjt (ro)
  pagen:
    Container ID:  docker://758fef9c16c01d2a48fb2ed0f104d71d73a7b3042f739f3c7d6378a56f5cec7e
    Image:         alpine
    Image ID:      docker-pullable://docker.io/alpine@sha256:185518070891758909c9f839cf4ca393ee977ac378609f700f60a771a2dfe321
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/sh
      -c
    Args:
      while true; do echo $(hostname)-$(date) >> /html/index.html; sleep 10; done
    State:          Running
      Started:      Tue, 09 Jun 2020 14:45:04 +0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /html from html (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-kznjt (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  html:
    Type:    EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:  
  default-token-kznjt:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-kznjt
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  81s   default-scheduler  Successfully assigned myspace/empty-dir-demo to node02
  Normal  Pulled     78s   kubelet, node02    Container image "nginx:1.12-alpine" already present on machine
  Normal  Created    78s   kubelet, node02    Created container
  Normal  Started    77s   kubelet, node02    Started container
  Normal  Pulling    77s   kubelet, node02    pulling image "alpine"
  Normal  Pulled     61s   kubelet, node02    Successfully pulled image "alpine"
  Normal  Created    61s   kubelet, node02    Created container
  Normal  Started    61s   kubelet, node02    Started container
#empty-dir-demo的pod中运行2个容器,一个是nginx,一个是pagen,当一个pod中含有多个容器才真正意义上实现共享的目的,一个pod中包含一个容器通常是为了持久存储的目的
#pagen容器每隔10秒钟向共享的目录下的html追加一条信息:
[root@master storage]# kubectl exec -it empty-dir-demo -c nginx /bin/sh -n myspace
/ # cat /usr/share/nginx/html/index.html 
empty-dir-demo-Tue Jun 9 06:45:04 UTC 2020
empty-dir-demo-Tue Jun 9 06:45:14 UTC 2020
empty-dir-demo-Tue Jun 9 06:45:24 UTC 2020
empty-dir-demo-Tue Jun 9 06:45:34 UTC 2020
empty-dir-demo-Tue Jun 9 06:45:44 UTC 2020
empty-dir-demo-Tue Jun 9 06:45:54 UTC 2020
empty-dir-demo-Tue Jun 9 06:46:04 UTC 2020
empty-dir-demo-Tue Jun 9 06:46:14 UTC 2020
empty-dir-demo-Tue Jun 9 06:46:24 UTC 2020
empty-dir-demo-Tue Jun 9 06:46:34 UTC 2020
empty-dir-demo-Tue Jun 9 06:46:44 UTC 2020
empty-dir-demo-Tue Jun 9 06:46:54 UTC 2020
empty-dir-demo-Tue Jun 9 06:47:04 UTC 2020
empty-dir-demo-Tue Jun 9 06:47:14 UTC 2020
empty-dir-demo-Tue Jun 9 06:47:24 UTC 2020
empty-dir-demo-Tue Jun 9 06:47:34 UTC 2020
#emptyDir也可以基于RAM创建的tmpfs文件系统作为存储卷,常用于为容器应用提供高性能缓存
volumes:
- name: cache
  emptyDir:
    medium: Memory
#emptyDir类型存储生命周期和pod一样,当pod不存在时,数据也会丢失,不具有持久存储的意义

2.hostPath

[root@master storage]# cat hostPath.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: hostpath-demo
  namespace: myspace
  labels:
    storage: hostPath
spec:
  containers:
  - name: hostpath
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: share
      mountPath: /usr/share/nginx/html
  volumes:
  - name: share
    hostPath:
      path: /home/demo/share
      type: Directory

[root@master storage]# kubectl apply -f  hostPath.yaml 
pod/hostpath-demo created
[root@master storage]# kubectl get pods -n myspace
NAME            READY   STATUS    RESTARTS   AGE
hostpath-demo   1/1     Running   0          7s
[root@master storage]# kubectl get pods -n myspace -o wide
NAME            READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
hostpath-demo   1/1     Running   0          13s   10.244.1.79   node01   <none>           <none>
[root@master storage]# curl 10.244.1.79
<html>
<head><title>403 Forbidden</title></head>
<body bgcolor="white">
<center><h1>403 Forbidden</h1></center>
<hr><center>nginx/1.12.2</center>
</body>
</html>
#node01节点创建index.html,并写入信息
[root@node01 ~]# useradd demo
[root@node01 ~]# mkdir -p /home/demo/share
[root@node01 ~]# ls
[root@node01 ~]# cd /home/demo/share/
[root@node01 share]# ls
[root@node01 share]# echo "this is node01" > index.html
#上面的输出中pod刚好被调度到node01节点,因此可以访问
[root@master storage]# curl 10.244.1.79
this is node01
#重新删除并再次应用pod,使其调度到node02节点
[root@master storage]# kubectl delete -f hostPath.yaml 
pod "hostpath-demo" deleted
[root@master storage]# kubectl get pods -n myspace -o wide
No resources found.
[root@master storage]# kubectl apply -f hostPath.yaml 
[root@master storage]# kubectl get pods -n myspace -o wide -w
NAME            READY   STATUS              RESTARTS   AGE   IP       NODE     NOMINATED NODE   READINESS GATES
hostpath-demo   0/1     ContainerCreating   0          22s   <none>   node02   <none>           <none>
[root@master storage]# kubectl describe pods hostpath-demo -n myspace
...               ...
...               ...
Events:
  Type     Reason       Age               From               Message
  ----     ------       ----              ----               -------
  Normal   Scheduled    71s               default-scheduler  Successfully assigned myspace/hostpath-demo to node02
  Warning  FailedMount  6s (x8 over 70s)  kubelet, node02    MountVolume.SetUp failed for volume "share" : hostPath type check failed: /home/demo/share is not a directory
#由于node02节点没有/home/demo/share/index.html,会导致pod的创建失败,从而也就不能访问数据,失败的原因之一是指定了 type: Directory,这个参数不指定默认是可以创建成功的,但是依旧访问不到数据
#以下是去掉type:Directory参数后的输出
[root@master storage]# kubectl get pods -n myspace -o wide
NAME            READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
hostpath-demo   1/1     Running   0          7s    10.244.2.129   node02   <none>           <none>
[root@master storage]# kubectl get pods -n myspace -o wide
NAME            READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
hostpath-demo   1/1     Running   0          7s    10.244.2.129   node02   <none>           <none>
[root@master storage]# curl 10.244.2.129
<html>
<head><title>403 Forbidden</title></head>
<body bgcolor="white">
<center><h1>403 Forbidden</h1></center>
<hr><center>nginx/1.12.2</center>
</body>
</html>


#hostPath有两个关键字: path(必选)和type(可选)
#Directory: 目录路径必须存在
#DirectoryOrCreate:指定路径不存在时自动创建权限为0755的空目录,属主属组都是kubelet
#FileOrCreate:指定的路径不存在时自动创建权限为0644的空文件,属主属组都是kubelet

#hostPath类型的节点存储,当pod被调度至其他节点,或者pod所在的节点损坏,都会造成数据的不可访问,因此一定意义上不具有持久存储的意义

3.网络存储–NFS

#NFS-server端配置(注意服务启动顺序)
[root@master storage]# yum -y install nfs-utils
[root@master storage]# cat /etc/exports
/nfs_share		192.168.100.0/24(rw,no_root_squash)
[root@master storage]# systemctl enable rpcbind
[root@master storage]# systemctl enable nfs-server
[root@master storage]# systemctl start rpcbind
[root@master storage]# systemctl start nfs-server
[root@master storage]# exportfs -arv
exporting 192.168.100.0/24:/nfs_share
#NFS-client(node01)配置
[root@node01 share]# yum -y install nfs-utils
[root@node01 share]# systemctl enable rpcbind
[root@node01 share]# systemctl start rpcbind
[root@node01 share]# showmount -e 192.168.100.2
Export list for 192.168.100.2:
/nfs_share 192.168.100.0/24
#NFS-client(node02)配置
[root@node02 ~]# yum -y install nfs-utils
[root@node02 ~]# systemctl enable rpcbind
[root@node02 ~]# systemctl start rpcbind
[root@node02 ~]# showmount -e 192.168.100.2
Export list for 192.168.100.2:
/nfs_share 192.168.100.0/24
[root@master storage]# cat nfs.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nfs
  namespace: myspace
  labels:
    storage: nfs
spec:
  containers:
  - name: nfs
    image: ikubernetes/myapp:v1
    volumeMounts:
    - name: nfs-share
      mountPath: /usr/share/nginx/html
  volumes:
  - name: nfs-share
    nfs:
      server: 192.168.100.2
      path: /nfs_share
      readOnly: false
[root@master storage]# kubectl apply -f nfs.yaml 
pod/nfs created
[root@master storage]# kubectl get pods -n myspace  -o wide
NAME   READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
nfs    1/1     Running   0          15s   10.244.2.130   node02   <none>           <none>
[root@master storage]# curl 10.244.2.130
this is nfs_share
[root@master storage]# kubectl delete -f nfs.yaml 
pod "nfs" deleted
[root@master storage]# kubectl apply -f nfs.yaml 
pod/nfs created
[root@master storage]# kubectl get pods -n myspace  -o wide
NAME   READY   STATUS    RESTARTS   AGE   IP            NODE     NOMINATED NODE   READINESS GATES
nfs    1/1     Running   0          8s    10.244.1.94   node01   <none>           <none>
[root@master storage]# curl 10.244.1.94
this is nfs_share
#以上输出表明不管pod被调用到哪个节点,都可以访问到同样的数据,具有持久性存储的意义

总结:
1.emptyDir:存储的生命周期跟随pod的终结而消失
2.hostPath:当pod被调度到不同的节点,或因故障pod所在节点损坏都会造成数据的不可访问
3.NFS:无论pod被调度到任何节点,都可以实现数据的访问
4.类似的实现持久性存储的还有ceph的rbd,GlusterFS,cinder等,不过这些都要清楚网络存储的访问细节才可以完成存储卷的配置任务

4.持久性存储–pv&pvc

#将共享存储作为一种可由用户申请使用的资源,实现存储消费,常见的网络存储和云端存储都可以被PV支持,像nfs,rbd,cinder等,当考虑到性能场景时可能会用到StorageClass--更高级的持久存储
#下面实例使用NFS作为后端存储来创建PV
mkdir -p /nfs_share/{pv-nfs-01,pv-nfs-02}
[root@master storage]# ll /nfs_share/
total 4
-rw-r--r-- 1 root    root   18 Mar 11  2019 index.html
drwxr-xr-x 2 root    root    6 Jun  9 17:46 pv-nfs-01
drwxr-xr-x 2 root    root    6 Jun  9 17:46 pv-nfs-02
#创建pv--pv和pvc使用标签选择器进行关联
[root@master storage]# cat pv.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs-01
  labels:
    storage: nfs-pv
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: slow
  mountOptions:
  - hard
  - nfsvers=4.1
  nfs:
    path: "/nfs_share/pv-nfs-01"
    server: 192.168.100.2
[root@master storage]# kubectl apply -f pv.yaml 
persistentvolume/pv-nfs-01 created
#当前pv处于Available状态
[root@master storage]# kubectl get pv 
NAME        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
pv-nfs-01   5Gi        RWX            Recycle          Available           slow                    8s
#创建PVC,pv属于集群级别的资源,pvc是名称空间的资源
[root@master storage]# cat  pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-nfs-pv-01
  namespace: myspace
  labels:
     storage: nfs-pv
spec:
  accessModes:
  - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 5Gi
  storageClassName: slow
  selector:
    matchLabels:
      storage: nfs-pv
#pvc绑定pv
[root@master storage]# kubectl apply -f pvc.yaml 
persistentvolumeclaim/pvc-nfs-pv-01 created
[root@master storage]# kubectl get pvc -n myspace
NAME            STATUS   VOLUME      CAPACITY   ACCESS MODES   STORAGECLASS   AGE
pvc-nfs-pv-01   Bound    pv-nfs-01   5Gi        RWX            slow           9s
[root@master storage]# kubectl get pv
NAME        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                   STORAGECLASS   REASON   AGE
pv-nfs-01   5Gi        RWX            Recycle          Bound    myspace/pvc-nfs-pv-01   slow                    10m
#在pod中使用pvc
[root@master storage]# cat pvc-pod.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nfs-pvc-pod
  namespace: myspace
spec:
  containers:
  - name: nginx
    image: ikubernetes/myapp:v2
    volumeMounts:
    - name: nfs-pvc
      mountPath: /usr/share/nginx/html
  volumes:
  - name: nfs-pvc
    persistentVolumeClaim:
      claimName: pvc-nfs-pv-01
[root@master storage]# kubectl apply -f pvc-pod.yaml 
pod/nfs-pvc-pod created
[root@master storage]# kubectl get pods -n myspace -o wide
NAME          READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
nfs           1/1     Running   0          64m   10.244.1.94    node01   <none>           <none>
nfs-pvc-pod   1/1     Running   0          11s   10.244.2.131   node02   <none>           <none>
[root@master storage]# curl 10.244.2.131
<html>
<head><title>403 Forbidden</title></head>
<body bgcolor="white">
<center><h1>403 Forbidden</h1></center>
<hr><center>nginx/1.12.2</center>
</body>
</html>
[root@master storage]# echo "this is nfs-pvc-pod's test" > /nfs_share/pv-nfs-01/index.html
[root@master storage]# curl 10.244.2.131
this is nfs-pvc-pod's test

总结:
1.在Pod中使用pvc,需要指定claimName字段和readOnly字段
2.pvc在申请pv时需要指定目标空间的大小,访问模式,pv标签选择器和StorageClass等相关信息
3.pv创建的参数capacity,访问模式,回收策略,卷模型,storageClassName,mountOptions等

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值