Using PV and PVC in Kubernetes

  1. Install the NFS server
yum install rpcbind nfs-utils -y
systemctl enable rpcbind
systemctl enable nfs
systemctl start rpcbind
systemctl start nfs
mkdir -p /root/data/sc-data
[root@master sc-data]# cat /etc/exports
/root/data/sc-data 192.168.1.0/24(rw,no_root_squash)
/root/data/nginx/pv  192.168.1.0/24(rw,no_root_squash)
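After editing /etc/exports, create the second exported directory as well and reload the export table so both entries take effect (a minimal sketch):
mkdir -p /root/data/nginx/pv
exportfs -arv   # re-export everything listed in /etc/exports and print the result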
Check from another server that the exports are reachable:
[root@node1 ~]# showmount -e 192.168.1.90
Export list for 192.168.1.90:
/root/data/sc-data  192.168.1.0/24
/root/data/nginx/pv 192.168.1.0/24

Statically creating PV and PVC

  1. Create the PV
[root@master xiongfei]# cat  pv-nginx.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name:  pv-nginx  # name of the PV
spec:
  capacity:  # capacity
    storage: 5Gi
  accessModes: # access modes
  - ReadWriteMany  
  persistentVolumeReclaimPolicy: Retain  # what happens to the data in the NFS directory when the PV is released: Delete removes it, Retain keeps it
  nfs:
    path: /root/data/nginx/pv
    server: 192.168.1.90
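A sketch of applying the PV and confirming it shows up as Available (the output will differ on your cluster):
kubectl apply -f pv-nginx.yaml
kubectl get pv pv-nginx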
  2. Create the PVC
[root@master xiongfei]# cat nginx-pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-nginx
  namespace: dev
spec:
  accessModes: 
  - ReadWriteMany
  resources:
    requests:
      storage: 4Gi # keep this request within the PV's capacity; the claim is bound to the PV that best matches it
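Assuming the dev namespace may not exist yet, a typical sequence to apply the claim and watch it bind could look like this:
kubectl create namespace dev   # skip if dev already exists
kubectl apply -f nginx-pvc.yaml
kubectl get pvc pvc-nginx -n dev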
  3. Create a ConfigMap
[root@master xiongfei]# cat nginx-configmap.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-configmap
  namespace: dev
data:
  default.conf: |-
    server {
       listen       80;
       server_name  localhost;

       #charset koi8-r;

       #access_log  logs/host.access.log  main;

       location / {
        root   /usr/share/nginx/html/account;  
        index  index.html index.htm;
       }
 

        location /account {
                proxy_pass http://192.168.1.130:8088/account;
          }         
        

        error_page 405 =200 $uri;

        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
    }

     server {
         listen 8081;
         server_name localhost;
         location / {
            root   /usr/share/nginx/html/org; 
            index  index.html index.htm;
        }
                    
         location /org {
          
            proxy_pass http://192.168.1.130:8082/org;  
            # proxy using the WebSocket protocol
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            # enable support for WebSocket connections
            proxy_set_header Connection "Upgrade";
            proxy_set_header X-Real-IP $remote_addr;
            proxy_read_timeout 600s;
          }
                  
        location ~ ^/V1.0/(.*) {
            rewrite /(.*)$ /org/$1 break; 
            proxy_pass http://192.168.1.130:8082;
            proxy_set_header Host $proxy_host; 
           }        
      }
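To load the configuration and double-check the rendered default.conf, something like the following should work:
kubectl apply -f nginx-configmap.yaml
kubectl describe configmap nginx-configmap -n dev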
  4. Create an HPA to improve availability (optional; not needed for this walkthrough)
[root@master xiongfei]# cat nginx-hpa.yaml 
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: pc-hpa
  namespace: dev
spec:
  minReplicas: 1  # minimum number of pods
  maxReplicas: 10 # maximum number of pods
  targetCPUUtilizationPercentage: 10 # target CPU utilization (%)
  scaleTargetRef:   # the workload this HPA scales
    apiVersion:  apps/v1
    kind: Deployment
    name: nginx-deploy  # name of the Deployment
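Note that CPU-based scaling only works if a metrics source such as metrics-server is installed in the cluster; applying and inspecting the HPA is the usual:
kubectl apply -f nginx-hpa.yaml
kubectl get hpa pc-hpa -n dev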

  5. Create a Service and a Deployment to try it out
[root@master xiongfei]# cat nginx-service-deployment.yaml 
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-service
  name: nginx-service   
  namespace: dev
spec:
  ports:
  - name: account-nginx
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30013
  - name: org-nginx
    port: 8081
    protocol: TCP
    targetPort: 8081
    nodePort: 30014
  selector:
    app: nginx-pod1
  type: NodePort

---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-deploy
  name: nginx-deploy
  namespace: dev
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-pod1
  strategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: nginx-pod1
      namespace: dev
    spec:
      containers:
      - image: nginx:1.17.1
        name: nginx
        ports:
        - containerPort: 80
          protocol: TCP
        - containerPort: 8081
          protocol: TCP
        resources:
          limits:
            cpu: "1" 
          requests:
            cpu: "500m"
        volumeMounts:
        - name: nginx-config
          mountPath: /etc/nginx/conf.d/
          readOnly: true
        - name: nginx-html
          mountPath: /usr/share/nginx/html/
          readOnly: false              
       # - name: nginx-html2
       #   mountPath: /usr/share/nginx/html/app-vue2
       #   readOnly: false
      volumes:
      - name: nginx-config
        configMap:
          name: nginx-configmap # name of the ConfigMap
      - name: nginx-html
        persistentVolumeClaim:
          claimName: pvc-nginx # name of the PVC
          readOnly: false
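Apply the Service and Deployment and confirm the pod starts and the NodePorts are exposed (a sketch):
kubectl apply -f nginx-service-deployment.yaml
kubectl get pods,svc -n dev -o wide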

  6. Check the binding status between the PV and the PVC
[root@master ~]# kubectl get pv,pvc  -n dev  
NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM           STORAGECLASS   REASON   AGE
persistentvolume/pv-nginx                                   5Gi        RWX            Retain           Bound    dev/pvc-nginx                           2d6h

NAME                              STATUS   VOLUME     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
persistentvolumeclaim/pvc-nginx   Bound    pv-nginx   5Gi        RWX                           2d6h

  7. Verify the result from the running pod (screenshot omitted)
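One way to verify end to end is to drop a test page into the NFS directory that backs the PV and hit the NodePort; the file contents and the node IP below are assumptions, adjust them to your environment:
mkdir -p /root/data/nginx/pv/account
echo "hello from the pv" > /root/data/nginx/pv/account/index.html
curl http://192.168.1.90:30013/    # any node's IP works for a NodePort service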
The demonstration above creates the PV and PVC by hand, i.e. static provisioning.

Dynamically creating PV and PVC with a StorageClass

  1. Create the NFS provisioner
[root@master k8s-StorageClass]# cat deployment.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
 name: nfs-client-provisioner  # the storage backend here is NFS, so we need the nfs-client automatic provisioner; it uses the NFS server configured above to create persistent volumes automatically, i.e. it creates PVs for us
 namespace: kube-system
spec:
 replicas: 2
 strategy:
   type: Recreate
 selector:
   matchLabels:
     app: nfs-client-provisioner
 template:
   metadata:
     labels:
       app: nfs-client-provisioner
   spec:
     serviceAccountName: nfs-client-provisioner
     affinity:
       podAntiAffinity:
         preferredDuringSchedulingIgnoredDuringExecution:
         - weight: 100
           podAffinityTerm:
             topologyKey: kubernetes.io/hostname
             labelSelector:
               matchLabels:
                 app: nfs-client-provisioner
       # nodeAffinity:
         # requiredDuringSchedulingIgnoredDuringExecution:
           # nodeSelectorTerms:
           # - matchExpressions:
             # - key: dedicated
               # operator: In
               # values:
               # - "cmp"
     containers:
       - name: nfs-client-provisioner
         image: quay.io/external_storage/nfs-client-provisioner:v3.1.0-k8s1.11
         volumeMounts:
           - name: nfs-client-root
             mountPath: /persistentvolumes
         env:
           - name: PROVISIONER_NAME
             value: nfs-client-provisioner
           - name: NFS_SERVER
             value: 192.168.1.90
           - name: NFS_PATH
             value: /root/data/sc-data
     volumes:
       - name: nfs-client-root
         nfs:
           server: 192.168.1.90
           path: /root/data/sc-data

  2. Create the RBAC resources (the set of authorization policies)
[root@master k8s-StorageClass]# cat rabc.yaml 
kind: ServiceAccount
apiVersion: v1
metadata:
 name: nfs-client-provisioner
 namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: nfs-client-provisioner-runner
rules:
 - apiGroups: [""]
   resources: ["persistentvolumes"]
   verbs: ["get", "list", "watch", "create", "delete"]
 - apiGroups: [""]
   resources: ["persistentvolumeclaims"]
   verbs: ["get", "list", "watch", "update"]
 - apiGroups: ["storage.k8s.io"]
   resources: ["storageclasses"]
   verbs: ["get", "list", "watch"]
 - apiGroups: [""]
   resources: ["events"]
   verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: run-nfs-client-provisioner
subjects:
 - kind: ServiceAccount
   name: nfs-client-provisioner
   namespace: kube-system
roleRef:
 kind: ClusterRole
 name: nfs-client-provisioner-runner
 apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 namespace: kube-system
 name: leader-locking-nfs-client-provisioner
rules:
 - apiGroups: [""]
   resources: ["endpoints"]
   verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 namespace: kube-system
 name: leader-locking-nfs-client-provisioner
subjects:
 - kind: ServiceAccount
   name: nfs-client-provisioner
   # replace with namespace where provisioner is deployed
   namespace: kube-system
roleRef:
 kind: Role
 name: leader-locking-nfs-client-provisioner
 apiGroup: rbac.authorization.k8s.io
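Since the provisioner Deployment references the nfs-client-provisioner ServiceAccount, apply the RBAC manifest first, then the Deployment, and check that the provisioner pods come up (a sketch):
kubectl apply -f rabc.yaml
kubectl apply -f deployment.yaml
kubectl -n kube-system get pods -l app=nfs-client-provisioner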

  3. Create the StorageClass
[root@master k8s-StorageClass]# cat storageclass.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"storage.k8s.io/v1","kind":"StorageClass","metadata":{"annotations":{},"name":"nfs"},"provisioner":"nfs-client-provisioner","reclaimPolicy":"Delete"}
    storageclass.beta.kubernetes.io/is-default-class: "true"
    storageclass.kubernetes.io/is-default-class: "true"
  name: nfs
provisioner: nfs-client-provisioner
parameters:
  archiveOnDelete: "true"  ## whether to keep a backup of the volume's data when it is deleted (default "true"); after the pod and PVC are removed, the directory on the NFS server is renamed to something like archived-default-nginx-pvc-bcf313fa-a121-4833-8f74-2a255bdb7fcc, so the data is still there
reclaimPolicy: Retain  # either Delete or Retain; with Retain the PV is not reclaimed automatically when the PVC is deleted, it is kept so an administrator can clean it up or reuse it manually
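Applying the class and confirming it is marked as the default should look roughly like this:
kubectl apply -f storageclass.yaml
kubectl get storageclass   # the nfs class should show "(default)" next to its name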

1. Case one
parameters:
  archiveOnDelete: "true" 
reclaimPolicy: Delete
	 The directory on the NFS server is renamed with an archived- prefix, so the data is kept, but a newly created pod will not reuse the data from the old directory.
2. Case two
parameters:
  archiveOnDelete: "false" 
reclaimPolicy: Delete
	 The data in the NFS directory is deleted entirely.

3. **Case three**
parameters:
  archiveOnDelete: "false"  # default is true
reclaimPolicy: Retain   
   With the Retain policy, deleting the pod, PVC and PV does not remove the files on the NFS server; the directory keeps its original default-... name, and a newly created pod will reuse the previous data.
4. Case four
parameters:
  archiveOnDelete: "true" 
reclaimPolicy: Retain
     The directory keeps its original default-... name and the data is preserved, but a newly created pod will not reuse it; the PV's status changes to Released.
  4. Create a test pod
[root@master k8s-StorageClass]# cat test-nginx-pvc.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx:latest
    ports:
    - containerPort: 80
    volumeMounts:
      - name: www
        mountPath: /usr/share/nginx/html
  volumes:
    - name: www
      persistentVolumeClaim:
        claimName: nginx  # the name of the PVC defined below
---
# create a PVC that is provisioned dynamically through the StorageClass
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nginx
spec:
  storageClassName: "nfs"  # the name of the StorageClass
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
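After applying the test manifest, the provisioner should create a PV automatically along with a matching directory on the NFS server; the directory name follows the provisioner's ${namespace}-${pvcName}-${pvName} pattern, and the example name below is only illustrative:
kubectl apply -f test-nginx-pvc.yaml
kubectl get pvc nginx
kubectl get pv
ls /root/data/sc-data/
# e.g. default-nginx-pvc-bcf313fa-a121-4833-8f74-2a255bdb7fcc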
