【K8S学习笔记-004】K8s存储抽象(NFS,PVC,ConfigMap,Secret)

学习视频:https://www.bilibili.com/video/BV13Q4y1C7hS?p=26&spm_id_from=pageDriver&vd_source=0bf662c33adfc181186b04ba57e11dff
附带笔记:https://www.yuque.com/leifengyang/oncloud/kgheaf

存储层

在这里插入图片描述

假设有三台机器,我们想像docker的volume一样将pod内部的文件与主机的文件同步。如果3号机器的黑色pod宕机了,按照k8s的习惯将会(故障转移)在另一台机器上启动这个pod,那同步到主机的/tmp怎么办?
在这里插入图片描述

于是,我们引入存储层(以NFS为例)
在这里插入图片描述
在这里插入图片描述
不同节点的文件在存储层实现共享

NFS实验

  1. 在所有节点

yum install -y nfs-utils
  1. 主节点
[root@k8s-master ~]# echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
# 以非安全,读写的方式暴露/nfs/data/
[root@k8s-master ~]# mkdir -p /nfs/data
[root@k8s-master ~]# systemctl enable rpcbind --now
# 启动远程绑定
[root@k8s-master ~]# systemctl enable nfs-server --now
Created symlink from /etc/systemd/system/multi-user.target.wants/nfs-server.service to /usr/lib/systemd/system/nfs-server.service.
[root@k8s-master ~]# exportfs -r
#配置生效
[root@k8s-master ~]# exportfs
/nfs/data       <world>
  1. 从节点配置
#执行以下命令,将 nfs 服务器上的共享目录挂载到本机路径 /nfs/data
[root@k8s-1 ~]# showmount -e 192.168.23.243
Export list for 192.168.23.243:
/nfs/data *
[root@k8s-1 ~]# mkdir -p /nfs/data
[root@k8s-1 ~]# mount -t nfs 192.168.23.243:/nfs/data /nfs/data


[root@k8s-2 ~]# showmount -e 192.168.23.243
Export list for 192.168.23.243:
/nfs/data *
[root@k8s-2 ~]# mkdir -p /nfs/data
[root@k8s-2 ~]# mount -t nfs 192.168.23.243:/nfs/data /nfs/data

# 写入一个测试文件
[root@k8s-master ~]# echo "hello nfs server" > /nfs/data/test.txt

[root@k8s-1 ~]# cat /nfs/data/test.txt
hello nfs server
[root@k8s-2 ~]# cat /nfs/data/test.txt
hello nfs server

从节点同样操作也能同步

原生方式数据挂载

以下是yaml文件

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-pv-demo
  name: nginx-pv-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-pv-demo
  template:
    metadata:
      labels:
        app: nginx-pv-demo
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
      volumes:
        - name: html
          nfs:
            server: 192.168.23.243
            path: /nfs/data/nginx-pv
[root@k8s-master ~]# kubectl apply -f volumeMount.yaml
deployment.apps/nginx-pv-demo created
[root@k8s-master ~]# kubectl get pod
NAME                             READY   STATUS              RESTARTS   AGE
hello-server-6cbb679d85-5hxzg    1/1     Running             1          46h
hello-server-6cbb679d85-cx8tl    1/1     Running             1          46h
my-dep-5b7868d854-bt64f          1/1     Running             1          2d11h
my-dep-5b7868d854-mrr67          1/1     Running             1          2d11h
my-dep-5b7868d854-mv4qf          1/1     Running             1          2d11h
my-dep-5b7868d854-vbxf4          1/1     Running             1          2d11h
nginx-demo-7d56b74b84-j7g6x      1/1     Running             1          46h
nginx-demo-7d56b74b84-tqnxq      1/1     Running             1          46h
nginx-pv-demo-677479c9fc-2nfzl   0/1     ContainerCreating   0          5s
nginx-pv-demo-677479c9fc-t9t5m   0/1     ContainerCreating   0          5s

过了一段时间发现两个pod一直没好,我们去看下问题在哪
在这里插入图片描述

在这里插入图片描述
更改错误

[root@k8s-master ~]# mkdir -p /nfs/data/nginx-pv
[root@k8s-master ~]# kubectl get deploy
NAME            READY   UP-TO-DATE   AVAILABLE   AGE
hello-server    2/2     2            2           46h
my-dep          4/4     4            4           2d12h
nginx-demo      2/2     2            2           46h
nginx-pv-demo   0/2     2            0           6m1s

[root@k8s-master ~]# kubectl delete -f volumeMount.yaml
deployment.apps "nginx-pv-demo" deleted
[root@k8s-master ~]# kubectl apply -f volumeMount.yaml
deployment.apps/nginx-pv-demo created

现在正常了
在这里插入图片描述

查看是否成功

[root@k8s-master nginx-pv]# cd /nfs/data/nginx-pv
[root@k8s-master nginx-pv]# echo 111222 > index.html

# 进入第一个pod查看/usr/share/nginx/html/index.html
[root@k8s-master nginx-pv]# kubectl exec -it nginx-pv-demo-677479c9fc-7swnz --bash
Error: unknown flag: --bash
See 'kubectl exec --help' for usage.
[root@k8s-master nginx-pv]# kubectl exec -it nginx-pv-demo-677479c9fc-7swnz /bin/bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-pv-demo-677479c9fc-7swnz:/# cd /usr/share/nginx/html/
root@nginx-pv-demo-677479c9fc-7swnz:/usr/share/nginx/html# cat index.html
111222

PV&PVC

PV:持久卷(Persistent Volume),将应用需要持久化的数据保存到指定位置
PVC:持久卷申明(Persistent Volume Claim),申明需要使用的持久卷规格

在前文的案例中,如果我们不再需要数据挂载,仅仅执行 kubectl delete 删除 yaml 对应的资源,Pod 虽然被删除了,但是 NFS 上挂载目录里残留的数据文件并不会被自动清理。
在这里插入图片描述

提前创建好pv放入pv池中,然后pod按需自取

创建pv池

#nfs主节点
mkdir -p /nfs/data/01
mkdir -p /nfs/data/02
mkdir -p /nfs/data/03

创建pv池的yaml文件

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv01-10m
spec:
  capacity:
    storage: 10M
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/01
    server: 192.168.23.243
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv02-1gi
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/02
    server: 192.168.23.243
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv03-3gi
spec:
  capacity:
    storage: 3Gi
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/03
    server: 192.168.23.243
[root@k8s-master ~]# vi pvpool.yaml
[root@k8s-master ~]# kubectl apply -f pvpool.yaml
persistentvolume/pv01-10m created
persistentvolume/pv02-1gi created
persistentvolume/pv03-3gi created

[root@k8s-master ~]# kubectl get persistentvolume
NAME       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
pv01-10m   10M        RWX            Retain           Available           nfs                     62s
pv02-1gi   1Gi        RWX            Retain           Available           nfs                     62s
pv03-3gi   3Gi        RWX            Retain           Available           nfs                     62s

PVC创建与绑定

  1. 创建PVC
[root@k8s-master ~]# kubectl apply -f pvc.yaml
persistentvolumeclaim/nginx-pvc created

[root@k8s-master ~]# kubectl get pv
NAME       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM               STORAGECLASS   REASON   AGE
pv01-10m   10M        RWX            Retain           Available                       nfs                     6m9s
pv02-1gi   1Gi        RWX            Retain           Bound       default/nginx-pvc   nfs                     6m9s
pv03-3gi   3Gi        RWX            Retain           Available                       nfs                     6m9s

我们发现pv02状态变成了Bound,如果我们删除这份申请书(PVC),看看会怎样

[root@k8s-master ~]# kubectl delete -f pvc.yaml
persistentvolumeclaim "nginx-pvc" deleted
[root@k8s-master ~]# kubectl get pv
NAME       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM               STORAGECLASS   REASON   AGE
pv01-10m   10M        RWX            Retain           Available                       nfs                     8m6s
pv02-1gi   1Gi        RWX            Retain           Released    default/nginx-pvc   nfs                     8m6s
pv03-3gi   3Gi        RWX            Retain           Available                       nfs                     8m6s

我们发现pv02状态变成了Released。注意:Released 状态的 PV 不会被新的 PVC 自动复用,所以重新创建 PVC 后它绑定到了别的 PV。重新创建一下看看pvc

[root@k8s-master ~]# kubectl apply -f pvc.yaml
persistentvolumeclaim/nginx-pvc created
[root@k8s-master ~]# kubectl get pvc
NAME        STATUS   VOLUME     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
nginx-pvc   Bound    pv03-3gi   3Gi        RWX            nfs            3s
  1. 创建pod绑定PVC
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-deploy-pvc
  name: nginx-deploy-pvc
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-deploy-pvc
  template:
    metadata:
      labels:
        app: nginx-deploy-pvc
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
      volumes:
        - name: html
          persistentVolumeClaim:
            claimName: nginx-pvc

启动成功

[root@k8s-master ~]# kubectl get pod
NAME                                READY   STATUS    RESTARTS   AGE
hello-server-6cbb679d85-5hxzg       1/1     Running   1          2d
hello-server-6cbb679d85-cx8tl       1/1     Running   1          2d
my-dep-5b7868d854-bt64f             1/1     Running   1          2d12h
my-dep-5b7868d854-mrr67             1/1     Running   1          2d12h
my-dep-5b7868d854-mv4qf             1/1     Running   1          2d12h
my-dep-5b7868d854-vbxf4             1/1     Running   1          2d12h
nginx-demo-7d56b74b84-j7g6x         1/1     Running   1          2d
nginx-demo-7d56b74b84-tqnxq         1/1     Running   1          2d
nginx-deploy-pvc-79fc8558c7-s4rqq   1/1     Running   0          45s
nginx-deploy-pvc-79fc8558c7-xmjxh   1/1     Running   0          45s
nginx-pv-demo-677479c9fc-7swnz      1/1     Running   0          85m
nginx-pv-demo-677479c9fc-pdc59      1/1     Running   0          85m

测试一下

[root@k8s-master ~]# cd /nfs/data/03
[root@k8s-master 03]# echo 12122123 > index.html

[root@k8s-master 03]# kubectl exec -it nginx-deploy-pvc-79fc8558c7-s4rqq /bin/bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deploy-pvc-79fc8558c7-s4rqq:/# cd /usr/share/nginx/html/
root@nginx-deploy-pvc-79fc8558c7-s4rqq:/usr/share/nginx/html# cat index.html
12122123

使用configMap抽取数据

以Redis为例

  1. 把之前的配置文件创建为配置集

[root@k8s-master ~]# vi redis.conf
[root@k8s-master ~]# cat redis.conf
appendonly yes

# 创建配置,redis.conf 的内容会保存到 k8s 的 etcd 中;
[root@k8s-master ~]# kubectl create cm redis-conf --from-file=redis.conf
configmap/redis-conf created
[root@k8s-master ~]# kubectl get cm
NAME               DATA   AGE
kube-root-ca.crt   1      2d19h
redis-conf         1      9s
[root@k8s-master ~]# rm -rf redis.conf
[root@k8s-master ~]# kubectl get cm redis-conf -oyaml
apiVersion: v1
data:
  redis.conf: |+
    appendonly yes

kind: ConfigMap
metadata:
  creationTimestamp: "2022-07-14T09:29:03Z"
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:data:
        .: {}
        f:redis.conf: {}
    manager: kubectl-create
    operation: Update
    time: "2022-07-14T09:29:03Z"
  name: redis-conf
  namespace: default
  resourceVersion: "144649"
  uid: ffa48197-09ce-40ea-93a9-cb78a072254c

  1. 创建 Pod
    根据以下的yaml创建pod
apiVersion: v1
kind: Pod
metadata:
  name: redis
spec:
  containers:
  - name: redis
    image: redis
    command:
      - redis-server
      - "/redis-master/redis.conf"  #指的是redis容器内部的位置
    ports:
    - containerPort: 6379
    volumeMounts:
    - mountPath: /data
      name: data
    - mountPath: /redis-master
      name: config
  volumes:
    - name: data
      emptyDir: {}
    - name: config
      configMap:
        name: redis-conf
        items:
        - key: redis.conf
          path: redis.conf

架构图
在这里插入图片描述
测试一下

  1. 检查默认配置

在这里插入图片描述
  1. 修改ConfigMap

[root@k8s-master ~]# kubectl get cm
NAME               DATA   AGE
kube-root-ca.crt   1      2d21h
redis-conf         1      125m
[root@k8s-master ~]# kubectl edit cm
configmap/kube-root-ca.crt skipped
configmap/redis-conf edited

在这里插入图片描述

  1. 检查配置是否更新

在这里插入图片描述

Secret

Secret 对象类型用来保存敏感信息,例如密码、OAuth 令牌和 SSH 密钥。将这些信息放在 Secret 中,比直接放在 Pod 的定义或者容器镜像中更加安全和灵活。

kubectl create secret docker-registry leifengyang-docker \
--docker-username=leifengyang \
--docker-password=Lfy123456 \
--docker-email=534096094@qq.com

##命令格式
kubectl create secret docker-registry regcred \
  --docker-server=<你的镜像仓库服务器> \
  --docker-username=<你的用户名> \
  --docker-password=<你的密码> \
  --docker-email=<你的邮箱地址>
apiVersion: v1
kind: Pod
metadata:
  name: private-nginx
spec:
  containers:
  - name: private-nginx
    image: leifengyang/guignginx:v1.0
  imagePullSecrets:
  - name: leifengyang-docker
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值