Week 5 Assignment

I. Back up and restore a single namespace with Velero (instead of etcd snapshots)

Introduction

Velero

  • Velero is an open-source, cloud-native disaster-recovery and migration tool from VMware, written in Go. It can safely back up, restore, and migrate Kubernetes cluster resources and data.
  • Velero works with any standard Kubernetes cluster, on private or public cloud. Besides disaster recovery it can also migrate resources, moving containerized applications from one cluster to another.
  • Velero works by backing up Kubernetes data to object storage for durability and availability (the default backup retention is 720 hours), and downloading it again when a restore is needed.

Differences between Velero and etcd snapshot backups

  • An etcd snapshot is a whole-cluster backup: even if only one resource object needs to be restored, the entire cluster is rolled back to the snapshot state, which affects pods and services running in other namespaces.
  • Velero can back up selectively, for example a single namespace or individual resource objects, and restore only that namespace or object without touching workloads in other namespaces.
  • Velero stores backups in object storage such as Ceph or OSS; an etcd snapshot is just a local file.
  • Velero supports scheduled (periodic) backups natively, while etcd snapshots would need something like a CronJob to achieve the same (see the schedule sketch below).
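A minimal sketch of such a schedule, assuming the server side installed later in this document; the name, cron expression, and TTL are placeholders (flags follow the velero v1.8 CLI):

# Hypothetical example: back up the myserver namespace every day at 02:00 and keep each backup for 72h
velero schedule create myserver-daily \
  --schedule="0 2 * * *" \
  --include-namespaces myserver \
  --ttl 72h \
  --kubeconfig=./awsuser.kubeconfig \
  --namespace velero-system

# List schedules and the backups they have produced
velero schedule get --kubeconfig=./awsuser.kubeconfig --namespace velero-system
velero backup get --kubeconfig=./awsuser.kubeconfig --namespace velero-system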

Velero backup workflow

  • The velero client calls the Kubernetes API server to create a Backup object.
  • The Backup controller notices the new backup task through its watch on the API server.
  • The Backup controller performs the backup, querying the API server for the data that needs to be backed up.
  • The Backup controller uploads the collected data to the object-storage server.

image-20221206220250734

Deploy MinIO

1. Install Docker and create the data directory

root@k8s-deploy:~# mkdir /data/minio -p
root@k8s-deploy:~# docker version


2. Create the MinIO container

Create the MinIO container. If nothing is specified, the default username and password are minioadmin/minioadmin; they can be customized with environment variables, as shown below:

root@k8s-deploy:~# docker run --name minio \
 -p 9000:9000 \
 -p 9999:9999 \
 -d --restart=always \
 -e "MINIO_ROOT_USER=admin" \
 -e "MINIO_ROOT_PASSWORD=12345678" \
 -v /data/minio/data:/data \
 minio/minio:RELEASE.2022-04-12T06-55-35Z server /data \
 --console-address '0.0.0.0:9999'

image-20221206222114989

Deploy Velero

1. Installation

Install on the deploy node or a master node.

root@k8s-master001:/usr/local/src# wget https://github.com/vmware-tanzu/velero/releases/download/v1.8.1/velero-v1.8.1-linux-amd64.tar.gz
root@k8s-master001:/usr/local/src# tar xvf velero-v1.8.1-linux-amd64.tar.gz 
velero-v1.8.1-linux-amd64/LICENSE
velero-v1.8.1-linux-amd64/examples/README.md
velero-v1.8.1-linux-amd64/examples/minio
velero-v1.8.1-linux-amd64/examples/minio/00-minio-deployment.yaml
velero-v1.8.1-linux-amd64/examples/nginx-app
velero-v1.8.1-linux-amd64/examples/nginx-app/README.md
velero-v1.8.1-linux-amd64/examples/nginx-app/base.yaml
velero-v1.8.1-linux-amd64/examples/nginx-app/with-pv.yaml
velero-v1.8.1-linux-amd64/velero
root@k8s-master001:/usr/local/src# cp velero-v1.8.1-linux-amd64/velero /usr/local/bin/
root@k8s-master001:/usr/local/src# velero --help


2. Configure the Velero authentication environment

Working directory

root@k8s-master001:~# mkdir /data/velero -p
root@k8s-master001:~# cd /data/velero/
root@k8s-master001:/data/velero# 

1. Credentials file for accessing MinIO
root@k8s-master001:/data/velero#  vim velero-auth.txt 

[default]
aws_access_key_id = admin
aws_secret_access_key = 12345678


2. Prepare the user CSR file
root@k8s-master001:/data/velero# vim awsuser-csr.json

{
  "CN": "awsuser",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

3. Prepare the certificate-signing environment
# Download on the master node
root@k8s-master001:/data/velero# wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64 
root@k8s-master001:/data/velero# wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64 
root@k8s-master001:/data/velero# wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl-certinfo_1.6.1_linux_amd64
root@k8s-master001:/data/velero# mv cfssl-certinfo_1.6.1_linux_amd64 cfssl-certinfo
root@k8s-master001:/data/velero# mv cfssl_1.6.1_linux_amd64 cfssl
root@k8s-master001:/data/velero# mv cfssljson_1.6.1_linux_amd64 cfssljson
root@k8s-master001:/data/velero# cp cfssl-certinfo cfssl cfssljson /usr/local/bin/
root@k8s-master001:/data/velero# chmod  a+x /usr/local/bin/cfssl* 
root@k8s-master001:/data/velero# cfssl --help


# Or copy the binaries from the kubeasz directory on the deploy node
root@k8s-deploy:~# scp /etc/kubeasz/bin/cfssl* 172.21.90.212:/usr/local/bin/
cfssl                                                                                       100%   16MB 169.7MB/s   00:00    
cfssl-certinfo                                                                              100%   13MB 148.6MB/s   00:00    
cfssljson                                                                                   100%   11MB 166.9MB/s   00:00    
root@k8s-deploy:~# 
root@k8s-master001:~# cfssl --help

4. Sign the certificate
# On the deploy node, copy ca-config.json to /data/velero/ on the master node
root@k8s-deploy:~# scp /etc/kubeasz/clusters/k8s-cluster01/ssl/ca-config.json 172.21.90.212:/data/velero/
ca-config.json                                                                                 100%  483   584.8KB/s   00:00    

# Check the files on the master node
root@k8s-master001:/data/velero# ll
total 20
drwxr-xr-x 2 root root 4096 Dec  6 22:51 ./
drwxr-xr-x 3 root root 4096 Dec  6 22:30 ../
-rw-r--r-- 1 root root  221 Dec  6 22:36 awsuser-csr.json
-rw-r--r-- 1 root root  483 Dec  6 22:51 ca-config.json
-rw-r--r-- 1 root root   70 Dec  6 22:33 velero-auth.txt

root@k8s-master001:/data/velero# /usr/local/bin/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem -ca-key=/etc/kubernetes/ssl/ca-key.pem -config=./ca-config.json -profile=kubernetes ./awsuser-csr.json | cfssljson -bare awsuser
2022/12/06 22:57:32 [INFO] generate received request
2022/12/06 22:57:32 [INFO] received CSR
2022/12/06 22:57:32 [INFO] generating key: rsa-2048
2022/12/06 22:57:33 [INFO] encoded CSR
2022/12/06 22:57:33 [INFO] signed certificate with serial number 39901843030928839961587478488943254400776467389
2022/12/06 22:57:33 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

# Verify the certificates
root@k8s-master001:/data/velero# ll awsuser*
-rw-r--r-- 1 root root  997 Dec  6 22:57 awsuser.csr
-rw-r--r-- 1 root root  221 Dec  6 22:36 awsuser-csr.json
-rw------- 1 root root 1679 Dec  6 22:57 awsuser-key.pem
-rw-r--r-- 1 root root 1387 Dec  6 22:57 awsuser.pem


5. Copy the certificates into the kube-apiserver certificate directory
root@k8s-master001:/data/velero# cp awsuser-key.pem /etc/kubernetes/ssl/
root@k8s-master001:/data/velero# cp awsuser.pem /etc/kubernetes/ssl/

6. Generate the cluster kubeconfig file
root@k8s-master001:/data/velero#  export KUBE_APISERVER="https://172.21.90.212:6443"
root@k8s-master001:/data/velero#  kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=./awsuser.kubeconfig

7. Set the client certificate credentials
root@k8s-master001:/data/velero# kubectl config set-credentials awsuser \
--client-certificate=/etc/kubernetes/ssl/awsuser.pem \
--client-key=/etc/kubernetes/ssl/awsuser-key.pem \
--embed-certs=true \
--kubeconfig=./awsuser.kubeconfig
8. Set the context parameters
root@k8s-master001:/data/velero# kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=awsuser \
--namespace=velero-system \
--kubeconfig=./awsuser.kubeconfig
9. Set the default context
root@k8s-master001:/data/velero# kubectl config use-context kubernetes --kubeconfig=awsuser.kubeconfig
Switched to context "kubernetes".

10. Create the awsuser account (cluster role binding) in the cluster
root@k8s-master001:/data/velero# kubectl create clusterrolebinding awsuser --clusterrole=cluster-admin --user=awsuser
clusterrolebinding.rbac.authorization.k8s.io/awsuser created

11. Create the namespace
root@k8s-master001:/data/velero# kubectl create ns velero-system
namespace/velero-system created

12. Run the installation
root@k8s-master001:/data/velero# velero --kubeconfig  ./awsuser.kubeconfig \
 install \
     --provider aws \
     --plugins velero/velero-plugin-for-aws:v1.3.1 \
     --bucket velerodata  \
     --secret-file ./velero-auth.txt \
     --use-volume-snapshots=false \
 --namespace velero-system \
 --backup-location-config region=minio,s3ForcePathStyle="true",s3Url=http://172.21.90.211:9000    # s3Url is the MinIO address

13. Verify the installation

image-20221206232346480

Check the Velero pod logs; the installation is only healthy once you see "Backup storage location valid".
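The same check can be done from the command line instead of the dashboard (a sketch; the storage location created by velero install is normally named default):

root@k8s-master001:/data/velero# kubectl -n velero-system get backupstoragelocations
root@k8s-master001:/data/velero# kubectl -n velero-system get pod
root@k8s-master001:/data/velero# kubectl -n velero-system logs deploy/velero | grep -i "backup storage location"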

image-20221207230437577

1. An "invalid" error during the experiment

The pod logs showed a "Backup storage location invalid" error.

image-20221207230100871

This happened because I wrote the wrong MinIO address when running the velero install command.

image-20221207230222096

2. Fixing it

I completely uninstalled Velero here and reinstalled it, following "Using Tencent Cloud Object Storage (COS) as the Velero backend to back up and restore cluster resources" (senlt.cn).

# Find the Velero resources
root@k8s-master001:/data/velero# kubectl get ns -A | grep velero
velero-system     Active   12m

root@k8s-master001:/data/velero# kubectl get clusterrolebinding | grep velero
velero-velero-system                                   ClusterRole/cluster-admin                                          13m

root@k8s-master001:/data/velero# kubectl get crds | grep velero
backups.velero.io                   2022-12-07T14:53:33Z
backupstoragelocations.velero.io    2022-12-07T14:53:33Z
deletebackuprequests.velero.io      2022-12-07T14:53:33Z
downloadrequests.velero.io          2022-12-07T14:53:33Z
podvolumebackups.velero.io          2022-12-07T14:53:33Z
podvolumerestores.velero.io         2022-12-07T14:53:33Z
resticrepositories.velero.io        2022-12-07T14:53:33Z
restores.velero.io                  2022-12-07T14:53:33Z
schedules.velero.io                 2022-12-07T14:53:33Z
serverstatusrequests.velero.io      2022-12-07T14:53:33Z
volumesnapshotlocations.velero.io   2022-12-07T14:53:33Z

# Delete them with the following commands
root@k8s-master001:/data/velero# kubectl delete namespace/velero-system clusterrolebinding/velero-velero-system
root@k8s-master001:/data/velero# kubectl delete crds -l component=velero

image-20221207231115597

After everything is deleted, reinstall Velero; the logs now show "Backup storage location valid", which means it is working.

image-20221207230437577

14. Back up an entire namespace
root@k8s-master001:/data/velero# DATE=`date +%Y%m%d%H%M%S`
root@k8s-master001:/data/velero# echo $DATE
20221207232042

# Back up the myserver namespace
root@k8s-master001:/data/velero# velero backup create myserver-ns-backup-${DATE} \
--include-cluster-resources=true \
--include-namespaces myserver \
--kubeconfig=./awsuser.kubeconfig \
--namespace velero-system
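To confirm the backup completed successfully (a sketch; the backup name is the one generated from $DATE above):

root@k8s-master001:/data/velero# velero backup get --kubeconfig=./awsuser.kubeconfig --namespace velero-system
root@k8s-master001:/data/velero# velero backup describe myserver-ns-backup-${DATE} --kubeconfig=./awsuser.kubeconfig --namespace velero-system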

image-20221207232551181

image-20221207232926946

Then check the bucket in the MinIO console.

image-20221207233140456

Restore test
1. Check the pods

Check the nginx pods in the myserver namespace.

image-20221207233717102

image-20221207233914537

2. Delete the pods
root@k8s-deploy:/yaml/nginx-tomcat-case# kubectl delete -f nginx.yaml

image-20221207234151504

3. Restore the data
root@k8s-master001:/data/velero# velero restore create --from-backup myserver-ns-backup-20221207232042 --wait --kubeconfig=./awsuser.kubeconfig --namespace velero-system
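Once the restore finishes, the deleted workload should be back (a sketch of the verification):

root@k8s-master001:/data/velero# velero restore get --kubeconfig=./awsuser.kubeconfig --namespace velero-system
root@k8s-master001:/data/velero# kubectl get pod -n myserver -o wide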

image-20221207234719143

15. Back up specific resource objects

Back up pods or other specific resources in a given namespace.

root@k8s-master001:/data/velero# velero backup create pod-backup-${DATE} \
--include-cluster-resources=true \
--ordered-resources 'pods=myserver/net-test1,default/net-test1' \
--namespace velero-system \
--include-namespaces=myserver,default

II. Common Kubernetes resource objects and their usage

1. Core concepts of Kubernetes resource management

Kubernetes logical runtime environment

image-20221212111119266

Kubernetes design philosophy: layered architecture

image-20221212111202698

Kubernetes design philosophy: API design principles

image-20221212111254577

Introduction to the Kubernetes API

image-20221212111335859

Introduction to built-in Kubernetes APIs and resource objects

View the built-in APIs
Get a token
root@k8s-deploy:~# kubectl get secrets -A
root@k8s-deploy:~# kubectl describe secrets kuboard-admin-token -n kuboard

image-20221212104411075

View the built-in APIs

curl --cacert /etc/kubernetes/ssl/ca.pem -H "Authorization:Bearer TOKEN" https://47.122.7.18:6443

root@k8s-master001:~# curl --cacert /etc/kubernetes/ssl/ca.pem -H "Authorization:Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6InhGR1R0NHQxRlQyaDY5V3VFWFhqSlNiQUFKaEphTmFRZnJZSndlbnAtanMifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJvYXJkIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6Imt1Ym9hcmQtYWRtaW4tdG9rZW4iLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoia3Vib2FyZC1hZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6Ijk5Yzc0NmYxLWE2OWQtNDVhYi05YThiLTVmMjdjZWNjZDYzNyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJvYXJkOmt1Ym9hcmQtYWRtaW4ifQ.ll2EqvUrlPaHUZMtCg1mBAs2GisJ_NQ_-n6YXxoSsywkyCpT_L7j4TAqQyQ6aRNKngtohYv_WmyBlJZXNGiOFXzk6G0b6SxiXcv7UrpAbtzIKMOKnUwELSMW7FSA-sizGCz0I3IfB9Xkz-2ZKKMigsWbM-XQYvTFsSGbn38jajWlu_yNOI1zvbsogvW4-Od9PAm5-z5UUbKnuSPR5oF2LjmOCxmSSCWv8yuDoo0Gms-zPuXL1KOi259O0bV-3Os8cYp_WW713NheceIjaigNcurTQz4a1BuFnDX1juSH_3Qjg-W4I4oRtnD97K27fQ8WQIGLfjH88fz6iuEO7R1rCA" https://47.122.7.18:6443

image-20221212103803473

Query pods

curl --cacert /etc/kubernetes/ssl/ca.pem -H "Authorization:Bearer TOKEN" https://47.122.7.18:6443/api/v1/pods

image-20221212104202714

Built-in resource objects

image-20221212111926739

Resource object operation commands

Deployments | Kubernetes

image-20221212105138629

2. Kubernetes resource objects in detail, with examples

Important Kubernetes concepts

Resource objects: Kubernetes exposes a declarative API, and you interact with the cluster through resource objects.

YAML files: for easier long-term management, resource objects are managed through the API using YAML files.

Required YAML fields (a minimal example follows the list)

  • apiVersion: the Kubernetes API version used to create the object.
  • kind: the type of object to create.
  • metadata: data that uniquely identifies the object, including a name and an optional namespace.
  • spec: the detailed specification of the object (labels, container name, image, port mappings, and so on).
  • status: generated automatically by Kubernetes after the object is created.
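A minimal sketch that puts these fields together (the name, labels, and image are placeholders, not taken from the course files):

apiVersion: v1
kind: Pod
metadata:
  name: demo-pod          # must be unique within the namespace
  namespace: default      # optional, defaults to "default"
  labels:
    app: demo
spec:
  containers:
  - name: demo
    image: nginx:1.20.0
    ports:
    - containerPort: 80
# status is filled in by Kubernetes after creation and is never written by hand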

YAML file and required fields

image-20221212154712239

Pod

image-20221212155832419

Job and CronJob

Job: one-off tasks such as data initialization (mysql/elasticsearch). Job | Kubernetes

CronJob | Kubernetes
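A minimal CronJob sketch (name, schedule, and image are placeholders; the actual case files used in class are not reproduced here):

apiVersion: batch/v1
kind: CronJob
metadata:
  name: demo-cronjob
spec:
  schedule: "*/5 * * * *"          # run every 5 minutes
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: demo
            image: busybox:1.35
            command: ["/bin/sh", "-c", "date; echo data-init placeholder"]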

image-20221212162017636

image-20221212164550188

Check on the node

image-20221212164656943

RC/RS replica controllers

ReplicationController | Kubernetes

Labels and Selectors | Kubernetes

ReplicaSet | Kubernetes

image-20221212165017873

Deployment controller

Deployments (Deployments | Kubernetes) are the controller most commonly used today.

image-20221212170448267

Rolling back a Deployment

Deployments | Kubernetes

Sometimes you may want to roll back a Deployment, for example when it is unstable (such as crash looping). By default, all of the Deployment's rollout history is kept in the system so that you can roll back at any time (you can change this by modifying the revision history limit).

Checking the rollout history of a Deployment

Follow the steps below to check the rollout history:

  1. First, check the revisions of this Deployment:

    root@k8s-deploy:/yaml/k8s-Resource-N70/case3-controller# kubectl rollout history deployment nginx-deployment 
    deployment.apps/nginx-deployment 
    REVISION  CHANGE-CAUSE
    1         <none>
    2         <none>
    3         <none>
    
    
    
  2. To see the details of a specific revision, run:

    root@k8s-deploy:/yaml/k8s-Resource-N70/case3-controller# kubectl rollout history deployment nginx-deployment --revision=1
    deployment.apps/nginx-deployment with revision #1
    Pod Template:
      Labels:	app=ng-deploy-80
    	pod-template-hash=8468759b74
      Containers:
       ng-deploy-80:
        Image:	nginx:1.20.2
        Port:	80/TCP
        Host Port:	0/TCP
        Environment:	<none>
        Mounts:	<none>
      Volumes:	<none>
    
    
    

    image-20221212212750928

Rolling back to a previous revision

Follow the steps below to roll back the Deployment from the current version to the previous version (i.e. revision 2).

  1. Suppose you have now decided to undo the current rollout and roll back to the previous revision:

    root@k8s-deploy:/yaml/k8s-Resource-N70/case3-controller# kubectl rollout undo deployment nginx-deployment 
    deployment.apps/nginx-deployment rolled back
    
    # Check the Deployment description
    root@k8s-deploy:/yaml/k8s-Resource-N70/case3-controller# kubectl describe deployments.apps nginx-deployment
    

    image-20221212213525238

    Alternatively, you can roll back to a specific revision with --to-revision:

    root@k8s-deploy:/yaml/k8s-Resource-N70/case3-controller# kubectl rollout undo deployment nginx-deployment --to-revision=3
    deployment.apps/nginx-deployment rolled back
    
    # Check the Deployment description
    root@k8s-deploy:/yaml/k8s-Resource-N70/case3-controller# kubectl describe deployments.apps nginx-deployment 
    

    image-20221212213724448

  2. To check whether the rollback was successful and the Deployment is running as expected, run:

    root@k8s-deploy:/yaml/k8s-Resource-N70/case3-controller# kubectl get deployment nginx-deployment
    NAME               READY   UP-TO-DATE   AVAILABLE   AGE
    nginx-deployment   2/2     2            2           30m
    
    
  3. Get the description of the Deployment:

    root@k8s-deploy:/yaml/k8s-Resource-N70/case3-controller# kubectl describe deployments.apps nginx-deployment 
    

Introduction to Services

image-20221213101816519

image-20221213102034045

ClusterIP: used only inside the cluster.

NodePort: opens the same port on every node. A client request to that port is forwarded to the corresponding Service, which then forwards it to a pod; it is used to expose services running in Kubernetes to clients outside the cluster. A sketch of such a manifest follows.
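A sketch of what such a NodePort Service manifest looks like (it mirrors the 3-svc_NodePort.yml applied later, whose contents are not shown; the ports follow the command output in that section):

apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  type: NodePort
  ports:
  - name: http
    port: 81           # Service (cluster) port
    targetPort: 80     # container port
    nodePort: 30012    # port opened on every node
    protocol: TCP
  selector:
    app: ng-deploy-80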

image-20221213102652503

image-20221213110034615

Access from inside the cluster
root@k8s-deploy:/yaml/k8s-Resource-N70/case4-service# kubectl apply -f 1-deploy_node.yml 
deployment.apps/nginx-deployment created
root@k8s-deploy:/yaml/k8s-Resource-N70/case4-service# kubectl get deployments.apps -o wide
NAME               READY   UP-TO-DATE   AVAILABLE   AGE    CONTAINERS     IMAGES         SELECTOR
nginx-deployment   1/1     1            1           113s   ng-deploy-80   nginx:1.20.0   app=ng-deploy-80

root@k8s-deploy:/yaml/k8s-Resource-N70/case4-service# kubectl apply -f 2-svc_service.yml 
service/ng-deploy-80 created
root@k8s-deploy:/yaml/k8s-Resource-N70/case4-service# 
root@k8s-deploy:/yaml/k8s-Resource-N70/case4-service# kubectl get svc -o wide
NAME           TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE   SELECTOR
kubernetes     ClusterIP   10.100.0.1     <none>        443/TCP   15d   <none>
ng-deploy-80   ClusterIP   10.100.57.70   <none>        80/TCP    3s    app=ng-deploy-80
root@k8s-deploy:/yaml/k8s-Resource-N70/case4-service# curl 10.100.57.70
^C


image-20221213104609067

image-20221213104654245

Access from outside the cluster
root@k8s-deploy:/yaml/k8s-Resource-N70/case4-service# kubectl apply -f 3-svc_NodePort.yml 
service/ng-deploy-80 created

root@k8s-deploy:/yaml/k8s-Resource-N70/case4-service# kubectl get svc -o wide
NAME           TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE   SELECTOR
kubernetes     ClusterIP   10.100.0.1       <none>        443/TCP        15d   <none>
ng-deploy-80   NodePort    10.100.165.192   <none>        81:30012/TCP   32s   app=ng-deploy-80

image-20221213105708355

image-20221213105955566

III. Pod data persistence based on NFS, plus testing emptyDir and hostPath

Volume: introduction

Volumes | Kubernetes

image-20221213142410298

emptyDir

image-20221213144712593

root@k8s-deploy:/yaml/k8s-Resource-N70/case5-emptyDir# vim deploy_emptyDir.yml 

#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /cache
          name: cache-volume
      volumes:
      - name: cache-volume
        emptyDir: {}

After the pod is created, exec into the container and create the file y73.txt under /cache.

image-20221213150917405

Locate y73.txt on the node and check its contents.

image-20221213151016342

After the pod is deleted, the corresponding directory on the node is removed as well.

# Delete the pod
root@k8s-deploy:/yaml/k8s-Resource-N70/case5-emptyDir# kubectl delete -f deploy_emptyDir.yml 
deployment.apps "nginx-deployment" deleted

# On the node the file no longer exists
root@k8s-node003:~# ll /var/lib/kubelet/pods/1a880ab2-03e9-42fa-aae2-ae7a5a0ef125/volumes/kubernetes.io~empty-dir/cache-volume/y73.txt
ls: cannot access '/var/lib/kubelet/pods/1a880ab2-03e9-42fa-aae2-ae7a5a0ef125/volumes/kubernetes.io~empty-dir/cache-volume/y73.txt': No such file or directory

image-20221213151902443

hostPath

Data is stored only on that node and cannot be shared across nodes.

image-20221213152430095

root@k8s-deploy:/yaml/k8s-Resource-N70/case6-hostPath# vim deploy_hostPath.yml 

#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /cache
          name: cache-volume
      volumes:
      - name: cache-volume
        hostPath:
          path: /data/kubernetes

After the pod is created, exec into the container's /cache directory and create a file.

image-20221213152415457

The YAML mounts the node's /data/kubernetes directory at /cache in the container, so the file can be read directly on the node.

image-20221213152554894

After the pod is deleted, the file on the node still exists.

# Delete the pod
root@k8s-deploy:/yaml/k8s-Resource-N70/case6-hostPath# kubectl delete -f deploy_hostPath.yml 
deployment.apps "nginx-deployment" deleted

# Check on the node
root@k8s-node003:~# cat /data/kubernetes/y73.txt 
123

NFS shared storage

image-20221213152949772

Create the NFS service
root@haproxy:~# apt install nfs-server
root@haproxy:~# mkdir /data/k8sdata -p

# Append at the end
root@haproxy:~# vim /etc/exports
/data/k8sdata *(rw,no_root_squash)   # read/write, no root squashing

root@haproxy:~# systemctl restart nfs-server.service 
root@haproxy:~# systemctl enable nfs-server

On the deploy node, verify that the host has access to the export.

root@k8s-deploy:~# showmount -e 172.21.90.223

Command 'showmount' not found, but can be installed with:

apt install nfs-common
# showmount comes from nfs-common; without it the client cannot inspect or mount NFS exports


root@k8s-deploy:~# apt install nfs-common 
root@k8s-deploy:~# showmount -e 172.21.90.223
Export list for 172.21.90.223:
/data/k8sdata *
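Optionally, a quick manual mount from the deploy node confirms the export is really writable (a sketch; unmount it afterwards):

root@k8s-deploy:~# mount -t nfs 172.21.90.223:/data/k8sdata /mnt
root@k8s-deploy:~# touch /mnt/write-test && ls -l /mnt
root@k8s-deploy:~# umount /mnt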

Example 1: mount an NFS share
root@k8s-deploy:/yaml/k8s-Resource-N70/case7-nfs# vim 1-deploy_nfs.yml 

#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html/mysite  #the official nginx image serves /usr/share/nginx/html by default
          name: my-nfs-volume
      volumes:
      - name: my-nfs-volume
        nfs:
          server: 172.21.90.223
          path: /data/k8sdata

---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30016
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80

Create the pod and verify

image-20221213170353308

image-20221213170529352

image-20221213170714076

Upload data to /data/k8sdata on the NFS server and verify that the pod can serve it.

root@haproxy:/data/k8sdata# pwd
/data/k8sdata
root@haproxy:/data/k8sdata# ll
total 32
drwxr-xr-x 2 root root  4096 Dec 13 17:10 ./
drwxr-xr-x 3 root root  4096 Dec 13 16:38 ../
-rw-r--r-- 1 root root 23799 Dec 13 17:10 微信图片_20221213170849.jpg

image-20221213213317590

image-20221213171246516

Example 2: mount multiple NFS shares
root@k8s-deploy:/yaml/k8s-Resource-N70/case7-nfs# vim 2-deploy_nfs.yml 

#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-site2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-81
  template:
    metadata:
      labels:
        app: ng-deploy-81
    spec:
      containers:
      - name: ng-deploy-81
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html/pool1
          name: my-nfs-volume-pool1
        - mountPath: /usr/share/nginx/html/pool2
          name: my-nfs-volume-pool2
      volumes:
      - name: my-nfs-volume-pool1
        nfs:
          server: 172.21.90.223
          path: /data/k8sdata/pool1
      - name: my-nfs-volume-pool2
        nfs:
          server: 172.21.90.223
          path: /data/k8sdata/pool2

---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-81
spec:
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30017
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-81

Upload data on the NFS server

root@haproxy:/data/k8sdata# mkdir /data/k8sdata/pool{1,2} -pv
mkdir: created directory '/data/k8sdata/pool1'
mkdir: created directory '/data/k8sdata/pool2'
root@haproxy:/data/k8sdata# 
root@haproxy:/data/k8sdata# echo "pool-1" > /data/k8sdata/pool1/index.html
root@haproxy:/data/k8sdata# echo "pool-2" > /data/k8sdata/pool2/index.html

Create the pod and verify

image-20221213220903812

image-20221213220315823

image-20221213220224063

image-20221213220247497

PV/PVC

Persistent Volumes | Kubernetes

A PV is a cluster-wide (global) resource; it does not belong to any namespace.

image-20230108154028790

image-20230108154110737

Access modes

A PersistentVolume can be mounted on a host in any way supported by the resource provider. As shown in the table below, providers have different capabilities, and each PV's access modes are set to the modes supported by that particular volume. For example, NFS can support multiple read/write clients, but a specific NFS PV might be exported on the server as read-only. Each PV gets its own set of access modes describing that PV's capabilities.

The access modes are:

  • ReadWriteOnce

    The volume can be mounted as read-write by a single node. ReadWriteOnce still allows multiple pods to access the volume when the pods are running on the same node.

  • ReadOnlyMany

    The volume can be mounted as read-only by many nodes.

  • ReadWriteMany

    The volume can be mounted as read-write by many nodes.

  • ReadWriteOncePod

    The volume can be mounted as read-write by a single Pod. Use ReadWriteOncePod if you want to ensure that only one pod across the whole cluster can read or write the PVC. This is only supported for CSI volumes and requires Kubernetes 1.22+.

In the command line interface (CLI), the access modes are abbreviated to:

  • RWO - ReadWriteOnce
  • ROX - ReadOnlyMany
  • RWX - ReadWriteMany
  • RWOP - ReadWriteOncePod

Important! A volume can only be mounted using one access mode at a time, even if it supports many. For example, a GCEPersistentDisk volume can be mounted as ReadWriteOnce by a single node or ReadOnlyMany by many nodes, but not at the same time.

Volume plugin        | ReadWriteOnce | ReadOnlyMany | ReadWriteMany | ReadWriteOncePod
AWSElasticBlockStore | ✓ | - | - | -
AzureFile            | ✓ | ✓ | ✓ | -
AzureDisk            | ✓ | - | - | -
CephFS               | ✓ | ✓ | ✓ | -
Cinder               | ✓ | - | - | -
CSI                  | depends on the driver | depends on the driver | depends on the driver | depends on the driver
FC                   | ✓ | ✓ | - | -
FlexVolume           | ✓ | ✓ | depends on the driver | -
Flocker              | ✓ | - | - | -
GCEPersistentDisk    | ✓ | ✓ | - | -
Glusterfs            | ✓ | ✓ | ✓ | -
HostPath             | ✓ | - | - | -
iSCSI                | ✓ | ✓ | - | -
Quobyte              | ✓ | ✓ | ✓ | -
NFS                  | ✓ | ✓ | ✓ | -
RBD                  | ✓ | ✓ | - | -
VsphereVolume        | ✓ | - | - (works when Pods are collocated on the same node) | -
PortworxVolume       | ✓ | - | ✓ | -
StorageOS            | ✓ | - | - | -

image-20230108155634056

PV parameters

image-20230108155735042

PVC creation parameters

The pod and the PVC must be in the same namespace.

image-20230108160835559

Volume: volume types

image-20230108161501240

Volume: static volume example
Create the NFS service
root@haproxy:~# apt install nfs-server
root@haproxy:~# mkdir /data/k8sdata -p

# Append at the end
root@haproxy:~# vim /etc/exports
/data/k8sdata *(rw,no_root_squash)   # read/write, no root squashing

root@haproxy:~# systemctl restart nfs-server.service 
root@haproxy:~# systemctl enable nfs-server

On the deploy node, verify that the host has access to the export.

root@k8s-deploy:~# showmount -e 172.21.90.223

Command 'showmount' not found, but can be installed with:

apt install nfs-common
# showmount comes from nfs-common; without it the client cannot inspect or mount NFS exports


root@k8s-deploy:~# apt install nfs-common 
root@k8s-deploy:~# showmount -e 172.21.90.223
Export list for 172.21.90.223:
/data/k8sdata *

Create the PV
root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# vim 1-myapp-persistentvolume.yaml 

apiVersion: v1
kind: PersistentVolume
metadata:
  name: myserver-myapp-static-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/myserver/myappdata   #data is stored here
    server: 192.168.0.119

# Pre-populate data on the NFS server and upload some images
root@k8s-haproxy1:~# mkdir /data/k8sdata/myserver/myappdata/ -p
root@k8s-haproxy1:~# vim /data/k8sdata/myserver/myappdata/index.html
root@k8s-haproxy1:~# echo "pv data" >> /data/k8sdata/myserver/myappdata/index.html
root@k8s-haproxy1:/data/k8sdata/myserver/myappdata# ll
total 168
drwxr-xr-x 2 root root    84 Jan  8 16:52 ./
drwxr-xr-x 3 root root    23 Jan  8 16:34 ../
-rw-r--r-- 1 root root 51651 Jan  8 16:51 IMG_0098.JPG
-rw-r--r-- 1 root root 39982 Jan  8 16:51 IMG_0099.JPG
-rw-r--r-- 1 root root 71573 Jan  8 16:51 IMG_0100.JPG
-rw-r--r-- 1 root root     8 Jan  8 16:35 index.html



root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# kubectl apply -f 1-myapp-persistentvolume.yaml 
persistentvolume/myserver-myapp-static-pv created

root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# kubectl get pv
NAME                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
myserver-myapp-static-pv   10Gi       RWO            Retain           Available                                   21s

root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# kubectl describe pv myserver-myapp-static-pv

image-20230108162056977

Create the PVC

The PVC capacity can only be less than or equal to the PV capacity.

root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# vim 2-myapp-persistentvolumeclaim.yaml 

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: myserver-myapp-static-pvc
  namespace: myserver
spec:
  volumeName: myserver-myapp-static-pv
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi

root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# kubectl apply -f 2-myapp-persistentvolumeclaim.yaml 
persistentvolumeclaim/myserver-myapp-static-pvc created
 
root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# kubectl get pvc -n myserver 
NAME                        STATUS   VOLUME                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
myserver-myapp-static-pvc   Bound    myserver-myapp-static-pv   10Gi       RWO                           16s
 
root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# kubectl describe pvc myserver-myapp-static-pvc -n myserver 

image-20230108162831319

Deploy the web service
root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# vim 3-myapp-webserver.yaml 

kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: myserver-myapp
  name: myserver-myapp-deployment-name
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
        - name: myserver-myapp-container
          image: nginx:1.20.0
          #imagePullPolicy: Always
          volumeMounts:
          - mountPath: "/usr/share/nginx/html/statics"
            name: statics-datadir
      volumes:
        - name: statics-datadir
          persistentVolumeClaim:
            claimName: myserver-myapp-static-pvc

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: myserver-myapp-service
  name: myserver-myapp-service-name
  namespace: myserver
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30080
  selector:
    app: myserver-myapp-frontend

root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# kubectl apply -f 3-myapp-webserver.yaml
root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# kubectl get pod -n myserver 
NAME                                             READY   STATUS    RESTARTS      AGE
linux73-nginx-deployment-cfdc998c-hsfsv          1/1     Running   1 (39m ago)   5d1h
linux73-nginx-deployment-cfdc998c-lpcqz          1/1     Running   1 (39m ago)   5d1h
myserver-myapp-deployment-name-595d649b9-7t9cl   1/1     Running   0             3m58s
net-test1                                        1/1     Running   1 (39m ago)   5d5h
net-test2                                        1/1     Running   1 (39m ago)   5d5h

root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# kubectl get pod -n myserver 
NAME                                             READY   STATUS    RESTARTS      AGE
linux73-nginx-deployment-cfdc998c-hsfsv          1/1     Running   1 (41m ago)   5d1h
linux73-nginx-deployment-cfdc998c-lpcqz          1/1     Running   1 (41m ago)   5d1h
myserver-myapp-deployment-name-595d649b9-7t9cl   1/1     Running   0             6m11s
net-test1                                        1/1     Running   1 (41m ago)   5d5h
net-test2                                        1/1     Running   1 (41m ago)   5d5h
root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# 
root@k8s-deploy:/yaml/k8s-Resource-N70/case8-pv-static# kubectl exec -it myserver-myapp-deployment-name-595d649b9-7t9cl bash -n myserver 
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@myserver-myapp-deployment-name-595d649b9-7t9cl:/#    
root@myserver-myapp-deployment-name-595d649b9-7t9cl:/# df -Th
Filesystem                                     Type     Size  Used Avail Use% Mounted on
overlay                                        overlay  120G  9.8G  111G   9% /
tmpfs                                          tmpfs     64M     0   64M   0% /dev
tmpfs                                          tmpfs    2.0G     0  2.0G   0% /sys/fs/cgroup
shm                                            tmpfs     64M     0   64M   0% /dev/shm
/dev/sda2                                      xfs      120G  9.8G  111G   9% /etc/hosts
tmpfs                                          tmpfs    3.6G   12K  3.6G   1% /run/secrets/kubernetes.io/serviceaccount
192.168.0.119:/data/k8sdata/myserver/myappdata nfs4     120G  8.7G  112G   8% /usr/share/nginx/html/statics
tmpfs                                          tmpfs    2.0G     0  2.0G   0% /proc/acpi
tmpfs                                          tmpfs    2.0G     0  2.0G   0% /proc/scsi
tmpfs                                          tmpfs    2.0G     0  2.0G   0% /sys/firmware
root@myserver-myapp-deployment-name-595d649b9-7t9cl:/# 
root@myserver-myapp-deployment-name-595d649b9-7t9cl:/# ls -l /usr/share/nginx/html/statics
total 168
-rw-r--r-- 1 root root 51651 Jan  8 08:51 IMG_0098.JPG
-rw-r--r-- 1 root root 39982 Jan  8 08:51 IMG_0099.JPG
-rw-r--r-- 1 root root 71573 Jan  8 08:51 IMG_0100.JPG
-rw-r--r-- 1 root root     8 Jan  8 08:35 index.html

image-20230108165550566

Access from a browser

image-20230108164429098

image-20230108164652134

image-20230108165312641

Volume: dynamic volume example

First remove the static-volume setup: delete the web service first, then the PVC, and finally the PV.

image-20230108170745707

Create the account (RBAC)
root@k8s-deploy:/yaml/k8s-Resource-N70/case9-pv-dynamic-nfs# vim 1-rbac.yaml 

apiVersion: v1
kind: Namespace
metadata:
  name: nfs
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io


root@k8s-deploy:/yaml/k8s-Resource-N70/case9-pv-dynamic-nfs# kubectl apply -f 1-rbac.yaml 
Create the StorageClass
root@k8s-deploy:/yaml/k8s-Resource-N70/case9-pv-dynamic-nfs# vim 2-storageclass.yaml 

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner # or choose another name; must match the deployment's env PROVISIONER_NAME
reclaimPolicy: Retain #PV deletion policy; the default Delete removes the data on the NFS server as soon as the PV is deleted
mountOptions:
  #- vers=4.1 #some of these options behave abnormally with containerd
  #- noresvport #tell the NFS client to use a new TCP source port when re-establishing the network connection
  - noatime #do not update the file's inode access time on reads; improves performance in high-concurrency environments
parameters:
  #mountOptions: "vers=4.1,noresvport,noatime"
  archiveOnDelete: "true"  #keep (archive) the data when the claim is deleted; the default false means the data is not kept

Create the NFS provisioner
root@k8s-deploy:/yaml/k8s-Resource-N70/case9-pv-dynamic-nfs# vim 3-nfs-provisioner.yaml 

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
spec:
  replicas: 1
  strategy: #deployment strategy
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          #image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2 
          image: registry.cn-qingdao.aliyuncs.com/zhangshijie/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.0.119
            - name: NFS_PATH
              value: /data/volumes
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.0.119
            path: /data/volumes    #this shared directory must be created on the NFS server first
# Create the shared directory on the NFS server
root@k8s-haproxy1:~# mkdir -p /data/volumes
root@k8s-haproxy1:~# vim /etc/exports
# Append at the end
/data/volumes *(rw,no_root_squash)

# Reload the exports without restarting NFS
root@k8s-haproxy1:~# exportfs -arv
exportfs: /etc/exports [1]: Neither 'subtree_check' or 'no_subtree_check' specified for export "*:/data/k8sdata".
  Assuming default behaviour ('no_subtree_check').
  NOTE: this default has changed since nfs-utils version 1.0.x

exportfs: /etc/exports [2]: Neither 'subtree_check' or 'no_subtree_check' specified for export "*:/data/volumes".
  Assuming default behaviour ('no_subtree_check').
  NOTE: this default has changed since nfs-utils version 1.0.x

exporting *:/data/volumes
exporting *:/data/k8sdata


# Check the exported directories from the deploy node
root@k8s-deploy:~# showmount -e 192.168.0.119
Export list for 192.168.0.119:
/data/volumes *
/data/k8sdata *

root@k8s-deploy:/yaml/k8s-Resource-N70/case9-pv-dynamic-nfs# kubectl apply -f 3-nfs-provisioner.yaml 
deployment.apps/nfs-client-provisioner created
root@k8s-deploy:/yaml/k8s-Resource-N70/case9-pv-dynamic-nfs# kubectl get pod -n nfs 
NAME                                      READY   STATUS    RESTARTS   AGE
nfs-client-provisioner-79fdb648c6-zrxzz   1/1     Running   0          10m
#check the logs to make sure there are no errors
root@k8s-deploy:/yaml/k8s-Resource-N70/case9-pv-dynamic-nfs# kubectl logs -f nfs-client-provisioner-79fdb648c6-zrxzz -n nfs

Create the PVC
root@k8s-deploy:/yaml/k8s-Resource-N70/case9-pv-dynamic-nfs# vim 4-create-pvc.yaml 

# Test PVC
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: myserver-myapp-dynamic-pvc
  namespace: myserver
spec:
  storageClassName: managed-nfs-storage #name of the StorageClass to use
  accessModes:
    - ReadWriteMany #access mode
  resources:
    requests:
      storage: 500Mi #requested size

root@k8s-deploy:/yaml/k8s-Resource-N70/case9-pv-dynamic-nfs# kubectl apply -f 4-create-pvc.yaml 
persistentvolumeclaim/myserver-myapp-dynamic-pvc created

root@k8s-deploy:/yaml/k8s-Resource-N70/case9-pv-dynamic-nfs# kubectl get pvc -n nfs 
No resources found in nfs namespace.
root@k8s-deploy:/yaml/k8s-Resource-N70/case9-pv-dynamic-nfs# kubectl get pvc -n myserver 
NAME                         STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS          AGE
myserver-myapp-dynamic-pvc   Bound    pvc-47ef8e2a-ebfe-430d-918a-4b9dd53e9ec8   500Mi      RWX            managed-nfs-storage   25s

# On the NFS server the corresponding directory has already been created
root@k8s-haproxy1:~# ll /data/volumes/
total 0
drwxr-xr-x 3 root root 90 Jan  8 17:48 ./
drwxr-xr-x 4 root root 36 Jan  8 17:19 ../
drwxrwxrwx 2 root root  6 Jan  8 17:48 myserver-myserver-myapp-dynamic-pvc-pvc-47ef8e2a-ebfe-430d-918a-4b9dd53e9ec8/

Create the web service
root@k8s-deploy:/yaml/k8s-Resource-N70/case9-pv-dynamic-nfs# vim 5-myapp-webserver.yaml 

kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: myserver-myapp
  name: myserver-myapp-deployment-name
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
        - name: myserver-myapp-container
          image: nginx:1.20.0
          #imagePullPolicy: Always
          volumeMounts:
          - mountPath: "/usr/share/nginx/html/statics"
            name: statics-datadir
      volumes:
        - name: statics-datadir
          persistentVolumeClaim:
            claimName: myserver-myapp-dynamic-pvc

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: myserver-myapp-service
  name: myserver-myapp-service-name
  namespace: myserver
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30080
  selector:
    app: myserver-myapp-frontend
    
    
root@k8s-deploy:/yaml/k8s-Resource-N70/case9-pv-dynamic-nfs# kubectl apply -f 5-myapp-webserver.yaml 
deployment.apps/myserver-myapp-deployment-name created
service/myserver-myapp-service-name created

image-20230108175746714

There is no data yet, so upload some images on the NFS server.

root@k8s-haproxy1:~# echo "dynamic-pvc data" >> /data/volumes/myserver-myserver-myapp-dynamic-pvc-pvc-47ef8e2a-ebfe-430d-918a-4b9dd53e9ec8/index.html
root@k8s-haproxy1:~# ll /data/volumes/myserver-myserver-myapp-dynamic-pvc-pvc-47ef8e2a-ebfe-430d-918a-4b9dd53e9ec8/
total 168
drwxrwxrwx 2 root root    84 Jan  8 18:00 ./
drwxr-xr-x 3 root root    90 Jan  8 17:48 ../
-rw-r--r-- 1 root root 51651 Jan  8 17:58 IMG_0098.JPG
-rw-r--r-- 1 root root 39982 Jan  8 17:58 IMG_0099.JPG
-rw-r--r-- 1 root root 71573 Jan  8 17:58 IMG_0100.JPG
-rw-r--r-- 1 root root    17 Jan  8 18:01 index.html

Verify in a browser

image-20230108180158682

image-20230108180140075

image-20230108180237588

ConfigMap | Kubernetes

image-20230109144401897

image-20230109144433172

Mounting a configuration file
root@k8s-deploy:/yaml/k8s-Resource-N70/case10-configmap# vim 1-deploy_configmap.yml 

apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
 default: |
    server {
       listen       80;
       server_name  www.mysite.com;
       index        index.html index.php index.htm;

       location / {
           root /data/nginx/html;   #document root of the site
           if (!-e $request_filename) {
               rewrite ^/(.*) /index.html last;
           }
       }
    }


---
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.20.0
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /data/nginx/html
          name: nginx-static-dir
        - name: nginx-config
          mountPath:  /etc/nginx/conf.d
      volumes:
      - name: nginx-static-dir
        hostPath:
          path: /data/nginx
      - name: nginx-config
        configMap:
          name: nginx-config
          items:
             - key: default
               path: mysite.conf

---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30019
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80

root@k8s-deploy:/yaml/k8s-Resource-N70/case10-configmap# kubectl apply -f 1-deploy_configmap.yml 
configmap/nginx-config created
deployment.apps/nginx-deployment created
service/ng-deploy-80 created

root@k8s-deploy:/yaml/k8s-Resource-N70/case10-configmap# kubectl get pod
NAME                                READY   STATUS    RESTARTS   AGE
nginx-deployment-789cfd47bd-977nx   1/1     Running   0          4m19s

image-20230109144912626

Access from a browser

image-20230109151024398

Create the index page, then refresh the page and check.

image-20230109151508622

image-20230109151601997

Injecting environment variables
root@k8s-deploy:/yaml/k8s-Resource-N70/case10-configmap# vim 2-deploy_configmap_env.yml 

apiVersion: v1
kind: ConfigMap

metadata:
  name: nginx-config
data:
  username: "user1"
  password: "12345678"


---
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        env:
        - name: MY_USERNAME   #provided by the ConfigMap: user1
          valueFrom:
            configMapKeyRef:
              name: nginx-config
              key: username
        - name: MY_PASSWORD   #provided by the ConfigMap: 12345678
          valueFrom:
            configMapKeyRef:
              name: nginx-config
              key: password
        ######
        - name: "password"
          value: "123456"
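A quick way to confirm the variables inside the pod once the manifest above is applied (a sketch; kubectl picks one pod of the Deployment):

root@k8s-deploy:/yaml/k8s-Resource-N70/case10-configmap# kubectl exec deploy/nginx-deployment -- env | grep -E 'MY_USERNAME|MY_PASSWORD|password'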

image-20230109153956983

IV. Using Secrets for nginx TLS and for private registry image pull authentication

Secret | Kubernetes

image-20230109154253430

image-20230109154828159

Secret type: Opaque

image-20230109155055056

data fields

Values must be base64-encoded beforehand; the pod decodes them automatically when it uses them.

root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret# echo admin | base64
YWRtaW4K
root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret# echo 123456 | base64
MTIzNDU2Cg==
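Note that a plain echo appends a newline, so the decoded values above end with a line break (that is why the output ends in K / Cg==). Using echo -n avoids it:

root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret# echo -n admin | base64
YWRtaW4=
root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret# echo -n 123456 | base64
MTIzNDU2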

root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret# vim 1-secret-Opaque-data.yaml 

apiVersion: v1
kind: Secret
metadata:
  name: mysecret-data
  namespace: myserver
type: Opaque
data:
  user: YWRtaW4K
  password: MTIzNDU2Cg==
  age: MTgK #values that are not base64-encoded will cause an error

image-20230109155955354

stringData fields

No need to encode the values beforehand; Kubernetes base64-encodes them when the Secret is stored.

root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret# vim 2-secret-Opaque-stringData.yaml 

apiVersion: v1
kind: Secret
metadata:
  name: mysecret-stringdata
  namespace: myserver
type: Opaque
stringData:
  user: 'admin'
  password: '123456'
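Once applied, the stored Secret shows the values base64-encoded (a sketch of the check):

root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret# kubectl get secret mysecret-stringdata -n myserver -o yaml
# the data: section now contains user: YWRtaW4= and password: MTIzNDU2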

image-20230109160559308

Mounting a Secret in a pod
root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret# vim 3-secret-Opaque-mount.yaml 

#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-app1-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-myapp-app1
  template:
    metadata:
      labels:
        app: myserver-myapp-app1
    spec:
      containers:
      - name: myserver-myapp-app1
        image: tomcat:7.0.94-alpine
        ports:
        - containerPort: 8080
        volumeMounts:
        - mountPath: /data/myserver/auth
          name: myserver-auth-secret
      volumes:
      - name: myserver-auth-secret
        secret:
          secretName: mysecret-data #mount this secret; the values are base64-decoded to plain text when mounted

---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-app1
  namespace: myserver
spec:
  ports:
  - name: http
    port: 8080
    targetPort: 8080
    nodePort: 30018
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-app1

image-20230109161552429

How Secret mounting works

image-20230109161741267

To inspect the mounted secret on the node, first find which node the pod was scheduled to, then look on that node.

image-20230109162625857

image-20230109162819587

Secret type kubernetes.io/tls: providing a certificate for nginx

image-20230109164539604

image-20230109164609441

Self-signed certificate
root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret/certs# openssl req -x509 -sha256 -newkey rsa:4096 -keyout ca.key -out ca.crt -days 3650 -nodes -subj '/CN=www.ca.com'
root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret/certs# openssl req -new -newkey rsa:4096 -keyout server.key -out server.csr -nodes -subj '/CN=www.mysite.com'
root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret/certs# openssl  x509 -req -sha256 -days 3650 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt
root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret/certs# kubectl create secret tls myserver-tls-key --cert=./server.crt --key=./server.key -n myserver 
secret/myserver-tls-key created
root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret/certs# kubectl get secrets -n myserver 
NAME                  TYPE                DATA   AGE
mysecret-data         Opaque              3      52m
mysecret-stringdata   Opaque              2      60m
myserver-tls-key      kubernetes.io/tls   2      15s
root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret/certs# ll
total 20
drwxr-xr-x 2 root root   88 Jan  9 17:02 ./
drwxr-xr-x 3 root root  181 Jan  9 16:50 ../
-rw-r--r-- 1 root root 1809 Jan  9 16:56 ca.crt
-rw------- 1 root root 3272 Jan  9 16:56 ca.key
-rw-r--r-- 1 root root 1667 Jan  9 17:02 server.crt
-rw-r--r-- 1 root root 1590 Jan  9 16:59 server.csr
-rw------- 1 root root 3272 Jan  9 16:59 server.key


Create the nginx web service and use the certificate
root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret# vim 4-secret-tls.yaml 

      containers:
      - name: myserver-myapp-frontend
        image: nginx:1.20.2-alpine
        ports:
          - containerPort: 80
        volumeMounts:
          - name: nginx-config
            mountPath:  /etc/nginx/conf.d/myserver
          - name: myserver-tls-key
            mountPath:  /etc/nginx/conf.d/certs
      volumes:
      - name: nginx-config
        configMap:
          name: nginx-config
          items:
             - key: default
               path: mysite.conf
      - name: myserver-tls-key
        secret:
          secretName: myserver-tls-key


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend
  namespace: myserver
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30020
    protocol: TCP
  - name: https
    port: 443
    targetPort: 443
    nodePort: 30019
    protocol: TCP
  selector:
    app: myserver-myapp-frontend

image-20230109171032076

Forward requests from the load balancer to the NodePort
root@k8s-haproxy1:~# vim /etc/haproxy/haproxy.cfg 
# Append at the end
listen nginx-80G
  bind 192.168.0.189:80
  mode tcp
  server 192.168.0.113 192.168.0.113:30020 check inter 3s fall 3 rise 3
  server 192.168.0.114 192.168.0.114:30020 check inter 3s fall 3 rise 3
  server 192.168.0.115 192.168.0.115:30020 check inter 3s fall 3 rise 3


listen nginx-443
  bind 192.168.0.189:443
  mode tcp
  server 192.168.0.113 192.168.0.113:30019 check inter 3s fall 3 rise 3
  server 192.168.0.114 192.168.0.114:30019 check inter 3s fall 3 rise 3
  server 192.168.0.115 192.168.0.115:30019 check inter 3s fall 3 rise 3



root@k8s-haproxy1:~# systemctl reload haproxy.service

image-20230109171314299

Configure hosts resolution
echo "192.168.0.189 www.mysite.com" >> /etc/hosts
Verify inside the pod

image-20230109184754127

Edit the nginx configuration; by default the official image does not load the custom configuration file.

image-20230109172646112

Open the site in a browser

image-20230109184939272

Secret for private registry image pull authentication

Stores docker registry credentials that are used when pulling images, so every node can pull from the registry without logging in to it.

Create the secret

1. Create it with a command

kubectl create secret docker-registry keyName \
 --docker-server=registry.cn-beijing.aliyuncs.com/image-registry-1/baseimages \
 --docker-username=USER \
 --docker-password=PASSWORD

2. Create it from the docker credentials file

# Log in to the registry first
root@k8s-deploy:/yaml/k8s-Resource-N70/case10-configmap# docker login --username=azikaban registry.cn-beijing.aliyuncs.com
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded

# After a successful login the credentials are stored in the current user's home directory
root@k8s-deploy:/yaml/k8s-Resource-N70/case10-configmap# cat /root/.docker/config.json
{
	"auths": {
		"registry.cn-beijing.aliyuncs.com": {
			"auth": "YXppa2FiYW46amlhbmcxMDE3"  
		},
		"y73.harbor.com": {
			"auth": "YWRtaW46MTIzNDU2"
		}
	}
}

# Push an image to the Aliyun registry
root@k8s-deploy:/yaml/k8s-Resource-N70/case10-configmap# docker push registry.cn-beijing.aliyuncs.com/image-registry-1/baseimages:nginx-1.20.0-alpine

image-20230109204441043

image-20230109204321295

# Create the secret
kubectl create secret generic aliyun-registry-image-pull-key \
 --from-file=.dockerconfigjson=/root/.docker/config.json \
 --type=kubernetes.io/dockerconfigjson \
 -n myserver

image-20230109205516642

Create the pod
root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret# vim 5-secret-imagePull.yaml 

#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
      - name: myserver-myapp-frontend
        image: registry.cn-beijing.aliyuncs.com/image-registry-1/baseimages:nginx-1.20.0-alpine  #private Aliyun image registry
        ports:
          - containerPort: 80
      imagePullSecrets:
        - name: aliyun-registry-image-pull-key

---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend
  namespace: myserver
spec:
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30022
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend

Verify the pod status
root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret# kubectl get pod -o wide -n myserver
root@k8s-deploy:/yaml/k8s-Resource-N70/case11-secret# kubectl describe pod myserver-myapp-frontend-deployment-64db8c4d5f-twnvn -n myserver

image-20230109210346448

image-20230109210540598
