K8s Notes 4: etcd data backup, pod lifecycle and scheduling flow, probe summary, and hands-on data persistence

1. Custom backup and restore of Kubernetes cluster etcd data with Velero and MinIO

Deploy MinIO:

[root@k8s-deployer ~]# docker pull registry.cn-hangzhou.aliyuncs.com/zhangshijie/minio:RELEASE.2022-04-12T06-55-35Z
[root@k8s-deployer ~]# docker pull minio/minio:RELEASE.2022-04-12T06-55-35Z
[root@k8s-deployer ~]# cd /usr/local/src/
[root@k8s-deployer src]# mkdir -p /data/minio
# Create the MinIO container. If not specified, the default username/password is minioadmin/minioadmin; both can be customized via environment variables
[root@k8s-deployer src]# docker run --name minio \
-p 9000:9000 \
-p 9999:9999 \
-d --restart=always \
-e "MINIO_ROOT_USER=admin" \
-e "MINIO_ROOT_PASSWORD=12345678" \
-v /data/minio/data:/data \
registry.cn-hangzhou.aliyuncs.com/zhangshijie/minio:RELEASE.2022-04-12T06-55-35Z server /data \
--console-address '0.0.0.0:9999'
# Log in to the MinIO console and create the bucket (velerodata)

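The bucket can also be created from the command line with the MinIO client instead of the web console; a minimal sketch, assuming the mc binary is available on the deployer host:

# Register the MinIO endpoint under an alias (alias name is arbitrary)
[root@k8s-deployer ~]# mc alias set myminio http://172.18.10.121:9000 admin 12345678
# Create the bucket that Velero will write backups into
[root@k8s-deployer ~]# mc mb myminio/velerodata
# Confirm the bucket exists
[root@k8s-deployer ~]# mc ls myminio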

Deploy Velero:

[root@k8s-node1 ~]# nerdctl  pull velero/velero:v1.11.1
# Deploy Velero on the master node
[root@k8s-master1 case13-DaemonSet]# wget https://github.com/vmware-tanzu/velero/releases/download/v1.11.1/velero-v1.11.1-linux-amd64.tar.gz
[root@k8s-master1 src]# tar xvf velero-v1.11.1-linux-amd64.tar.gz
[root@k8s-master1 src]# cp velero-v1.11.1-linux-amd64/velero  /usr/local/bin/
[root@k8s-master1 src]# velero  --help
# Configure the Velero authentication environment:
[root@k8s-master1 src]# mkdir  /data/velero -p
[root@k8s-master1 src]# cd /data/velero
# Credentials file for accessing MinIO:
[root@k8s-master1 velero]# cat velero-auth.txt 
[default]
aws_access_key_id = admin
aws_secret_access_key = 12345678
# Prepare the user CSR file:
[root@k8s-master1 velero]# cat awsuser-csr.json 
{
  "CN": "awsuser",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
# Prepare the certificate-signing environment (or use the existing admin certificate):
[root@k8s-master1 velero]# yum install golang-cfssl -y
[root@k8s-master1 velero]# wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64 
[root@k8s-master1 velero]# wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64 
[root@k8s-master1 velero]# wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl-certinfo_1.6.1_linux_amd64
[root@k8s-master1 velero]# mv cfssl-certinfo_1.6.1_linux_amd64 cfssl-certinfo
[root@k8s-master1 velero]# mv cfssl_1.6.1_linux_amd64 cfssl
[root@k8s-master1 velero]# mv cfssljson_1.6.1_linux_amd64 cfssljson
[root@k8s-master1 velero]# cp cfssl-certinfo cfssl cfssljson /usr/local/bin/
[root@k8s-master1 velero]# chmod  a+x /usr/local/bin/cfssl* 
# Sign the certificate:
[root@k8s-deployer src]# scp  /etc/kubeasz/clusters/k8s-cluster1/ssl/ca-config.json  172.18.10.122:/data/velero
[root@k8s-master1 velero]# /usr/local/bin/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem -ca-key=/etc/kubernetes/ssl/ca-key.pem -config=./ca-config.json -profile=kubernetes ./awsuser-csr.json | cfssljson -bare awsuser
# Verify the certificates:
[root@k8s-master1 velero]#  ll awsuser*
-rw-r--r-- 1 root root  997 Aug 28 05:57 awsuser.csr
-rw-r--r-- 1 root root  221 Aug 28 05:49 awsuser-csr.json
-rw------- 1 root root 1675 Aug 28 05:57 awsuser-key.pem
-rw-r--r-- 1 root root 1391 Aug 28 05:57 awsuser.pem
# Copy the certificates to the api-server certificate path:
[root@k8s-master1 velero]# cp awsuser-key.pem /etc/kubernetes/ssl/
[root@k8s-master1 velero]# cp awsuser.pem /etc/kubernetes/ssl/
# Generate the cluster kubeconfig file:
[root@k8s-master1 velero]# kubectl config set-cluster kubernetes \
> --certificate-authority=/etc/kubernetes/ssl/ca.pem \
> --embed-certs=true \
> --server=${KUBE_APISERVER} \
> --kubeconfig=./awsuser.kubeconfig
# Set client certificate authentication:
[root@k8s-master1 velero]# kubectl config set-credentials awsuser \
> --client-certificate=/etc/kubernetes/ssl/awsuser.pem \
> --client-key=/etc/kubernetes/ssl/awsuser-key.pem \
> --embed-certs=true \
> --kubeconfig=./awsuser.kubeconfig
# Set the context parameters:
# kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=awsuser \
--namespace=velero-system \
--kubeconfig=./awsuser.kubeconfig

# Set the default context:
[root@k8s-master1 velero]# kubectl config use-context kubernetes --kubeconfig=awsuser.kubeconfig

# Create the awsuser account binding in the k8s cluster:
[root@k8s-master1 velero]# kubectl create clusterrolebinding awsuser --clusterrole=cluster-admin --user=awsuser
# Create the namespace:
[root@k8s-master1 velero]# kubectl create ns velero-system
# Run the install:
[root@k8s-master1 velero]# velero --kubeconfig  ./awsuser.kubeconfig \
> install \
>     --provider aws \
>     --plugins registry.cn-hangzhou.aliyuncs.com/zhangshijie/velero-plugin-for-aws:v1.7.1 \
>     --bucket velerodata  \
>     --secret-file ./velero-auth.txt \
>     --use-volume-snapshots=false \
> --namespace velero-system \
> --backup-location-config region=minio,s3ForcePathStyle="true",s3Url=http://172.18.10.121:9000
# Verify the installation:
[root@k8s-master1 velero]# kubectl get pod -n velero-system  -o wide
[root@k8s-node1 ~]# nerdctl  pull velero/velero-plugin-for-aws:v1.7.1
[root@k8s-node1 ~]# nerdctl  pull velero/velero:v1.11.1


Back up the default namespace:

[root@k8s-master1 velero]# DATE=`date +%Y%m%d%H%M%S`
[root@k8s-master1 velero]# velero backup create default-backup-${DATE} \
> --include-cluster-resources=true \
> --include-namespaces default \
> --kubeconfig=./awsuser.kubeconfig \
> --namespace velero-system
# Verify the backup
[root@k8s-master1 velero]# velero backup describe default-backup-20230828061532 --kubeconfig=./awsuser.kubeconfig --namespace velero-system
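Besides one-off backups, Velero can also create periodic backups with its schedule subcommand; a minimal sketch using the same kubeconfig and namespace as above (the cron expression and TTL are illustrative):

# Back up the default namespace every day at 03:00 and keep each backup for 72 hours
[root@k8s-master1 velero]# velero schedule create default-daily \
> --schedule="0 3 * * *" \
> --include-namespaces default \
> --ttl 72h \
> --kubeconfig=./awsuser.kubeconfig \
> --namespace velero-system
# List the schedule and the backups it has produced
[root@k8s-master1 velero]# velero schedule get --kubeconfig=./awsuser.kubeconfig --namespace velero-system
[root@k8s-master1 velero]# velero backup get --kubeconfig=./awsuser.kubeconfig --namespace velero-system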

Verify the backup data in MinIO:


Delete the pods and verify data restore:

# List the pods before deletion
[root@k8s-master1 velero]# kubectl get pod -n default


# Delete the pods in the default namespace
[root@k8s-master1 velero]# kubectl delete  pod net-test1
[root@k8s-master1 velero]# kubectl delete  pod net-test2


# Restore the pods
[root@k8s-master1 velero]# velero restore create --from-backup default-backup-20230828061532 --wait --kubeconfig=./awsuser.kubeconfig --namespace velero-system
# Verify the pods
[root@k8s-master1 velero]# kubectl get pod -o wide


2. Building container images with nerdctl + BuildKit + containerd

# Deploy BuildKit
[root@k8s-master1 src]# wget https://github.com/moby/buildkit/releases/download/v0.12.1/buildkit-v0.12.1.linux-amd64.tar.gz
[root@k8s-master1 src]# tar xvf buildkit-v0.12.1.linux-amd64.tar.gz 
[root@k8s-master1 src]# mv bin/* /usr/local/bin/
[root@k8s-master1 src]# buildctl --help
[root@k8s-master1 src]# cat /lib/systemd/system/buildkit.socket
[Unit]
Description=BuildKit
Documentation=https://github.com/moby/buildkit

[Socket]
ListenStream=%t/buildkit/buildkitd.sock

[Install]
WantedBy=sockets.target
[root@k8s-master1 src]# cat /lib/systemd/system/buildkitd.service
[Unit]
Description=BuildKit
Requires=buildkit.socket
After=buildkit.socket
Documentation=https://github.com/moby/buildkit

[Service]
ExecStart=/usr/local/bin/buildkitd --oci-worker=false --containerd-worker=true

[Install]
WantedBy=multi-user.target
[root@k8s-master1 src]# systemctl daemon-reload 
[root@k8s-master1 src]# systemctl enable buildkitd
Created symlink from /etc/systemd/system/multi-user.target.wants/buildkitd.service to /usr/lib/systemd/system/buildkitd.service.
[root@k8s-master1 src]# systemctl restart buildkitd
[root@k8s-master1 src]# systemctl status buildkitd
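nerdctl drives BuildKit through this buildkitd socket; the same kind of build can also be run directly with buildctl. A minimal sketch, assuming a Dockerfile and build context in the current directory (the image name is illustrative):

# Build ./Dockerfile and store the result as an image in containerd's image store
[root@k8s-master1 src]# buildctl build \
> --frontend dockerfile.v0 \
> --local context=. \
> --local dockerfile=. \
> --output type=image,name=harbor.linuxarchitect.io/baseimages/buildctl-test:v1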

Test the image build:

[root@k8s-master1 ubuntu2204-nginx-1.22]# vim build-command.sh 
[root@k8s-master1 ubuntu2204-nginx-1.22]# 
[root@k8s-master1 ubuntu2204-nginx-1.22]# 
[root@k8s-master1 ubuntu2204-nginx-1.22]# cat build-command.sh 
#!/bin/bash
#docker build -t harbor.linuxarchitect.io/baseimages/nginx:v1 .
#docker push harbor.linuxarchitect.io/baseimages/nginx:v1

/usr/local/bin/nerdctl build -t harbor.linuxarchitect.io/baseimages/nginx-base:1.22.0 .

/usr/local/bin/nerdctl push harbor.linuxarchitect.io/baseimages/nginx-base:1.22.0
[root@k8s-master1 ubuntu2204-nginx-1.22]# cat Dockerfile 
FROM registry.cn-hangzhou.aliyuncs.com/zhangshijie/ubuntu:22.04
MAINTAINER "jack 2973707860@qq.com"


#ADD sources.list /etc/apt/sources.list

RUN apt update && apt  install -y iproute2  ntpdate  tcpdump telnet traceroute nfs-kernel-server nfs-common  lrzsz tree  openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev ntpdate tcpdump telnet traceroute  gcc openssh-server lrzsz tree  openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev ntpdate tcpdump telnet traceroute iotop unzip zip make


ADD nginx-1.22.0.tar.gz /usr/local/src/
RUN cd /usr/local/src/nginx-1.22.0 && ./configure --prefix=/apps/nginx && make && make install  && ln -sv /apps/nginx/sbin/nginx /usr/bin
RUN groupadd  -g 2088 nginx && useradd  -g nginx -s /usr/sbin/nologin -u 2088 nginx && chown -R nginx.nginx /apps/nginx
ADD nginx.conf /apps/nginx/conf/
ADD frontend.tar.gz /apps/nginx/html/


EXPOSE 80 443
#ENTRYPOINT ["nginx"]
CMD ["nginx","-g","daemon off;"]
# Log in to the image registry
[root@k8s-master1 ubuntu2204-nginx-1.22]# nerdctl login harbor.linuxarchitect.io
# Build the image and push it
[root@k8s-master1 ubuntu2204-nginx-1.22]# sh build-command.sh 


Test the image:
[root@k8s-master1 ubuntu2204-nginx-1.22]# nerdctl run -d -p 80:80 harbor.linuxarchitect.io/baseimages/nginx-base:1.22.0


3. Pod lifecycle, common pod states, and the pod scheduling flow in detail

Pod lifecycle:

Pod lifecycle: after a pod starts initializing, zero or more init containers run first. Once the init containers finish, a postStart hook can be configured at container start; while the pod is running, a livenessProbe and a readinessProbe can be configured; finally, a preStop hook can be configured to run before the container stops.

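These stages map onto concrete fields of the pod spec; a minimal sketch (names, images, and paths are illustrative) showing where each piece is declared:

apiVersion: v1
kind: Pod
metadata:
  name: pod-lifecycle-demo
spec:
  initContainers:                  # 0..N init containers run first, one at a time
  - name: init-demo
    image: busybox:1.28
    command: ['sh', '-c', 'echo init done']
  containers:
  - name: main
    image: nginx:1.20.2
    startupProbe:                  # other probes are held back until this succeeds
      httpGet: {path: /index.html, port: 80}
    livenessProbe:                 # periodic check while running; failure restarts the container
      httpGet: {path: /index.html, port: 80}
    readinessProbe:                # failure removes the pod from Service endpoints
      httpGet: {path: /index.html, port: 80}
    # postStart/preStop handlers are declared under .lifecycle (full example in section 5)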

Common pod states:

Unschedulable: #the pod cannot be scheduled; kube-scheduler did not find a suitable node
PodScheduled: #the pod is being scheduled; when kube-scheduler starts it has not yet assigned the pod to a node, and once a suitable node is selected it updates etcd and assigns the pod to that node
Pending: #the pod is being created but not all of its containers have been created yet; for a pod in this state, check whether the storage it depends on can be mounted with the right permissions, etc.
Failed: #a container in the pod failed to start, leaving the pod in an abnormal state
Unknown: #the pod's current state cannot be obtained for some reason, usually a communication error with the node the pod runs on
Initialized: #all init containers in the pod have completed
ImagePullBackOff: #the node hosting the pod failed to pull the image
Running: #the containers in the pod have been created and started
Ready: #the containers in the pod are ready to serve requests
Error: #an error occurred while the pod was starting
NodeLost: #the node hosting the pod has lost contact
Waiting: #the pod is waiting to start
Terminating: #the pod is being destroyed
CrashLoopBackOff: #the pod crashed and kubelet is restarting it
InvalidImageName: #the node cannot resolve the image name, so the image cannot be pulled
ImageInspectError: #the image cannot be inspected, usually because it is incomplete
ErrImageNeverPull: #the pull policy forbids pulling the image, e.g. the registry project is private
RegistryUnavailable: #the image registry is unavailable, due to network issues or Harbor being down
ErrImagePull: #image pull error, e.g. a timeout or the download was forcibly terminated
CreateContainerConfigError: #the container configuration used by kubelet cannot be created
CreateContainerError: #failed to create the container
RunContainerError: #the pod failed to run, e.g. no PID 1 daemon process was initialized in the container
ContainersNotInitialized: #the pod has not finished initializing
ContainersNotReady: #the pod is not ready yet
ContainerCreating: #the pod is being created
PodInitializing: #the pod is initializing
DockerDaemonNotReady: #the docker service on the node is not running
NetworkPluginNotReady: #the network plugin has not started
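Most of these states can be inspected directly from the command line; the usual checks (pod names are illustrative):

# Overall STATUS column
[root@k8s-master1 ~]# kubectl get pod -o wide -n default
# Conditions (PodScheduled/Initialized/ContainersReady/Ready) plus the event history
[root@k8s-master1 ~]# kubectl describe pod net-test1 -n default
# Raw status, including waiting/terminated reasons such as CrashLoopBackOff or ErrImagePull
[root@k8s-master1 ~]# kubectl get pod net-test1 -n default -o yaml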

Pod scheduling flow: a command-line or UI tool calls the k8s API to create a pod. After the request reaches the api-server, the event is written to etcd; kube-scheduler watches the api-server, picks up the pod-creation event, and after scheduling succeeds reports the result back to the api-server, which writes it to etcd again; kubelet watches the api-server, picks up the creation event and creates the pod, and kube-proxy watches the api-server and creates the network rules.
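Each step of this flow leaves an event trail that can be observed while a pod is being created; a quick sketch (pod name illustrative):

# Events emitted by the scheduler and kubelet for a new pod, in chronological order
[root@k8s-master1 ~]# kubectl get events --sort-by=.metadata.creationTimestamp -n default | grep net-test1
# Typical sequence: Scheduled (default-scheduler) -> Pulling/Pulled -> Created -> Started (kubelet)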


4. The pause container and init containers: functions and usage examples

Pause container:

The pause container is the pod's underlying (infra) container. One is created for every pod, and its image is tiny. It only initializes the pod's underlying network and does no business processing.
So if a pod contains two containers A and B, the relationship is as follows:
1. A and B can communicate with each other directly over localhost.
2. A and B see the same network interfaces, IP address, and port listening information.
3. The pod has only one IP address, namely the IP of the pod's network namespace (initialized and created by the infra/pause container).
4. In a k8s environment every pod has its own IP address (provided there are enough addresses), and that IP is shared internally by all containers in the pod.

Configuration example:

# 1. Test on node host 172.31.4.3: prepare the nginx configuration file and configure dynamic/static separation:
[root@k8s-master2 pause-test-case]# pwd
/opt/pause-case/pause-test-case
[root@k8s-master2 pause-test-case]# cat nginx.conf 
error_log stderr;
events { worker_connections  1024; }
http {
    access_log /dev/stdout;
    server {
        listen 80 default_server;
        server_name www.mysite.com;
        location / {
          index index.html index.php;
          root /usr/share/nginx/html;
         }
        location ~ \.php$ {
            root           /usr/share/nginx/html;
            fastcgi_pass   127.0.0.1:9000;
            fastcgi_index  index.php;
            fastcgi_param  SCRIPT_FILENAME  $document_root$fastcgi_script_name;
             include        fastcgi_params;
             }
        }
}
# 2. Deploy the pause container
[root@k8s-master2 pause-test-case]# nerdctl pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.8
[root@k8s-master2 pause-test-case]# nerdctl run -d -p 80:80 --name pause-container-test registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.8
# 3. Prepare the test web pages:
[root@k8s-master2 pause-test-case]# ll html/
total 8
drwxr-xr-x 2 root root  6 Apr 24 2022 *
-rw-r--r-- 1 root root 34 Apr 24 2022 index.html
-rw-r--r-- 1 root root 25 Apr 24 2022 index.php
# 4. Deploy the nginx container using the pause container's network:
[root@k8s-master2 pause-test-case]# nerdctl run -d --name nginx-container-test \
-v `pwd`/nginx.conf:/etc/nginx/nginx.conf \
-v `pwd`/html:/usr/share/nginx/html \
--net=container:pause-container-test \
registry.cn-hangzhou.aliyuncs.com/zhangshijie/nginx:1.22.0-alpine
# 5. Deploy the php container using the pause container's network:
[root@k8s-master2 pause-test-case]# nerdctl run -d --name php-container-test \
--net=container:pause-container-test \
-v `pwd`/html:/usr/share/nginx/html \
registry.cn-hangzhou.aliyuncs.com/zhangshijie/php:5.6.40-fpm
# Verify access and the container IP addresses


Init containers:

Purpose:
1. Initialize the runtime environment: prepare everything the business container needs in advance, e.g. generate its configuration files at the expected location and check data integrity, software versions, and other prerequisites.
2. Prepare the business data needed before the business container runs, e.g. download it from OSS or copy it into place.
3. Check whether the services the pod depends on are reachable.

Characteristics:
1. A pod can have multiple business containers and, at the same time, multiple init containers, but every init container and business container runs in its own isolated environment.
2. Init containers start before the business containers.
3. The business containers only run after the init containers have completed successfully.
4. If a pod has multiple init containers, they run one after another from top to bottom and must all succeed before the business containers run.
5. Init containers do not support probes (an init container exits once initialization is complete and never runs again).

Example:

[root@k8s-master2 case1-init-container]# cat 1-init-container.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: myserver-myapp 
  name: myserver-myapp-deployment-name
  namespace: myserver
spec:
  replicas: 1 
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
        - name: myserver-myapp-container
          image: nginx:1.20.0 
          #imagePullPolicy: Always
          volumeMounts:
          - mountPath: "/usr/share/nginx/html/myserver"
            name: myserver-data
          - name: tz-config
            mountPath: /etc/localtime
      initContainers:
        - name: init-web-data  # init container: prepare the data before the business container is created
          image: centos:7.9.2009
          command: ['/bin/bash','-c',"for i in `seq 1 10`;do echo '<h1>'$i web page at $(date +%Y%m%d%H%M%S) '<h1>' >> /data/nginx/html/myserver/index.html;sleep 1;done"]
          volumeMounts:
          - mountPath: "/data/nginx/html/myserver"
            name: myserver-data
          - name: tz-config
            mountPath: /etc/localtime
        - name: change-data-owner # fix permissions on the data
          image: busybox:1.28
          command: ['/bin/sh','-c',"/bin/chmod 644 /data/nginx/html/myserver/* -R"]
          volumeMounts:
          - mountPath: "/data/nginx/html/myserver"
            name: myserver-data
          - name: tz-config
            mountPath: /etc/localtime
      volumes:
      - name: myserver-data
        hostPath:
          path: /tmp/data/html
      - name: tz-config
        hostPath:
          path: /etc/localtime 

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: myserver-myapp-service
  name: myserver-myapp-service-name
  namespace: myserver
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30088
  selector:
    app: myserver-myapp-frontend
[root@k8s-master2 case1-init-container]# kubectl apply -f 1-init-container.yaml 
# Verify the data


5. Summary of probes

Overview: a probe is a periodic diagnostic performed by kubelet on a container to keep the pod in a running state. To perform the diagnostic, kubelet calls a Handler implemented by the container, also called a Hook.
Types:
ExecAction #execute the specified command inside the container; the diagnostic is considered successful if the command exits with return code 0.
TCPSocketAction #perform a TCP check against the container's IP on the specified port; the diagnostic is considered successful if the port is open.
HTTPGetAction: #perform an HTTP GET request against the container's IP on the specified port and path; the diagnostic is considered successful if the response status code is >= 200 and < 400.
grpc: health check for applications that implement the gRPC health checking protocol (the diagnostic succeeds if the response status is "SERVING"); Alpha in 1.23, Beta in 1.24, GA in 1.27.
Probe results:
Success: the container passed the diagnostic.
Failure: the container failed the diagnostic.
Unknown: the diagnostic itself failed, so no action is taken.
Pod restart policy:
Once probes are configured, when a probe fails the next step for the pod is determined by restartPolicy:
restartPolicy (container restart policy):
 Always: when a container fails, k8s restarts it automatically; used by ReplicationController/ReplicaSet/Deployment; this is the default.
 OnFailure: when a container fails (stops running with a non-zero exit code), k8s restarts it automatically.
 Never: the container is never restarted regardless of its state; used by Job or CronJob.

Functions and differences:
startupProbe: startup probe, introduced in Kubernetes v1.16. It determines whether the application inside the container has finished starting. If a startup probe is configured, all other probes are disabled until the startupProbe succeeds. If the startupProbe fails, kubelet kills the container and the container follows its restart policy. If the container does not provide a startup probe, the default state is Success.
livenessProbe: liveness probe, checks whether the container is running. If the liveness probe fails, kubelet kills the container and the container is subject to its restart policy. If the container does not provide a liveness probe, the default state is Success. livenessProbe controls whether the pod is restarted.
readinessProbe: readiness probe. If the readiness probe fails, the endpoints controller removes the pod's IP address from the endpoints of every Service that matches the pod. The readiness state before the initial delay defaults to Failure. If the container does not provide a readiness probe, the default state is Success. readinessProbe controls whether the pod is added to a Service, preventing the Service from forwarding requests to pods that cannot handle them.

Summary:
startupProbe: checked at startup; livenessProbe and readinessProbe do not run until it succeeds.
livenessProbe: decides whether to restart the pod.
readinessProbe: decides whether to remove the pod's IP from the Service backend forwarding.

Configuration parameters:
initialDelaySeconds: 120 #initial delay; how many seconds kubelet waits before the first probe. Default 0, minimum 0.
periodSeconds: 60 #probe interval; how often (in seconds) kubelet runs the probe. Default 10 seconds, minimum 1.
timeoutSeconds: 5 #timeout for a single probe, in seconds. Default 1, minimum 1.
successThreshold: 1 #number of consecutive successes required after a failure for the probe to be considered successful. Default 1; must be 1 for liveness probes; minimum 1.
failureThreshold: 3 #number of consecutive failures required after a success for the probe to be considered failed; after 3 failed probes the probe is considered failed, and for readiness probes the pod is marked not ready. Default 3, minimum 1.

HTTP probes support additional fields on httpGet:

host: #host name to connect to; defaults to the pod IP. You can also set the "Host" HTTP header instead.
scheme: http #scheme used to connect to the host (HTTP or HTTPS); defaults to HTTP.
path: /monitor/index.html #path to request on the HTTP server.
httpHeaders: #custom HTTP headers for the request; HTTP allows repeated headers.
port: 80 #port number or port name of the container to access; a number must be in the range 1-65535.
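Put together, a probe using several of these fields might look like the fragment below (values are illustrative):

        readinessProbe:
          httpGet:
            scheme: HTTP
            path: /monitor/index.html
            port: 80
            httpHeaders:
            - name: Host
              value: www.mysite.com
          initialDelaySeconds: 5
          periodSeconds: 10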

Examples:

startupProbe:

[root@k8s-master1 case3-Probe]# cat 5-startupProbe.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: myserver-myapp-frontend-label
    #matchExpressions:
    #  - {key: app, operator: In, values: [myserver-myapp-frontend,ng-rs-81]}
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend-label
    spec:
      containers:
      - name: myserver-myapp-frontend-label
        image: nginx:1.20.2
        ports:
        - containerPort: 80
        startupProbe:
          httpGet:
            path: /index.html-1  # startup probe: if this check fails the container will not start and is restarted per the restart policy; liveness and readiness probes do not run until it succeeds
            port: 80
          initialDelaySeconds: 5 #delay 5s before the first check
          failureThreshold: 3  #failures required to transition from success to failure
          periodSeconds: 3 #probe interval


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend-service
  namespace: myserver
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 40012
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend-label
# The check fails, so kubelet kills the container


[root@k8s-master1 case3-Probe]# cat 5-startupProbe.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: myserver-myapp-frontend-label
    #matchExpressions:
    #  - {key: app, operator: In, values: [myserver-myapp-frontend,ng-rs-81]}
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend-label
    spec:
      containers:
      - name: myserver-myapp-frontend-label
        image: nginx:1.20.2
        ports:
        - containerPort: 80
        startupProbe:
          httpGet:
            path: /index.html
            port: 80
          initialDelaySeconds: 5 #delay 5s before the first check
          failureThreshold: 3  #failures required to transition from success to failure
          periodSeconds: 3 #probe interval


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend-service
  namespace: myserver
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30012
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend-label
[root@k8s-master1 case3-Probe]# kubectl apply -f 5-startupProbe.yaml 
# Verify: once the startup probe succeeds, the pod runs normally


livenessProbe:

HTTP type:

[root@k8s-master1 case3-Probe]# cat 1-http-Probe.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: myserver-myapp-frontend-label
    #matchExpressions:
    #  - {key: app, operator: In, values: [myserver-myapp-frontend,ng-rs-81]}
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend-label
    spec:
      containers:
      - name: myserver-myapp-frontend-label
        image: nginx:1.20.2
        ports:
        - containerPort: 80
        #readinessProbe:
        livenessProbe:  
          httpGet:
            #path: /monitor/monitor.html
            path: /index.html # checks whether this path exists; if the probe fails the pod is restarted
            port: 80
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend-service
  namespace: myserver
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30012
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend-label
# The file exists, so the pod starts normally


[root@k8s-master1 case3-Probe]# cat 1-http-Probe.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: myserver-myapp-frontend-label
    #matchExpressions:
    #  - {key: app, operator: In, values: [myserver-myapp-frontend,ng-rs-81]}
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend-label
    spec:
      containers:
      - name: myserver-myapp-frontend-label
        image: nginx:1.20.2
        ports:
        - containerPort: 80
        #readinessProbe:
        livenessProbe:
          httpGet:
            path: /monitor/monitor.html # this path does not exist, so the liveness probe fails and the pod is restarted
            #path: /index.html
            port: 80
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend-service
  namespace: myserver
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30012
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend-label
# Verify whether restarts occur


TCPSocketAction type:

[root@k8s-master1 case3-Probe]# cat 2-tcp-Probe.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: myserver-myapp-frontend-label
    #matchExpressions:
    #  - {key: app, operator: In, values: [myserver-myapp-frontend,ng-rs-81]}
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend-label
    spec:
      containers:
      - name: myserver-myapp-frontend-label
        image: nginx:1.20.2
        ports:
        - containerPort: 80
        livenessProbe:
        #readinessProbe:
          tcpSocket:
            #port: 80
            port: 8080
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend-service
  namespace: myserver
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30012
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend-label
# Verify: the liveness check fails and the pod is restarted


ExecAction type:

[root@k8s-master1 case3-Probe]# cat 3-exec-Probe.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-redis-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: myserver-myapp-redis-label
    #matchExpressions:
    #  - {key: app, operator: In, values: [myserver-myapp-redis,ng-rs-81]}
  template:
    metadata:
      labels:
        app: myserver-myapp-redis-label
    spec:
      containers:
      - name: myserver-myapp-redis-container
        image: redis
        ports:
        - containerPort: 6379
        livenessProbe:
        #readinessProbe:
          exec:
            command:
            #- /apps/redis/bin/redis-cli
            - /usr/local/bin/redis-cli 
            - quit
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
      
---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-redis-service
  namespace: myserver
spec:
  ports:
  - name: http
    port: 6379
    targetPort: 6379
    nodePort: 30016
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-redis-label
[root@k8s-master1 case3-Probe]# kubectl apply -f 3-exec-Probe.yaml 
# Normally a failed probe would trigger a restart, but here the returned result is Unknown (the diagnostic itself failed), so no action is taken


gRPC type:

[root@k8s-master1 case3-Probe]# cat 4.grpc-Probe.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-etcd-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: myserver-etcd-label
    #matchExpressions:
    #  - {key: app, operator: In, values: [myserver-etcd,ng-rs-81]}
  template:
    metadata:
      labels:
        app: myserver-etcd-label
    spec:
      containers:
      - name: myserver-etcd-label
        image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/etcd:3.5.1-0
        command: [ "/usr/local/bin/etcd", "--data-dir",  "/var/lib/etcd", "--listen-client-urls", "http://0.0.0.0:2379", "--advertise-client-urls", "http://127.0.0.1:2379", "--log-level", "debug"]
        ports:
        - containerPort: 2379
        livenessProbe:
          grpc:
            port: 2378
          initialDelaySeconds: 10
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
        readinessProbe:
          grpc:
            port: 2378
          initialDelaySeconds: 10
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-etcd-service
  namespace: myserver
spec:
  ports:
  - name: http
    port: 2379
    targetPort: 2379
    nodePort: 42379
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-etcd-label
[root@k8s-master1 case3-Probe]# kubectl apply -f 4.grpc-Probe.yaml 
# Verify: after the gRPC probe fails, the liveness check restarts the pod and the readiness check removes the pod from the Service backend


readinessProbe:

HTTP type:

[root@k8s-master1 case3-Probe]# cat 1-http-Probe.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: myserver-myapp-frontend-label
    #matchExpressions:
    #  - {key: app, operator: In, values: [myserver-myapp-frontend,ng-rs-81]}
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend-label
    spec:
      containers:
      - name: myserver-myapp-frontend-label
        image: nginx:1.20.2
        ports:
        - containerPort: 80
        readinessProbe:
        #livenessProbe:
          httpGet:
            #path: /monitor/monitor.html
            path: /index.html
            port: 80
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend-service
  namespace: myserver
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30012
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend-label
# The readiness probe succeeds, so the Service backend forwards requests to the pod normally


[root@k8s-master1 case3-Probe]# cat 1-http-Probe.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: myserver-myapp-frontend-label
    #matchExpressions:
    #  - {key: app, operator: In, values: [myserver-myapp-frontend,ng-rs-81]}
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend-label
    spec:
      containers:
      - name: myserver-myapp-frontend-label
        image: nginx:1.20.2
        ports:
        - containerPort: 80
        readinessProbe:
        #livenessProbe:
          httpGet:
            path: /monitor/monitor.html
            #path: /index.html
            port: 80
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend-service
  namespace: myserver
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30012
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend-label
[root@k8s-master1 case3-Probe]# kubectl apply -f  1-http-Probe.yaml 
# Verify: after the readiness check fails, the pod is removed from the Service backend forwarding


TCPSocketAction type:

[root@k8s-master1 case3-Probe]# cat 2-tcp-Probe.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: myserver-myapp-frontend-label
    #matchExpressions:
    #  - {key: app, operator: In, values: [myserver-myapp-frontend,ng-rs-81]}
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend-label
    spec:
      containers:
      - name: myserver-myapp-frontend-label
        image: nginx:1.20.2
        ports:
        - containerPort: 80
        #livenessProbe:
        readinessProbe:
          tcpSocket:
            #port: 80
            port: 8080
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend-service
  namespace: myserver
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30012
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend-label
[root@k8s-master1 case3-Probe]# kubectl apply -f 2-tcp-Probe.yaml 
# Verify: the check on port 8080 fails, and the endpoints controller removes the pod from the Service


Introduction to postStart and preStop handlers:

https://kubernetes.io/zh/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/

When a container starts, Kubernetes immediately sends the postStart event; just before a container is terminated, Kubernetes sends a preStop event. A container can specify a handler for each of these events.

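A minimal sketch along the lines of the example on the linked page:

apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-demo
spec:
  containers:
  - name: lifecycle-demo-container
    image: nginx
    lifecycle:
      postStart:
        exec:
          command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
      preStop:
        exec:
          command: ["/bin/sh", "-c", "nginx -s quit; while killall -0 nginx; do sleep 1; done"]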

Pod termination flow:

https://cloud.google.com/blog/products/containers-kubernetes/kubernetes-best-practices-terminating-with-grace

1. Creating a pod
The creation request is submitted to the api-server; the api-server performs authentication and admission and writes the event to etcd.
kube-scheduler completes the scheduling flow.
kubelet creates and starts the pod, then runs postStart.
livenessProbe runs periodically.
The pod enters the Running state.
After readinessProbe passes, the Service associates with the pod.
The pod starts accepting client requests.
2. Deleting a pod
The deletion request is submitted to the api-server; the api-server performs authentication and admission and writes the event to etcd.
The pod is set to the "Terminating" state, removed from the Service's Endpoints list, and no longer accepts client requests.
The pod runs preStop.
kubelet sends a SIGTERM signal (graceful termination) to the main process of the containers in the pod, letting them know they will be shut down soon.
terminationGracePeriodSeconds: 60 #optional termination grace period (pod deletion grace period). If a grace period is configured, Kubernetes waits for it to expire; otherwise it waits at most 30s. This waiting time is called the graceful termination grace period and defaults to 30 seconds. Note that the grace period runs in parallel with the preStop hook and the SIGTERM signal, i.e. Kubernetes may not wait for the preStop hook to finish (if the main process has still not exited after at most 30 seconds, the pod is forcibly terminated).
SIGKILL is sent to the pod and the pod is deleted.
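Where the grace period and the preStop hook sit in a pod spec, as a minimal fragment (durations are illustrative):

spec:
  terminationGracePeriodSeconds: 60   # total time allowed between the delete request and SIGKILL
  containers:
  - name: myapp
    image: nginx:1.20.2
    lifecycle:
      preStop:
        exec:
          command: ["/bin/sh", "-c", "sleep 10"]   # give in-flight requests time to drain before SIGTERM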


6. Kubernetes practice: introduction to PV/PVC, and running a ZooKeeper cluster on Kubernetes with PVC-based data persistence

1. Implementation based on StatefulSet + storageClassName
[root@k8s-master1 1.StatefulSet]# cat zk-cluster-StatefulSet.yaml 
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-headless
  labels:
    app: zookeeper
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zookeeper
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-service
  labels:
    app: zookeeper
spec:
  type: NodePort
  ports:
  - port: 2181
    name: client
  selector:
    app: zookeeper
---
#apiVersion: policy/v1beta1
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zookeeper-pdb
spec:
  selector:
    matchLabels:
      app: zookeeper
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper
spec:
  selector:
    matchLabels:
      app: zookeeper
  serviceName: zookeeper-headless
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zookeeper
    spec:
      #affinity:
      #  podAntiAffinity:
      #    requiredDuringSchedulingIgnoredDuringExecution:
      #      - labelSelector:
      #          matchExpressions:
      #            - key: "app"
      #              operator: In
      #              values:
      #              - zookeeper
      #        topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: Always
        image: "registry.cn-hangzhou.aliyuncs.com/zhangshijie/zookeeper:v3.4.14-20230818"
        resources:
          limits:
            memory: "512Mi"
            cpu: "0.2"
          requests:
            memory: "512Mi"
            cpu: "0.2"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start.sh \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        #readinessProbe:
        #  exec:
        #    command:
        #    - sh
        #    - -c
        #    - "ready_live.sh 2181"
        #  initialDelaySeconds: 10
        #  timeoutSeconds: 5
        #livenessProbe:
        #  exec:
        #    command:
        #    - sh
        #    - -c
        #    - "ready_live.sh 2181"
        #  initialDelaySeconds: 10
        #  timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: managed-nfs-storage 
      resources:
        requests:
          storage: 20Gi
[root@k8s-master1 1.StatefulSet]# kubectl apply -f zk-cluster-StatefulSet.yaml 
# Enter a ZooKeeper pod and verify the cluster
[root@k8s-master1 1.StatefulSet]# kubectl exec -it zookeeper-0 bash
zookeeper@zookeeper-0:/$ /usr/bin/zkServer.sh status
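The volumeClaimTemplates above expect a StorageClass named managed-nfs-storage to already exist in the cluster; it is not defined in this manifest. A minimal sketch of what it could look like, assuming the NFS subdir external provisioner has been deployed (the provisioner name must match however that controller was installed):

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner   # must match the deployed provisioner
parameters:
  archiveOnDelete: "false"   # delete the data instead of archiving it when the PVC is removed

With the StorageClass in place, kubectl get storageclass and kubectl get pvc show whether the claims created by the StatefulSet are Bound.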


2. Implementation based on Deployment

# Build the ZooKeeper image
[root@k8s-master1 zookeeper]# nerdctl pull registry.cn-hangzhou.aliyuncs.com/zhangshijie/slim_java:8
[root@k8s-master1 zookeeper]# bash build-command.sh v3.4.14
[root@k8s-deployer ~]# mkdir -p /data/k8sdata/magedu/zookeeper-datadir-1
[root@k8s-deployer ~]# mkdir -p /data/k8sdata/magedu/zookeeper-datadir-2
[root@k8s-deployer ~]# mkdir -p /data/k8sdata/magedu/zookeeper-datadir-3
[root@k8s-deployer ~]# cat /etc/exports
/data/k8sdata/magedu/zookeeper-datadir-1 *(rw,no_root_squash)
/data/k8sdata/magedu/zookeeper-datadir-2 *(rw,no_root_squash)
/data/k8sdata/magedu/zookeeper-datadir-3 *(rw,no_root_squash)
[root@k8s-deployer ~]# systemctl restart nfs-server
# Create the PVs and PVCs
[root@k8s-master1 pv]# cat zookeeper-persistentvolume.yaml 
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-1
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce 
  nfs:
    server: 172.18.10.121
    path: /data/k8sdata/magedu/zookeeper-datadir-1 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-2
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 172.18.10.121 
    path: /data/k8sdata/magedu/zookeeper-datadir-2 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-3
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 172.18.10.121  
    path: /data/k8sdata/magedu/zookeeper-datadir-3 
[root@k8s-master1 pv]# cat zookeeper-persistentvolumeclaim.yaml 
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-1
  namespace: magedu
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-1
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-2
  namespace: magedu
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-2
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-3
  namespace: magedu
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-3
  resources:
    requests:
      storage: 10Gi
[root@k8s-master1 pv]# kubectl create namespace magedu
[root@k8s-master1 pv]# kubectl apply -f zookeeper-persistentvolumeclaim.yaml 
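Before moving on, confirm that each claim is bound (this assumes the PV manifest above has been applied as well, which the original steps imply but do not show):

[root@k8s-master1 pv]# kubectl apply -f zookeeper-persistentvolume.yaml
[root@k8s-master1 pv]# kubectl get pv
[root@k8s-master1 pv]# kubectl get pvc -n magedu
# Each zookeeper-datadir-pvc-N should report STATUS Bound against its matching PV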
# Create the ZooKeeper cluster
[root@k8s-master1 zookeeper]# cat zookeeper.yaml 
apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  namespace: magedu
spec:
  ports:
    - name: client
      port: 2181
  selector:
    app: zookeeper
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper1
  namespace: magedu
spec:
  type: NodePort        
  ports:
    - name: client
      port: 2181
      nodePort: 32181
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "1"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper2
  namespace: magedu
spec:
  type: NodePort        
  ports:
    - name: client
      port: 2181
      nodePort: 32182
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "2"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper3
  namespace: magedu
spec:
  type: NodePort        
  ports:
    - name: client
      port: 2181
      nodePort: 32183
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "3"
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper1
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "1"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-1
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-1
      containers:
        - name: server
          image: harbor.linuxarchitect.io/magedu/zookeeper:v3.4.14 
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "1"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-1 
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper2
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "2"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-2
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-2
      containers:
        - name: server
          image: harbor.linuxarchitect.io/magedu/zookeeper:v3.4.14 
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "2"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-2 
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper3
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "3"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-3
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-3
      containers:
        - name: server
          image: harbor.linuxarchitect.io/magedu/zookeeper:v3.4.14 
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "3"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-3
[root@k8s-master1 zookeeper]# kubectl apply -f zookeeper.yaml
# Verify that the three pods form a cluster
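A quick way to check the roles from outside the pods, assuming zkServer.sh is on the PATH inside the custom image (the in-image path may differ):

[root@k8s-master1 zookeeper]# kubectl get pod -n magedu -o wide
[root@k8s-master1 zookeeper]# for i in zookeeper1 zookeeper2 zookeeper3; do kubectl -n magedu exec deploy/$i -- zkServer.sh status; done
# One member should report "Mode: leader" and the other two "Mode: follower"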


7. Custom images for a web service with dynamic/static separation, using NFS for data sharing and persistence

# Build the base image
[root@k8s-master1 centos]# bash build-command.sh 
# Build the JDK image
[root@k8s-master1 jdk-1.8.212]# cd /opt/zookeeper-case-n79/2.deployment/k8s-data/dockerfile/web/pub-images/jdk-1.8.212
[root@k8s-master1 jdk-1.8.212]# bash build-command.sh 
# Build the tomcat base image
[root@k8s-master1 tomcat-base-8.5.43]# cd /opt/zookeeper-case-n79/2.deployment/k8s-data/dockerfile/web/pub-images/tomcat-base-8.5.43
[root@k8s-master1 tomcat-base-8.5.43]# bash build-command.sh 
# Build the application image
[root@k8s-master1 tomcat-base-8.5.43]# cd /opt/zookeeper-case-n79/2.deployment/k8s-data/dockerfile/web/magedu/tomcat-app1
[root@k8s-master1 tomcat-app1]# bash build-command.sh  v1
# Create the pod
[root@k8s-master1 tomcat-app1]# kubectl apply -f tomcat-app1.yaml 
deployment.apps/magedu-tomcat-app1-deployment created
service/magedu-tomcat-app1-service created
[root@k8s-master1 tomcat-app1]# cat tomcat-app1.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-tomcat-app1-deployment-label
  name: magedu-tomcat-app1-deployment
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: magedu-tomcat-app1-selector
  template:
    metadata:
      labels:
        app: magedu-tomcat-app1-selector
    spec:
      containers:
      - name: magedu-tomcat-app1-container
        image: harbor.linuxarchitect.io/magedu/tomcat-app1:v1 
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        imagePullPolicy: IfNotPresent
        #imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "18"
        #resources:
        #  limits:
        #    cpu: 1
        #    memory: "512Mi"
        #  requests:
        #    cpu: 500m
        #    memory: "512Mi"
        volumeMounts:
        - name: magedu-images
          mountPath: /data/webapp/images
          readOnly: false
        - name: magedu-static
          mountPath: /data/webapp/static
          readOnly: false
      volumes:
      - name: magedu-images
        nfs:
          server: 172.18.10.121
          path: /data/k8sdata/magedu/images
      - name: magedu-static
        nfs:
          server: 172.18.10.121
          path: /data/k8sdata/magedu/static
#      nodeSelector:
#        project: magedu
#        app: tomcat
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-tomcat-app1-service-label
  name: magedu-tomcat-app1-service
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30092
  selector:
    app: magedu-tomcat-app1-selector
[root@k8s-master1 tomcat-app1]# kubectl apply -f tomcat-app1.yaml 
# Verify access
[root@k8s-master1 tomcat-app1]# curl  172.18.10.122:30092/app1/index.html
tomcat app1 for magedu

# Deploy the frontend (nginx) service
[root@k8s-master1 nginx-base]# cd /opt/zookeeper-case-n79/2.deployment/k8s-data/dockerfile/web/pub-images/nginx-base
[root@k8s-master1 nginx-base]# bash build-command.sh 
[root@k8s-master1 nginx]# cd /opt/zookeeper-case-n79/2.deployment/k8s-data/dockerfile/web/magedu/nginx
[root@k8s-master1 nginx]# bash build-command.sh v1
[root@k8s-master1 nginx]# cd /opt/zookeeper-case-n79/2.deployment/k8s-data/yaml/magedu/nginx
[root@k8s-master1 nginx]# cat nginx.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-nginx-deployment-label
  name: magedu-nginx-deployment
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: magedu-nginx-selector
  template:
    metadata:
      labels:
        app: magedu-nginx-selector
    spec:
      containers:
      - name: magedu-nginx-container
        image: harbor.linuxarchitect.io/magedu/nginx-web1:v1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http
        - containerPort: 443
          protocol: TCP
          name: https
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "20"
        resources:
          limits:
            cpu: 500m
            memory: 512Mi
          requests:
            cpu: 500m
            memory: 256Mi

        volumeMounts:
        - name: magedu-images
          mountPath: /usr/local/nginx/html/webapp/images
          readOnly: false
        - name: magedu-static
          mountPath: /usr/local/nginx/html/webapp/static
          readOnly: false
      volumes:
      - name: magedu-images
        nfs:
          server: 172.18.10.121
          path: /data/k8sdata/magedu/images 
      - name: magedu-static
        nfs:
          server: 172.18.10.121
          path: /data/k8sdata/magedu/static
      #nodeSelector:
      #  group: magedu

    

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-nginx-service-label
  name: magedu-nginx-service
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30090
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
    nodePort: 30091
  selector:
    app: magedu-nginx-selector
[root@k8s-master1 nginx]# kubectl apply -f nginx.yaml 
# Test and verify
[root@k8s-deployer static]# echo "1111111111111111111112" > index.html
[root@k8s-master1 nginx]# curl 172.18.10.124:30090/webapp/static/
1111111111111111111112
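Since the nginx pod and the tomcat pod mount the same NFS exports, a file written once on the NFS server should be visible from both; a quick sketch (the nginx URL assumes the custom nginx.conf serves /webapp/images, and the tomcat path follows the volumeMounts above):

# Write a test file into the shared images export on the NFS server
[root@k8s-deployer ~]# echo "shared-image-test" > /data/k8sdata/magedu/images/test.txt
# Read it back through the nginx Service
[root@k8s-master1 nginx]# curl 172.18.10.124:30090/webapp/images/test.txt
# Confirm the same file is visible inside the tomcat pod via its NFS mount
[root@k8s-master1 nginx]# kubectl -n magedu exec deploy/magedu-tomcat-app1-deployment -- cat /data/webapp/images/test.txt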
