Kubernetes (k8s) Study Notes

deployment

apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-deploy
spec:
  replicas: 10
  selector:
    matchLabels:
      app: hello-world # Pod label; matches the Service's label selector
  revisionHistoryLimit: 5
  progressDeadlineSeconds: 300
  minReadySeconds: 10 # a new Pod must be Ready for 10s before the rollout moves on
  strategy:
    type: RollingUpdate # update using the RollingUpdate strategy
    rollingUpdate: # keep the Pod count between 9 and 11 during the rollout
      maxUnavailable: 1 # never drop more than one Pod below the desired count (at least 9)
      maxSurge: 1 # never go more than one Pod above the desired count (at most 11)
  template:
    metadata:
      labels:
        app: hello-world
    spec:
      containers:
      - name: hello-pod
        image: nigelpoulton/k8sbook:1.0
        ports:
        - containerPort: 8080
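
Apply the manifest and check the rollout (a sketch; the file name deploy.yml is an assumption):

kubectl apply -f deploy.yml
kubectl get deploy hello-deploy
kubectl get pods -l app=hello-world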

Create the corresponding Service (svc)

apiVersion: v1
kind: Service
metadata:
  name: hello-svc
  labels:
    app: hello-world # label on the Service object itself; the Pod selector (app=hello-world) is under spec.selector below
spec:
  type: NodePort
  ports:
  - port: 8080
    nodePort: 30001
    protocol: TCP
  selector:
    app: hello-world

Watch the rolling update

kubectl rollout status deployment hello-deploy

View rollout history

kubectl rollout history deployment hello-deploy

Check the ReplicaSets after the update

kubectl get rs

Roll back to a specific revision

kubectl rollout undo deployment hello-deploy --to-revision=1

Service

apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-deploy
spec:
  replicas: 10
  selector:
    matchLabels:
      app: hello-world
  template:
    metadata:
      labels:
        app: hello-world
    spec:
      containers:
      - name: hello-ctr
        image: nigelpoulton/k8sbook:latest
        ports:
        - containerPort: 8080

Create the Service from the command line

kubectl expose deployment web-deploy --name=hello-svc --target-port=8080 --type=NodePort

Inspect the Service

kubectl describe svc hello-svc

[root@master svc]# kubectl describe svc hello-svc
Name:                     hello-svc
Namespace:                default
Labels:                   <none>
Annotations:              <none>
Selector:                 app=hello-world # label defined by the label selector
Type:                     NodePort
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       10.12.205.3 # the Service's internal ClusterIP (VIP)
IPs:                      10.12.205.3
Port:                     <unset>  8080/TCP
TargetPort:               8080/TCP # the Pod port the application listens on
NodePort:                 <unset>  32458/TCP # the Service port reachable from outside the cluster
Endpoints:                10.244.104.59:8080,10.244.104.60:8080,10.244.104.61:8080 + 7 more... # dynamic list of healthy Pod IPs matching the label selector
Session Affinity:         None
External Traffic Policy:  Cluster
Events:                   <none>

Create the Service declaratively

apiVersion: v1
kind: Service
metadata:
  name: hello-svc
  labels:
    chapter: services
spec:
# ipFamilyPolicy: PreferDualStack
# ipFamilies:
# - IPv4
# - IPv6
  type: NodePort
  ports:
  - port: 8080
    nodePort: 30001
    targetPort: 8080
    protocol: TCP
  selector:
    app: hello-world

Inspect the Service

kubectl get svc hello-svc
kubectl describe svc hello-svc

Inspect the Endpoints

kubectl get ep hello-svc

Rolling update (shifting traffic with labels)

Initial state

Service app=biz1 zone=prod
Pod1 app=biz1 zone=prod ver=4.1
Pod2 app=biz1 zone=prod ver=4.1

During the update

The Pods carry a ver label; because the Service selector does not include ver, it routes traffic to both the old and the new Pods.

Service app=biz1 zone=prod
Pod1 app=biz1 zone=prod ver=4.1
Pod2 app=biz1 zone=prod ver=4.1
Pod3 app=biz1 zone=prod ver=4.2
Pod4 app=biz1 zone=prod ver=4.2

After the update

Add ver=4.2 to the Service's selector: traffic now goes only to the new Pods. Changing it to ver=4.1 sends traffic back to the old Pods.

Service app=biz1 zone=prod ver=4.2
Pod1 app=biz1 zone=prod ver=4.1
Pod2 app=biz1 zone=prod ver=4.1
Pod3 app=biz1 zone=prod ver=4.2
Pod4 app=biz1 zone=prod ver=4.2
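
A minimal Service sketch for the final state above (the Service name and port are assumptions, following the earlier hello-svc example):

apiVersion: v1
kind: Service
metadata:
  name: biz1-svc # assumed name
spec:
  ports:
  - port: 8080 # assumed port
  selector: # adding ver pins traffic to a single version
    app: biz1
    zone: prod
    ver: "4.2" # change to "4.1" to send traffic back to the old Pods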

Service discovery and registration

kubectl get service
kubectl get endpoints

Service registration

    1. POST the Service configuration to the API Server
    2. A ClusterIP is allocated
    3. The configuration is persisted to the cluster store
    4. An Endpoints object holding the Pod IPs is created
    5. The cluster DNS discovers the new Service
    6. DNS records are created
    7. kube-proxy pulls the Service configuration
    8. IPVS rules are created for load balancing
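
A quick way to verify steps 2, 4 and 6 for the hello-svc example above (a sketch; assumes the earlier manifests are applied):

kubectl get svc hello-svc # step 2: a ClusterIP has been allocated
kubectl get endpoints hello-svc # step 4: Endpoints object with the Pod IPs
kubectl run dnstest --rm -it --restart=Never --image=busybox -- nslookup hello-svc # step 6: DNS record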

Service discovery

    1. Request DNS resolution of the Service name
    2. Receive the ClusterIP
    3. Send traffic to the ClusterIP
    4. No route; traffic goes to the container's default gateway
    5. Forwarded to the node
    6. No route; traffic goes to the node's default gateway
    7. Processed by the node's kernel
    8. Trapped by an IPVS rule
    9. The destination IP is rewritten to a Pod IP
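
To see step 8 on a node, list the IPVS rules for the Service's ClusterIP (a sketch; assumes kube-proxy runs in IPVS mode and ipvsadm is installed on the node):

ipvsadm -Ln | grep -A 3 10.12.205.3 # the hello-svc ClusterIP from the describe output above

The manifests below walk through namespace-scoped service discovery: identical Deployments and a Service named ent in both the dev and prod Namespaces, plus a jump Pod for testing.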
apiVersion: v1
kind: Namespace
metadata:
  name: dev
---
apiVersion: v1
kind: Namespace
metadata:
  name: prod
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: enterprise
  namespace: dev
  labels:
    app: enterprise
spec:
  selector:
    matchLabels:
      app: enterprise
  replicas: 2
  template:
    metadata:
      labels:
        app: enterprise
    spec:
      terminationGracePeriodSeconds: 1
      containers:
      - image: nigelpoulton/k8sbook:text-dev
        name: enterprise-ctr
        ports:
        - containerPort: 8080
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: enterprise
  namespace: prod
  labels:
    app: enterprise
spec:
  selector:
    matchLabels:
      app: enterprise
  replicas: 2
  template:
    metadata:
      labels:
        app: enterprise
    spec:
      terminationGracePeriodSeconds: 1
      containers:
      - image: nigelpoulton/k8sbook:text-prod
        name: enterprise-ctr
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: ent
  namespace: dev
spec:
  ports:
  - port: 8080
  selector:
    app: enterprise
---
apiVersion: v1
kind: Service
metadata:
  name: ent
  namespace: prod
spec:
  ports:
  - port: 8080
  selector:
    app: enterprise
---
apiVersion: v1
kind: Pod
metadata:
  name: jump
  namespace: dev
spec:
  terminationGracePeriodSeconds: 5
  containers:
  - image: ubuntu
    name: jump
    tty: true
    stdin: true

[root@master ~]# kubectl get all -n dev
NAME                             READY   STATUS    RESTARTS   AGE
pod/enterprise-76fc64bd9-h5gqg   1/1     Running   0          3h20m
pod/enterprise-76fc64bd9-kpxh9   1/1     Running   0          3h20m
pod/jump                         1/1     Running   0          3h20m

NAME          TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
service/ent   ClusterIP   10.7.27.61   <none>        8080/TCP   3h20m

NAME                         READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/enterprise   2/2     2            2           3h20m

NAME                                   DESIRED   CURRENT   READY   AGE
replicaset.apps/enterprise-76fc64bd9   2         2         2       3h20m


[root@master ~]# kubectl get all -n prod
NAME                              READY   STATUS    RESTARTS   AGE
pod/enterprise-5cfcd578d7-lknbj   1/1     Running   0          3h27m
pod/enterprise-5cfcd578d7-mwzcb   1/1     Running   0          3h27m

NAME          TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)    AGE
service/ent   ClusterIP   10.2.20.188   <none>        8080/TCP   3h27m

NAME                         READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/enterprise   2/2     2            2           3h27m

NAME                                    DESIRED   CURRENT   READY   AGE
replicaset.apps/enterprise-5cfcd578d7   2         2         2       3h27m

[root@master ~]# kubectl exec -it jump -n dev -- bash

root@jump:/# cat /etc/resolv.conf 
nameserver 10.0.0.10
search dev.svc.cluster.local svc.cluster.local cluster.local
options ndots:5

root@jump:/# apt-get update && apt-get install curl -y

root@jump:/# curl ent:8080
Hello from the DEV Namespace!
Hostname: enterprise-76fc64bd9-h5gqg

root@jump:/# curl ent.dev.svc.cluster.local:8080
Hello from the DEV Namespace!
Hostname: enterprise-76fc64bd9-h5gqg

root@jump:/# curl ent.prod.svc.cluster.local:8080
Hello from the PROD Namespace!
Hostname: enterprise-5cfcd578d7-mwzcb

# from inside a Pod, curl another Service by its FQDN and port:
# serviceName.namespace.svc.cluster.local:port

Troubleshooting service discovery (cluster DNS)

  • Pods: managed by the coredns Deployment
  • Service: a ClusterIP Service named kube-dns, listening on TCP/UDP 53
  • Endpoints: also named kube-dns
    All cluster-DNS objects carry the label k8s-app=kube-dns
  1. First, check that the coredns Deployment and the Pods it manages are running:
[root@master ~]# kubectl get deploy -n kube-system -l k8s-app=kube-dns
NAME      READY   UP-TO-DATE   AVAILABLE   AGE
coredns   2/2     2            2           33d

[root@master ~]# kubectl get pods -n kube-system -l k8s-app=kube-dns
NAME                       READY   STATUS    RESTARTS        AGE
coredns-857d9ff4c9-6cb2b   1/1     Running   28 (3d5h ago)   33d
coredns-857d9ff4c9-tvrff   1/1     Running   28 (3d5h ago)   33d

[root@master ~]# kubectl logs -n kube-system coredns-857d9ff4c9-6cb2b 
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/ready: Still waiting on: "kubernetes"
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server
.:53
[INFO] plugin/reload: Running configuration SHA512 = 591cf328cccc12bc490481273e738df59329c62c0b729d94e8b61db9961c2fa5f046dd37f1cf888b953814040d180f52594972691cd6ff41be96639138a43908
CoreDNS-1.11.1
linux/amd64, go1.20.7, ae2bbc2
  2. Check the Service and its Endpoints object; make sure the ClusterIP has an IP address and listens on TCP/UDP 53:
[root@master ~]# kubectl get svc kube-dns -n kube-system
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.0.0.10    <none>        53/UDP,53/TCP,9153/TCP   33d

[root@master ~]# kubectl get ep kube-dns -n kube-system
NAME       ENDPOINTS                                                        AGE
kube-dns   10.244.219.68:53,10.244.219.69:53,10.244.219.68:53 + 3 more...   33d
  3. Once the DNS components look healthy, troubleshoot from a Pod using the gcr.io/kubernetes-e2e-test-images/dnsutils:latest image,
    which includes the ping, traceroute, curl, dig and nslookup commands
apt install -y iputils-ping
apt install -y dnsutils # provides dig and nslookup (there is no separate nslookup package)
apt install -y traceroute
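
A minimal troubleshooting Pod sketch using the dnsutils image mentioned above (the Pod name is an assumption):

apiVersion: v1
kind: Pod
metadata:
  name: dnsutils # assumed name
spec:
  containers:
  - name: dnsutils
    image: gcr.io/kubernetes-e2e-test-images/dnsutils:latest
    command: ["sleep", "3600"]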


root@ubuntu-pod:/# nslookup kubernetes
# output
;; Got recursion not available from 10.0.0.10
Server:         10.0.0.10
Address:        10.0.0.10#53

Name:   kubernetes.default.svc.cluster.local
Address: 10.0.0.1
;; Got recursion not available from 10.0.0.10

volume

nfs

yum install -y nfs-common nfs-utils rpcbind
mkdir /nfsdata
chmod 666 /nfsdata
chown nfsnobody /nfsdata
chgrp nfsnobody /nfsdata # if nfsnobody does not exist, use nobody
cat /etc/exports
/nfsdata *(rw,no_root_squash,no_all_squash,sync)
systemctl restart nfs-server
systemctl restart rpcbind
[root@master script]# ssh node1
[root@node1 ~]# mount -t nfs master:/nfsdata /nfsdata
[root@node1 ~]# cat /etc/fstab
10.0.17.100:/nfsdata /nfsdata nfs defaults,_netdev 0 0
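
A quick check that the export is mounted on the node (a sketch):

mount | grep nfsdata # confirm the NFS mount is active
df -h /nfsdata # confirm the mount point and free space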

pv

apiVersion: v1
kind: PersistentVolume
metadata:
 name: nfspv0 # PV name
spec:
 capacity: # capacity
  storage: 10Gi # storage size
 accessModes: # access modes
  - ReadWriteOnce # single-node read-write: the volume can be mounted read-write by one node (block storage only supports RWO)
  # - ReadWriteMany # multi-node read-write: the volume can be mounted read-write by many nodes (e.g. NFS)
  # - ReadOnlyMany # read-only, mountable by many nodes
 persistentVolumeReclaimPolicy: Recycle # reclaim policy
 storageClassName: nfs # StorageClass name
 nfs:
  path: /nfsdata/share # NFS export path
  server: 10.0.17.100 # NFS server address
[root@master volume]# kubectl get pv
NAME     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM             STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
nfspv0   10Gi       RWO            Recycle          Bound    default/nfspvc0   nfs            <unset>

pvc

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
 name: nfspvc0 # PVC name
spec:
 accessModes: # access modes
  - ReadWriteOnce # single-node read-write (block storage only supports RWO)
  # - ReadWriteMany # multi-node read-write (e.g. NFS)
  # - ReadOnlyMany # read-only, mountable by many nodes
 storageClassName: nfs # StorageClass name
 resources: 
  requests: 
   storage: 5Gi
[root@master volume]# kubectl get pvc
NAME      STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
nfspvc0   Bound    nfspv0   10Gi       RWO            nfs            <unset>                 11m

Create an ubuntu Pod with /data bound to nfspvc0

apiVersion: v1
kind: Pod
metadata:
  name: volpod
spec:
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: nfspvc0 # the PVC to use
  containers:
  - name: ubuntu-ctr
    image: ubuntu:latest
    command:
    - /bin/bash
    - "-c"
    - "sleep 60m"
    volumeMounts:
    - mountPath: /data # mount point inside the ubuntu container
      name: data
[root@master volume]# kubectl exec -it volpod -- bash
root@volpod:/# cd /data/
root@volpod:/data# ls
1  3  8716283  876  default
# inside /data we find the files from /nfsdata/share
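
To confirm the data really lands on the NFS export, write a file from the Pod and check the share on the server (a sketch; the file name is an assumption):

root@volpod:/data# echo hello > test-from-pod.txt
# then, on the NFS server:
ls /nfsdata/share/ # test-from-pod.txt should be listed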

StorageClass

yum install -y nfs-utils rpcbind
mkdir /nfsdata/share
chown nobody /nfsdata/share

echo "/nfsdata/share   *(rw,sync,no_subtree_check)" >> /etc/exports
systemctl enable nfs-server && systemctl enable rpcbind
systemctl restart nfs-server && systemctl restart rpcbind
showmount -e master

Deploy nfs-client-provisioner

vim nfs-client-provisioner.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
 name: nfs-client-provisioner
 namespace: nfs-storageclass
spec:
 replicas: 1
 selector:
  matchLabels:
   app: nfs-client-provisioner
 strategy:
  type: Recreate
 template:
  metadata:
   labels:
    app: nfs-client-provisioner
  spec:
   serviceAccountName: nfs-client-provisioner
   containers:
    - name: nfs-client-provisioner
      # image: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
      # image: k8s.dockerproxy.com/sig-storage/nfs-subdir-external-provisioner:v4.0.2
      image: registry.cn-beijing.aliyuncs.com/blice_haiwai/nfs-subdir-external-provisioner:v4.0.2
      volumeMounts:
      - name: nfs-client-root
        mountPath: /persistentvolumes
      env:
      - name: PROVISIONER_NAME
        value: k8s-sigs.io/nfs-subdir-external-provisioner
      - name: NFS_SERVER
       # value: <YOUR NFS SERVER HOSTNAME>
        value: 10.0.17.100
      - name: NFS_PATH
       # value: /var/nfs
        value: /nfsdata/share
   volumes:
    - name: nfs-client-root
      nfs:
       # server: <YOUR NFS SERVER HOSTNAME>
       server: 10.0.17.100
       # share nfs path
       path: /nfsdata/share

RBAC (ServiceAccount and roles)

vim RBAC.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
 name: nfs-client-provisioner
 # replace with namespace where provisioner is deployed
 namespace: nfs-storageclass
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: nfs-client-provisioner-runner
rules:
 - apiGroups: [""]
   resources: ["nodes"]
   verbs: ["get", "list", "watch"]
 - apiGroups: [""]
   resources: ["persistentvolumes"]
   verbs: ["get", "list", "watch", "create", "delete"]
 - apiGroups: [""]
   resources: ["persistentvolumeclaims"]
   verbs: ["get", "list", "watch", "update"]
 - apiGroups: ["storage.k8s.io"]
   resources: ["storageclasses"]
   verbs: ["get", "list", "watch"]
 - apiGroups: [""]
   resources: ["events"]
   verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: run-nfs-client-provisioner
subjects:
# bind the ServiceAccount created above
 - kind: ServiceAccount
   name: nfs-client-provisioner
   # replace with namespace where provisioner is deployed
   namespace: nfs-storageclass
roleRef:
 kind: ClusterRole
 name: nfs-client-provisioner-runner
 apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: leader-locking-nfs-client-provisioner
 # replace with namespace where provisioner is deployed
 namespace: nfs-storageclass
rules:
 - apiGroups: [""]
   resources: ["endpoints"]
   verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
 name: leader-locking-nfs-client-provisioner
 # replace with namespace where provisioner is deployed
 namespace: nfs-storageclass
subjects:
 - kind: ServiceAccount
   name: nfs-client-provisioner
   # replace with namespace where provisioner is deployed
   namespace: nfs-storageclass
roleRef:
 kind: Role
 name: leader-locking-nfs-client-provisioner
 apiGroup: rbac.authorization.k8s.io

Create the StorageClass

vim StorageClass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
 name: nfs-client # StorageClass is cluster-scoped, so no namespace is set
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
 pathPattern: ${.PVC.namespace}/${.PVC.name}
 onDelete: delete # what happens to the backing directory when the PV is deleted
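
The nfs-storageclass Namespace referenced by the manifests above is not created anywhere in these notes; a minimal sketch of the apply order, using the file names from the vim commands above:

kubectl create namespace nfs-storageclass
kubectl apply -f RBAC.yaml
kubectl apply -f nfs-client-provisioner.yaml
kubectl apply -f StorageClass.yaml
kubectl get pods -n nfs-storageclass # the provisioner Pod should be Running
kubectl get sc nfs-client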

Test Pod

vim test.yaml
kind: PersistentVolumeClaim # 创建pvc
apiVersion: v1
metadata:
 name: test-claim
 annotations: 
spec:
 accessModes:
   - ReadWriteMany # multi-node read-write
 resources:
   requests:
     storage: 1Mi # requested size: 1Mi
 storageClassName: nfs-client # StorageClass name
---
kind: Pod
apiVersion: v1
metadata:
 name: test-pod
spec:
 containers:
 - name: test-pod
   image: wangyanglinux/myapp:v1.0
   volumeMounts:
     - name: nfs-pvc
       mountPath: "/usr/local/nginx/html"
 restartPolicy: "Never"
 volumes: # define the volumes
   - name: nfs-pvc # volume backed by the PVC
     persistentVolumeClaim:
       claimName: test-claim
[root@master ~]# ls /nfsdata/share/default/test-claim/
hostname.html  pppppp

cm

ConfigMaps are typically used to store non-sensitive data such as:

  • Environment variable values
  • Entire configuration files (e.g. web server or database configs)
  • Hostnames
  • Service ports
  • Account names

Format:
key: value

Ways to inject a ConfigMap into the main container:

  • Environment variables
  • Container startup command arguments
  • Files on a volume (the most flexible approach)

Create from the command line

# --from-literal supplies literal key=value pairs
kubectl create configmap test1map \
--from-literal shortname=msb.com \
--from-literal longname=magicsandbox.com

[root@master ~]# kubectl get cm test1map 
NAME       DATA   AGE
test1map   2      14s

[root@master ~]# kubectl describe cm test1map 
Name:         test1map
Namespace:    default
Labels:       <none>
Annotations:  <none>

Data
====
longname:
----
magicsandbox.com
shortname:
----
msb.com

BinaryData
====

Events:  <none>



# --from-file creates the ConfigMap from a file
[root@master cm]# kubectl create cm testmap2 --from-file test.txt 
configmap/testmap2 created
[root@master cm]# kubectl describe cm testmap2 
Name:         testmap2
Namespace:    default
Labels:       <none>
Annotations:  <none>

Data
====
test.txt:
----
ConfigMap,HelloWorld!


BinaryData
====

Events:  <none>

[root@master cm]# kubectl get cm testmap2 -o yaml
apiVersion: v1
data:
  test.txt: |
    ConfigMap,HelloWorld!
kind: ConfigMap
metadata:
  creationTimestamp: "2024-09-25T01:34:00Z"
  name: testmap2
  namespace: default
  resourceVersion: "719604"
  uid: 6c0fd794-5e89-40de-b3a3-74c02799f9cd

Create declaratively

kind: ConfigMap
apiVersion: v1
metadata:
  name: multimap
data: 
  given: Nigel
  family: Poulton
[root@master cm]# kubectl apply -f multimap.yaml 
configmap/multimap created

[root@master cm]# kubectl describe cm multimap 
Name:         multimap
Namespace:    default
Labels:       <none>
Annotations:  <none>

Data
====
family:
----
Poulton
given:
----
Nigel

BinaryData
====

Events:  <none>

Define a ConfigMap with a single entry

# the entry's key is test.conf; everything after the | is treated as a single literal value
key:   test.conf
value: env = plex-test / endpoint = 0.0.0.0:31001 / char = utf8 / vault = PLEX/test / log-size = 512M (one setting per line)


kind: ConfigMap
apiVersion: v1
metadata:
  name: singlemap
data: 
  test.conf: |
    env = plex-test
    endpoint = 0.0.0.0:31001
    char = utf8
    vault = PLEX/test
    log-size = 512M
[root@master cm]# kubectl describe cm singlemap 
Name:         singlemap
Namespace:    default
Labels:       <none>
Annotations:  <none>

Data
====
test.conf:
----
env = plex-test
endpoint = 0.0.0.0:31001
char = utf8
vault = PLEX/test
log-size = 512M


BinaryData
====

Events:  <none>

As environment variables

kind: ConfigMap
apiVersion: v1
metadata:
  name: multimap
data:
  given: Nigel
  family: Poulton
---
apiVersion: v1
kind: Pod
metadata:
  labels:
    chapter: configmaps
  name: envpod
spec:
  containers:
    - name: ctr1
      image: busybox
      command: ["sleep"]
      args: ["infinity"]
      env:  # environment variable configuration
        - name: FIRSTNAME # environment variable name
          valueFrom:
            configMapKeyRef:
              name: multimap # the ConfigMap to reference
              key: given # the key to reference (value: Nigel)
        - name: LASTNAME
          valueFrom:
            configMapKeyRef:
              name: multimap
              key: family

Check the Pod's environment variables

[root@master cm]# kubectl exec envpod -- env | grep NAME
HOSTNAME=envpod
FIRSTNAME=Nigel
LASTNAME=Poulton

As container startup command arguments

kind: ConfigMap
apiVersion: v1
metadata:
  name: multimap
data:
  given: Nigel
  family: Poulton
---
apiVersion: v1
kind: Pod
metadata:
  name: startup-pod
  labels:
    chapter: configmaps
spec:
  restartPolicy: OnFailure
  containers:
    - name: args1
      image: busybox
      command: [ "/bin/sh", "-c", "echo First name $(FIRSTNAME) last name $(LASTNAME)", "wait" ] # 输出环境变量中的$(FIRSTNAME)和$(LASTNAME)
      env: # 以环境变量的方式
        - name: FIRSTNAME
          valueFrom:
            configMapKeyRef:
              name: multimap
              key: given
        - name: LASTNAME
          valueFrom:
            configMapKeyRef:
              name: multimap
              key: family
[root@master cm]# kubectl describe pod startup-pod
    Environment:
      FIRSTNAME:  <set to the key 'given' of config map 'multimap'>   Optional: false
      LASTNAME:   <set to the key 'family' of config map 'multimap'>  Optional: false
[root@master cm]# kubectl logs startup-pod 
First name Nigel last name Poulton

ConfigMaps and volumes

Steps

  1. Create the ConfigMap
  2. Define a ConfigMap volume in the Pod template
  3. Mount the ConfigMap volume into the container
  4. Each entry in the ConfigMap appears as a separate file in the container
kind: ConfigMap
apiVersion: v1
metadata:
  name: multimap
data:
  given: Nigel
  family: Poulton
---
apiVersion: v1
kind: Pod
metadata:
  labels:
    chapter: configmaps
  name: volmap
spec:
  volumes:
  - name: volmap
    configMap:
      name: multimap
  containers:
  - name: ctr1
    image: ubuntu
    command: [ "sleep" ]
    args: [ "3600" ]
    volumeMounts:
    - name: volmap
      mountPath: /etc/name
kubectl exec -it volmap -- bash
root@volmap:~# ll /etc/name
total 0
drwxrwxrwx 3 root root 87 Sep 25 02:49 ./
drwxr-xr-x 1 root root 18 Sep 25 02:49 ../
drwxr-xr-x 2 root root 33 Sep 25 02:49 ..2024_09_25_02_49_39.2473126764/
lrwxrwxrwx 1 root root 32 Sep 25 02:49 ..data -> ..2024_09_25_02_49_39.2473126764/
lrwxrwxrwx 1 root root 13 Sep 25 02:49 family -> ..data/family
lrwxrwxrwx 1 root root 12 Sep 25 02:49 given -> ..data/given
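
Each file's content is the corresponding ConfigMap value; reading them back (a quick check):

root@volmap:~# cat /etc/name/given # prints Nigel
root@volmap:~# cat /etc/name/family # prints Poulton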

StatefulSet (for stateful apps: session data, databases)

StatefulSet characteristics

  • Pod names are predictable and stable
  • DNS hostnames are predictable and stable
  • Volume bindings are predictable and stable

Create a StatefulSet

First create the StorageClass as described in the volume chapter.

Then create the governing headless Service, which owns the DNS subdomain used by all of the StatefulSet's Pods.

# Headless Service for StatefulSet Pod DNS names
apiVersion: v1
kind: Service
metadata:
  name: dullahan
  labels:
    app: web
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: web

部署StatefulSet

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: tkb-sts # StatefulSet name; all Pod names are derived from tkb-sts
spec:
  replicas: 3 # three replicas, tkb-sts-0, tkb-sts-1, tkb-sts-2, created in order
  selector:
    matchLabels:
      app: web
  serviceName: "dullahan"  # the governing Service created above
  template: # Pod template
    metadata:
      labels:
        app: web
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: ctr-web
        image: nginx:latest
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: webroot
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates: # volume claim template: a PVC is created and named automatically for each new Pod
  - metadata:
      name: webroot
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "nfs-client" # StorageClass名字
      resources:
        requests:
          storage: 1Gi

[root@master calico]# kubectl get sts
NAME      READY   AGE
tkb-sts   3/3     14m
[root@master calico]# kubectl get pods -o wide
NAME        READY   STATUS    RESTARTS   AGE     IP               NODE    NOMINATED NODE   READINESS GATES
tkb-sts-0   1/1     Running   0          11m     10.244.166.130   node1   <none>           <none>
tkb-sts-1   1/1     Running   0          7m47s   10.244.104.0     node2   <none>           <none>
tkb-sts-2   1/1     Running   0          7m10s   10.244.104.1     node2   <none>           <none>
[root@master calico]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                       STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
pvc-381d656d-342f-4276-aa95-7af89ea75ea3   1Gi        RWO            Delete           Bound    default/webroot-tkb-sts-2   nfs-client     <unset>                          9m50s
pvc-c5f418d8-90b7-40aa-adeb-e5cb63ce9cf6   1Gi        RWO            Delete           Bound    default/webroot-tkb-sts-0   nfs-client     <unset>                          14m
pvc-d9004ba6-3a5f-4251-b741-21e2adc8a9a3   1Gi        RWO            Delete           Bound    default/webroot-tkb-sts-1   nfs-client     <unset>                          10m

[root@master calico]# kubectl get pvc
NAME                STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
webroot-tkb-sts-0   Bound    pvc-c5f418d8-90b7-40aa-adeb-e5cb63ce9cf6   1Gi        RWO            nfs-client     <unset>                 14m
webroot-tkb-sts-1   Bound    pvc-d9004ba6-3a5f-4251-b741-21e2adc8a9a3   1Gi        RWO            nfs-client     <unset>                 10m
webroot-tkb-sts-2   Bound    pvc-381d656d-342f-4276-aa95-7af89ea75ea3   1Gi        RWO            nfs-client     <unset>                 10m

[root@master ~]# kubectl get svc -o wide
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE   SELECTOR
dullahan     ClusterIP   None           <none>        80/TCP           53m   app=web

Connectivity test
Deploy a jump Pod

apiVersion: v1
kind: Pod
metadata:
  name: jump-pod
spec:
  terminationGracePeriodSeconds: 1
  containers:
  - image: nigelpoulton/curl:1.0
    name: jump-ctr
    tty: true
    stdin: true

Exec into the jump Pod and test


[root@master ~]# kubectl exec -it jump-pod -- bash
root@jump-pod:/# dig SRV dullahan.default.svc.cluster.local
;; ANSWER SECTION:
dullahan.default.svc.cluster.local. 30 IN SRV   0 33 80 tkb-sts-0.dullahan.default.svc.cluster.local.
dullahan.default.svc.cluster.local. 30 IN SRV   0 33 80 tkb-sts-1.dullahan.default.svc.cluster.local.
dullahan.default.svc.cluster.local. 30 IN SRV   0 33 80 tkb-sts-2.dullahan.default.svc.cluster.local.

If the names do not resolve, delete the coredns Pods and let Kubernetes self-heal.
Then create a new ubuntu Pod:

apiVersion: v1
kind: Pod
metadata:
  name: ubuntu-pod
  labels:
    app: web # matches the headless Service's selector, so this Pod also gets a DNS record
spec:
  containers:
  - name: ubuntu
    image: ubuntu
    command: ["sleep"]
    args: ["3600"]
    ports:
        - containerPort: 80
          name: web
root@jump-pod:/# dig SRV dullahan.default.svc.cluster.local
tkb-sts-1.dullahan.default.svc.cluster.local. 30 IN A 10.244.104.0
tkb-sts-0.dullahan.default.svc.cluster.local. 30 IN A 10.244.166.130
tkb-sts-2.dullahan.default.svc.cluster.local. 30 IN A 10.244.104.1
10-244-166-133.dullahan.default.svc.cluster.local. 30 IN A 10.244.166.133

Scaling a StatefulSet

Edit the StatefulSet


replicas: 2
[root@master ~]# kubectl edit sts tkb-sts 
statefulset.apps/tkb-sts edited
[root@master ~]# kubectl get sts
NAME      READY   AGE
tkb-sts   2/2     19h
[root@master ~]# kubectl get pods
NAME         READY   STATUS    RESTARTS   AGE   
tkb-sts-0    1/1     Running   0          19h
tkb-sts-1    1/1     Running   0          19h

# there are still 3 PVCs: scaling up or down does not delete the PVCs associated with Pod replicas
[root@master ~]# kubectl get pvc
NAME                STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
webroot-tkb-sts-0   Bound    pvc-c5f418d8-90b7-40aa-adeb-e5cb63ce9cf6   1Gi        RWO            nfs-client     <unset>                 19h
webroot-tkb-sts-1   Bound    pvc-d9004ba6-3a5f-4251-b741-21e2adc8a9a3   1Gi        RWO            nfs-client     <unset>                 19h
webroot-tkb-sts-2   Bound    pvc-381d656d-342f-4276-aa95-7af89ea75ea3   1Gi        RWO            nfs-client     <unset>                 19h


# check which Pod uses each PVC
[root@master ~]# kubectl describe pvc webroot-tkb-sts-0 | grep Used
Used By:       tkb-sts-0
[root@master ~]# kubectl describe pvc webroot-tkb-sts-1 | grep Used
Used By:       tkb-sts-1
[root@master ~]# kubectl describe pvc webroot-tkb-sts-2 | grep Used
Used By:       <none>


# change the StatefulSet replica count to 4
[root@master ~]# kubectl get sts
NAME      READY   AGE
tkb-sts   4/4     19h

[root@master ~]# kubectl get pvc
NAME                STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
webroot-tkb-sts-0   Bound    pvc-c5f418d8-90b7-40aa-adeb-e5cb63ce9cf6   1Gi        RWO            nfs-client     <unset>                 19h
webroot-tkb-sts-1   Bound    pvc-d9004ba6-3a5f-4251-b741-21e2adc8a9a3   1Gi        RWO            nfs-client     <unset>                 19h
webroot-tkb-sts-2   Bound    pvc-381d656d-342f-4276-aa95-7af89ea75ea3   1Gi        RWO            nfs-client     <unset>                 19h
webroot-tkb-sts-3   Bound    pvc-aa15e166-d805-4820-a592-1350bcfcb179   1Gi        RWO            nfs-client     <unset>                 43s

# check which Pod uses each PVC
[root@master ~]# kubectl describe pvc webroot-tkb-sts-0 | grep Used
Used By:       tkb-sts-0
[root@master ~]# kubectl describe pvc webroot-tkb-sts-1 | grep Used
Used By:       tkb-sts-1
[root@master ~]# kubectl describe pvc webroot-tkb-sts-2 | grep Used
Used By:       tkb-sts-2
[root@master ~]# kubectl describe pvc webroot-tkb-sts-3 | grep Used
Used By:       tkb-sts-3

# check the directories under the NFS export
[root@master ~]# ls /nfsdata/share/default/
webroot-tkb-sts-0  webroot-tkb-sts-1  webroot-tkb-sts-2  webroot-tkb-sts-3

Pod start/stop ordering

Pod start and stop ordering is controlled by spec.podManagementPolicy:

  • OrderedReady: Pods are managed strictly in order (the default)
  • Parallel: Pods are created and deleted in parallel
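
A minimal fragment showing where the field sits in the tkb-sts manifest (only the relevant lines; the policy is set when the StatefulSet is created):

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: tkb-sts
spec:
  podManagementPolicy: Parallel # default is OrderedReady
  serviceName: "dullahan"
  replicas: 3
  # selector and template as in the manifest above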

Performing a rolling upgrade

The upgrade starts from the Pod with the highest ordinal and proceeds one Pod at a time down to the lowest ordinal.
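
For example, changing the container image and watching the rollout (a sketch; the nginx:1.25 tag is an assumption):

kubectl set image statefulset/tkb-sts ctr-web=nginx:1.25
kubectl rollout status statefulset/tkb-sts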

Simulating a failure

Delete a Pod and watch the replacement

[root@master ~]# kubectl get pods
NAME        READY   STATUS    RESTARTS   AGE
tkb-sts-0   1/1     Running   0          19h
tkb-sts-1   1/1     Running   0          19h
tkb-sts-2   1/1     Running   0          12m
tkb-sts-3   1/1     Running   0          12m


[root@master ~]# kubectl describe pod tkb-sts-1
Name:             tkb-sts-1
Namespace:        default
Status:           Running
IP:               10.244.104.0
Volumes:
  webroot:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  webroot-tkb-sts-1

  
[root@master ~]# kubectl delete pod tkb-sts-1
[root@master ~]# kubectl get pods -w
NAME        READY   STATUS        RESTARTS   AGE
tkb-sts-0   1/1     Running       0          19h
tkb-sts-1   0/1     Terminating   0          1s
tkb-sts-2   1/1     Running       0          15m
tkb-sts-3   1/1     Running       0          15m
tkb-sts-1   0/1     Terminating   0          2s
tkb-sts-1   0/1     Terminating   0          2s
tkb-sts-1   0/1     Terminating   0          2s
tkb-sts-1   0/1     Pending       0          0s
tkb-sts-1   0/1     Pending       0          0s
tkb-sts-1   0/1     ContainerCreating   0          0s
tkb-sts-1   0/1     ContainerCreating   0          0s


[root@master ~]# kubectl describe pod tkb-sts-1 | grep ClaimName
    ClaimName:  webroot-tkb-sts-1

Deleting a StatefulSet

Scale to zero first so the Pods shut down in order

[root@master ~]# kubectl scale statefulset tkb-sts --replicas=0
statefulset.apps/tkb-sts scaled
[root@master ~]# kubectl get sts tkb-sts 
NAME      READY   AGE
tkb-sts   0/0     19h

[root@master ~]# kubectl delete sts tkb-sts 
statefulset.apps "tkb-sts" deleted

[root@master ~]# kubectl delete svc dullahan 
service "dullahan" deleted

[root@master ~]# kubectl delete pvc webroot-tkb-sts-0 webroot-tkb-sts-1 webroot-tkb-sts-2 webroot-tkb-sts-3 
persistentvolumeclaim "webroot-tkb-sts-0" deleted
persistentvolumeclaim "webroot-tkb-sts-1" deleted
persistentvolumeclaim "webroot-tkb-sts-2" deleted
persistentvolumeclaim "webroot-tkb-sts-3" deleted


# because a StorageClass is used, the PVs are removed automatically when the PVCs are deleted, but the files remain on the NFS export, so the data is preserved
[root@master ~]# ls /nfsdata/share/default/
webroot-tkb-sts-0  webroot-tkb-sts-2
webroot-tkb-sts-1  webroot-tkb-sts-3