Kubernetes Notes
Creating a Pod from a First YAML File
kubectl explain pod  #show field documentation for pod
kubectl explain pod.apiVersion  #show field documentation for pod.apiVersion
kubectl explain pod.spec  #show field documentation for pod.spec
kubectl explain pod.spec.containers  #show field documentation for pod.spec.containers
cat > pod.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
  labels:
    app: myapp
    version: v1
spec:
  containers:
  - name: app
    image: hub.hdj.com/library/nginx:v1
EOF
kubectl apply -f pod.yml  #create the pod from pod.yml
kubectl get pod  #list pods and check whether creation succeeded
kubectl describe pod myapp-pod  #show detailed information about myapp-pod
kubectl logs myapp-pod -c app  #view the logs of the app container in myapp-pod
kubectl delete pod myapp-pod  #delete myapp-pod
kubectl get pod -o wide  #list pods with more detail
Deleting Pods, Services, and Deployments
kubectl get deployment
kubectl delete deployment --all  #delete all deployments in the default namespace
kubectl get pod
kubectl delete pod --all  #delete all pods in the default namespace
kubectl get svc
kubectl delete svc nginx-deployment  #delete the nginx-deployment service in the default namespace
Init Containers and Related Pod Operations
Example: init containers blocking the main container
Define an init-pod.yml containing two init containers and one main container.
cat > init-pod.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
  labels:
    app: myapp
spec:
  containers:
  - name: myapp-container
    image: busybox
    command: ['sh', '-c', 'echo The app is running! && sleep 3600']
  initContainers:
  - name: init-myservice
    image: busybox
    command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
  - name: init-mydb
    image: busybox
    command: ['sh', '-c', 'until nslookup mydb; do echo waiting for mydb; sleep 2; done;']
EOF
kubectl create -f init-pod.yml  #create myapp-pod
kubectl get pod  #check the pod; it is not Running yet
kubectl describe pod myapp-pod  #show the pod's details
kubectl logs myapp-pod -c init-myservice  #view the init-myservice logs
cat > myservice.yml <<EOF
kind: Service
apiVersion: v1
metadata:
  name: myservice
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 9376
EOF
kubectl create -f myservice.yml
kubectl get svc
kubectl describe pod myapp-pod
kubectl get pod
cat > mydb.yml <<EOF
kind: Service
apiVersion: v1
metadata:
  name: mydb
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 9377
EOF
kubectl create -f mydb.yml
kubectl get svc
kubectl get pod  #the pod's status is now Running
kubectl describe pod myapp-pod
Probes - Readiness Probe
Use httpGet to check whether index1.html exists; once a request to /index1.html returns 200, READY is reported as 1/1.
cat > readness.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: readiness-httpget-pod
  namespace: default
spec:
  containers:
  - name: readiness-httpget-container
    image: hub.hdj.com/library/nginx:v1
    imagePullPolicy: IfNotPresent
    readinessProbe:
      httpGet:
        port: 80
        path: /index1.html
      initialDelaySeconds: 1
      periodSeconds: 3
EOF
kubectl create -f readness.yml
#check the pod now; READY is 0/1
kubectl get pod
#enter the container, start sh, and write an index1.html
kubectl exec readiness-httpget-pod -it -- /bin/sh
cd /usr/share/nginx/html/
echo 123 > index1.html
exit
#check the pod again; READY is now 1/1
kubectl get pod
Probes - Liveness Probe
exec-based check
On startup the container creates /tmp/live, waits 60 seconds, then deletes it. The livenessProbe checks whether /tmp/live exists; if it does not, the container is considered unhealthy and is restarted. The net effect of the pod below is that the container restarts roughly every 60 seconds.
cat > liveness.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: liveness-exec-pod
  namespace: default
spec:
  containers:
  - name: liveness-exec-container
    image: hub.hdj.com/library/busybox:v1
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh","-c","touch /tmp/live ; sleep 60; rm -rf /tmp/live; sleep 3600"]
    livenessProbe:
      exec:
        command: ["test","-e","/tmp/live"]
      initialDelaySeconds: 1
      periodSeconds: 3
EOF
kubectl create -f liveness.yml
Observed behavior:
[root@k8s-master01 install-k8s]# kubectl get pod -w
NAME READY STATUS RESTARTS AGE
liveness-exec-pod 1/1 Running 0 53s
liveness-exec-pod 1/1 Running 1 118s
liveness-exec-pod 1/1 Running 2 3m36s
liveness-exec-pod 1/1 Running 3 5m16s
httpGet-based check
Use httpGet to monitor /index.html; after /index.html is deleted by hand, the container is restarted.
cat > liveness-httpget.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: liveness-httpget-pod
  namespace: default
spec:
  containers:
  - name: liveness-httpget-container
    image: hub.hdj.com/library/nginx:v1
    imagePullPolicy: IfNotPresent
    ports:
    - name: http
      containerPort: 80
    livenessProbe:
      httpGet:
        port: http
        path: /index.html
      initialDelaySeconds: 1
      periodSeconds: 3
      timeoutSeconds: 10
EOF
kubectl create -f liveness-httpget.yml
kubectl exec liveness-httpget-pod -it -- rm -rf /usr/share/nginx/html/index.html  #delete index.html inside the container
Result:
[root@k8s-master01 install-k8s]# kubectl get pod -w
NAME READY STATUS RESTARTS AGE
liveness-httpget-pod 1/1 Running 0 14s
liveness-httpget-pod 1/1 Running 1 89s
TCP-based check
Create a pod whose container serves on port 80, but point the livenessProbe's tcpSocket at port 8110; since nothing listens there, the container restarts over and over.
cat > liveness-tcp.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: probe-tcp
spec:
  containers:
  - name: nginx
    image: hub.hdj.com/library/nginx:v1
    livenessProbe:
      initialDelaySeconds: 5
      timeoutSeconds: 1
      tcpSocket:
        port: 8110
EOF
kubectl create -f liveness-tcp.yml
Result:
[root@k8s-master01 install-k8s]# kubectl get pod -w
probe-tcp 1/1 Running 0 1s
probe-tcp 1/1 Running 1 29s
probe-tcp 1/1 Running 2 58s
probe-tcp 1/1 Running 3 88s
probe-tcp 1/1 Running 4 119s
probe-tcp 0/1 CrashLoopBackOff 4 2m29s
probe-tcp 1/1 Running 5 3m14s
probe-tcp 0/1 CrashLoopBackOff 5 3m39s
Probes - Readiness and Liveness Probes Combined
Create a pod where an httpGet readiness probe checks that index.html exists before the container is marked ready, and an httpGet liveness probe keeps checking index.html to decide whether the container is still alive.
cat > readness-liveness-httpget.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: readness-liveness-httpget-pod
  namespace: default
spec:
  containers:
  - name: readness-liveness-httpget-container
    image: hub.hdj.com/library/nginx:v1
    imagePullPolicy: IfNotPresent
    ports:
    - name: http
      containerPort: 80
    readinessProbe:
      httpGet:
        port: 80
        path: /index.html
      initialDelaySeconds: 1
      periodSeconds: 3
    livenessProbe:
      httpGet:
        port: http
        path: /index.html
      initialDelaySeconds: 1
      periodSeconds: 3
      timeoutSeconds: 10
EOF
kubectl create -f readness-liveness-httpget.yml
postStart and preStop Hooks
Write a message when the container starts, and write another when it is about to stop.
cat > start-stop.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: start-stop-pod
spec:
  containers:
  - name: start-stop-container
    image: hub.hdj.com/library/nginx:v1
    lifecycle:
      postStart:
        exec:
          command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
      preStop:
        exec:
          command: ["/bin/sh", "-c", "echo Hello from the postStop handler > /usr/share/message"]
EOF
kubectl create -f start-stop.yml
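To confirm the postStart hook ran, read back the file it wrote (a quick check against the manifest above):
kubectl exec start-stop-pod -- cat /usr/share/message  #prints: Hello from the postStart handler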
Controllers
ReplicaSet (rs)
cat > rs.yml <<EOF
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
  name: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      tier: frontend
  template:
    metadata:
      labels:
        tier: frontend
    spec:
      containers:
      - name: hdj-nginx
        image: hub.hdj.com/library/nginx:v1
        env:
        - name: GET_HOSTS_FROM
          value: dns
        ports:
        - containerPort: 80
EOF
Transcript:
[root@k8s-master01]# kubectl apply -f rs.yml  #create the rs
replicaset.extensions/frontend created
[root@k8s-master01 controller-study]# kubectl get rs  #list replicasets
NAME DESIRED CURRENT READY AGE
frontend 3 3 3 17s
[root@k8s-master01]# kubectl get pod --show-labels  #list pods with their labels
NAME READY STATUS RESTARTS AGE LABELS
frontend-rjrsw 1/1 Running 0 73s tier=frontend
frontend-vlqwv 1/1 Running 0 73s tier=frontend
frontend-wfcmw 1/1 Running 0 73s tier=frontend
[root@k8s-master01]# kubectl label pod frontend-vlqwv tier=frontend1 --overwrite=True  #change the label on one of the pods
pod/frontend-vlqwv labeled
[root@k8s-master01]# kubectl get pod --show-labels  #listing again shows an extra pod: after the label change only two pods match tier=frontend, so the rs creates one more
NAME READY STATUS RESTARTS AGE LABELS
frontend-rjrsw 1/1 Running 0 2m2s tier=frontend
frontend-tv7n8 1/1 Running 0 6s tier=frontend
frontend-vlqwv 1/1 Running 0 2m2s tier=frontend1
frontend-wfcmw 1/1 Running 0 2m2s tier=frontend
[root@k8s-master01]# kubectl delete rs --all  #delete all replicasets
replicaset.extensions "frontend" deleted
[root@k8s-master01]# kubectl get pod --show-labels  #only the tier=frontend pods were deleted; the tier=frontend1 pod remains
NAME READY STATUS RESTARTS AGE LABELS
frontend-vlqwv 1/1 Running 0 2m44s tier=frontend1
Deployment
Creating a Deployment creates a ReplicaSet, and the ReplicaSet maintains the pods.
cat > deployment.yml <<EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: hub.hdj.com/library/nginx:v1
        ports:
        - containerPort: 80
EOF
kubectl apply -f deployment.yml --record  #--record keeps a record of the change in the rollout history
kubectl scale deployment nginx-deployment --replicas=10  #scale the replica count to 10
kubectl get pod  #listing pods now shows 10 of them
kubectl set image deployment/nginx-deployment nginx=wangyanglinux/myapp:v2  #update the container image
kubectl rollout undo deployment/nginx-deployment  #roll back the last change
kubectl rollout status deployment/nginx-deployment  #check the rollout status
kubectl rollout history deployment/nginx-deployment  #view the rollout history
kubectl rollout undo deployment/nginx-deployment --to-revision=3  #roll back to revision 3
Suppose you create a Deployment with 5 replicas of nginx:1.7.9, but only 3 of them exist when you update the Deployment to 5 replicas of nginx:1.9.1. In that case the Deployment immediately kills the 3 nginx:1.7.9 pods it has created and starts creating nginx:1.9.1 pods; it does not wait for all 5 nginx:1.7.9 pods to exist before changing course.
You can set .spec.revisionHistoryLimit to control how many revisions a Deployment keeps. By default all revisions are kept; setting it to 0 disables rollback entirely.
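For example, the limit can be set on the running Deployment with a patch (a sketch; the value 5 is arbitrary):
kubectl patch deployment nginx-deployment -p '{"spec":{"revisionHistoryLimit":5}}'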
DaemonSet
A DaemonSet ensures that each node runs exactly one copy of a pod.
cat > daemonset.yml <<EOF
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: deamonset-example
  labels:
    app: daemonset
spec:
  selector:
    matchLabels:
      name: deamonset-example
  template:
    metadata:
      labels:
        name: deamonset-example
    spec:
      containers:
      - name: daemonset-example
        image: hub.hdj.com/library/nginx:v1  #assumed image; the original manifest omitted it, so the nginx image from earlier examples is reused
EOF
kubectl create -f daemonset.yml
Transcript:
[root@k8s-master01]# kubectl get pod -o wide  #there is one pod on node01 and one on node02
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deamonset-example-chwrs 1/1 Running 0 18s 10.244.2.54 k8s-node02 <none> <none>
deamonset-example-zz6nr 1/1 Running 0 18s 10.244.1.54 k8s-node01 <none> <none>
[root@k8s-master01]# kubectl delete pod deamonset-example-chwrs  #delete one pod
pod "deamonset-example-chwrs" deleted
[root@k8s-master01]# kubectl get pod -o wide  #listing again still shows one pod each on node01 and node02
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deamonset-example-kmptk 1/1 Running 0 2s 10.244.2.55 k8s-node02 <none> <none>
deamonset-example-zz6nr 1/1 Running 0 2m14s 10.244.1.54 k8s-node01 <none> <none>
Job
A Job manages pods that run to completion.
The example uses the perl image to compute pi to 2000 digits.
cat > job.yml <<EOF
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  template:
    metadata:
      name: pi
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
EOF
kubectl create -f job.yml
kubectl get job  #list jobs
kubectl logs pi-gdfds  #view the pod's log (the pod name suffix will differ)
CronJob
Runs tasks periodically. A CronJob creates Jobs, and the Jobs manage the pods.
The example creates a new job every minute that prints the date and a greeting.
cat > cronjob.yml <<EOF
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure
EOF
kubectl create -f cronjob.yml
kubectl get cronjob  #list cronjobs
kubectl get job  #a new job appears every minute
kubectl get pod  #each new job creates a new pod to run the task
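To pause the schedule without deleting the CronJob, toggle its suspend field (a sketch using kubectl patch; suspend is part of the CronJob spec):
kubectl patch cronjob hello -p '{"spec":{"suspend":true}}'   #stop spawning new jobs
kubectl patch cronjob hello -p '{"spec":{"suspend":false}}'  #resume the schedule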
Services
ClusterIP svc
First create a deployment, then front it with a svc.
The svc matches pods by label and proxies traffic to them.
cat > svc-deployment.yml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      release: stabel
  template:
    metadata:
      labels:
        app: myapp
        release: stabel
        env: test
    spec:
      containers:
      - name: myapp
        image: wangyanglinux/myapp:v2
        imagePullPolicy: IfNotPresent
        ports:
        - name: http
          containerPort: 80
EOF
kubectl apply -f svc-deployment.yml
cat > svc.yml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: myapp
  namespace: default
spec:
  type: ClusterIP
  selector:
    app: myapp
    release: stabel
  ports:
  - name: http
    port: 80
    targetPort: 80
EOF
kubectl apply -f svc.yml
Transcript:
[root@k8s-master01 service]# kubectl get svc  #list services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 2d19h
myapp ClusterIP 10.99.87.110 <none> 80/TCP 26s
[root@k8s-master01 service]# ipvsadm -Ln  #inspect the ipvs rules; note the entry for 10.99.87.110
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.96.0.1:443 rr
-> 192.168.183.10:6443 Masq 1 3 0
TCP 10.96.0.10:53 rr
-> 10.244.0.14:53 Masq 1 0 0
-> 10.244.0.15:53 Masq 1 0 0
TCP 10.96.0.10:9153 rr
-> 10.244.0.14:9153 Masq 1 0 0
-> 10.244.0.15:9153 Masq 1 0 0
TCP 10.99.87.110:80 rr
-> 10.244.1.60:80 Masq 1 0 0
-> 10.244.1.61:80 Masq 1 0 0
-> 10.244.2.62:80 Masq 1 0 0
UDP 10.96.0.10:53 rr
-> 10.244.0.14:53 Masq 1 0 0
-> 10.244.0.15:53 Masq 1 0 0
[root@k8s-master01 service]# curl 10.99.87.110  #requests to the service IP are load-balanced round-robin across the pods
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>
[root@k8s-master01 service]# curl 10.99.87.110/hostname.html
myapp-deploy-6cc7c66999-4nmz7
[root@k8s-master01 service]# curl 10.99.87.110/hostname.html
myapp-deploy-6cc7c66999-mv68q
[root@k8s-master01 service]# curl 10.99.87.110/hostname.html
myapp-deploy-6cc7c66999-gvxmr
Headless svc
A headless service fronts all of the matching pods without allocating a cluster IP.
Once the svc is created, its records are written into CoreDNS. The naming rule is: <service name>.<namespace>.<cluster domain>; the default namespace is default and the default cluster domain is svc.cluster.local.
cat > svc-headless.yml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: myapp-headless
  namespace: default
spec:
  selector:
    app: myapp
  clusterIP: "None"
  ports:
  - port: 80
    targetPort: 80
EOF
kubectl apply -f svc-headless.yml
yum -y install bind-utils  #install the dig command
Transcript:
[root@k8s-master01 service]# kubectl get pod -n kube-system -o wide|grep coredns  #find the coredns pod IPs
coredns-5c98db65d4-m84ww 1/1 Running 3 2d20h 10.244.0.15 k8s-master01 <none> <none>
coredns-5c98db65d4-mtlqz 1/1 Running 3 2d20h 10.244.0.14 k8s-master01 <none> <none>
[root@k8s-master01 service]# dig -t A myapp-headless.default.svc.cluster.local. @10.244.0.14 |grep myapp-headless  #resolving through coredns returns all of the pods
; <<>> DiG 9.11.4-P2-RedHat-9.11.4-9.P2.el7 <<>> -t A myapp-headless.default.svc.cluster.local. @10.244.0.14
;myapp-headless.default.svc.cluster.local. IN A
myapp-headless.default.svc.cluster.local. 30 IN A 10.244.1.61
myapp-headless.default.svc.cluster.local. 30 IN A 10.244.1.60
myapp-headless.default.svc.cluster.local. 30 IN A 10.244.2.62
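The same lookup works from inside the cluster without knowing a CoreDNS pod IP (a sketch; --rm --restart=Never makes this a throwaway pod):
kubectl run dns-test --rm -it --image=busybox --restart=Never -- nslookup myapp-headless.default.svc.cluster.local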
NodePort svc
Publishing a service as NodePort exposes a random port above 30000 on every node, making the service reachable from outside the cluster.
cat > svc-nodeport.yml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: myapp
  namespace: default
spec:
  type: NodePort
  selector:
    app: myapp
    release: stabel
  ports:
  - name: http
    port: 80
    targetPort: 80
EOF
kubectl apply -f svc-nodeport.yml
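If a predictable port is preferred over a random one, nodePort can be set explicitly on the port entry, as long as it falls inside the cluster's node-port range (a sketch; 30080 is an arbitrary choice):
ports:
- name: http
  port: 80
  targetPort: 80
  nodePort: 30080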
Result:
[root@k8s-master01 service]# kubectl get svc  #list services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 2d20h
myapp NodePort 10.99.87.110 <none> 80:31768/TCP 79m
[root@k8s-master01 service]# ipvsadm -Ln  #inspect the ipvs routing rules; 192.168.183.10:31768 is routed to 10.244.1.60:80, 10.244.1.61:80, and 10.244.2.62:80
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 172.17.0.1:31768 rr
-> 10.244.1.60:80 Masq 1 0 0
-> 10.244.1.61:80 Masq 1 0 0
-> 10.244.2.62:80 Masq 1 0 0
TCP 192.168.183.10:31768 rr
-> 10.244.1.60:80 Masq 1 0 0
-> 10.244.1.61:80 Masq 1 0 1
-> 10.244.2.62:80 Masq 1 0 1
[root@k8s-master01 service]# netstat -nap|grep :31768  #check port 31768 exposed on the host
tcp6 0 0 :::31768 :::* LISTEN 79184/kube-proxy
Any of the following addresses now reaches the pods:
http://192.168.183.10:31768
http://192.168.183.20:31768
http://192.168.183.21:31768
ExternalName svc
ExternalName provides DNS-level redirection, i.e. a proxy implemented purely in DNS.
cat > svc-extrnalname.yml <<EOF
kind: Service
apiVersion: v1
metadata:
  name: my-service-1
  namespace: default
spec:
  type: ExternalName
  externalName: hub.hdj.com
EOF
kubectl create -f svc-extrnalname.yml
Result:
[root@k8s-master01 service]# kubectl get svc  #my-service-1 maps to hub.hdj.com
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 2d22h
my-service-1 ExternalName <none> hub.hdj.com <none> 6s
[root@k8s-master01 service]# dig -t A my-service-1.default.svc.cluster.local. @10.244.0.14 |grep my-service-1  #resolving my-service-1 through coredns returns hub.hdj.com
; <<>> DiG 9.11.4-P2-RedHat-9.11.4-9.P2.el7 <<>> -t A my-service-1.default.svc.cluster.local. @10.244.0.14
;my-service-1.default.svc.cluster.local. IN A
my-service-1.default.svc.cluster.local. 5 IN CNAME hub.hdj.com.
ingress-nginx
ingress-nginx docs: https://kubernetes.github.io/ingress-nginx/
Kubernetes Services only provide layer-4 proxying; for layer-7 proxying, extend the cluster with ingress-nginx.
Download https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.25.0/deploy/static/mandatory.yaml.
The image it references, quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.0, cannot be pulled directly; either change quay.io in the yaml to the USTC mirror quay.mirrors.ustc.edu.cn, or pull the image elsewhere, copy it to every node, and load it there. I used the second approach.
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.25.0/deploy/static/mandatory.yaml
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.25.0/deploy/static/provider/baremetal/service-nodeport.yaml
tar -zxf ingree.contro.tar.gz
docker load -i ingree.contro.tar
kubectl apply -f mandatory.yaml
kubectl apply -f service-nodeport.yaml
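With the controller and its NodePort service in place, HTTP routing rules are declared as Ingress resources. A minimal sketch (the host name is made up; myapp refers to the ClusterIP service created earlier):
cat > ingress-http.yml <<EOF
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx-test
spec:
  rules:
  - host: www.myapp.com
    http:
      paths:
      - path: /
        backend:
          serviceName: myapp
          servicePort: 80
EOF
kubectl apply -f ingress-http.yml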
Storage
Kubernetes storage includes ConfigMap, Secret, volume, and persistent volume.
ConfigMap
A ConfigMap is typically used to store configuration files.
Creating from files
mkdir dir
cd dir
cat > game.properties <<EOF
enemies=aliens
lives=3
enemies.cheat=true
enemies.cheat.level=noGoodRotten
secret.code.passphrase=UUDDLRLRBABAS
secret.code.allowed=true
secret.code.lives=30
EOF
cat > ui.properties <<EOF
color.good=purple
color.bad=yellow
allow.textmode=true
how.nice.to.look=fairlyNice
EOF
cd ..
kubectl create configmap game-config --from-file=./dir  #create a configmap from all files in the directory
kubectl get cm  #list configmaps
kubectl get cm game-config -o yaml  #show the configmap as yaml
kubectl describe cm game-config  #describe the configmap
Creating from literal values
kubectl create configmap special-config --from-literal=special.how=very --from-literal=special.type=charm
kubectl describe cm special-config
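A ConfigMap can also be built from an env-style file with one KEY=value pair per line (a sketch; the file name and contents are made up):
cat > special.env <<EOF
special.how=very
special.type=charm
EOF
kubectl create configmap special-config-env --from-env-file=special.env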
Injecting a ConfigMap into pod environment variables
cat > env/env.yml <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: env-config
  namespace: default
data:
  log_level: INFO
EOF
kubectl apply -f env/env.yml
#create a pod that prints its environment (env) on startup
cat > env/pod-env.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: dapi-test-pod
spec:
  containers:
  - name: test-container
    image: hub.hdj.com/library/nginx:v1
    command: [ "/bin/sh", "-c", "env" ]
    env:
    - name: SPECIAL_LEVEL_KEY
      valueFrom:
        configMapKeyRef:
          name: special-config
          key: special.how
    - name: SPECIAL_TYPE_KEY
      valueFrom:
        configMapKeyRef:
          name: special-config
          key: special.type
    envFrom:
    - configMapRef:
        name: env-config
  restartPolicy: Never
EOF
kubectl apply -f env/pod-env.yml
Transcript:
[root@k8s-master01 configmap]# kubectl get pod
NAME READY STATUS RESTARTS AGE
dapi-test-pod 0/1 Completed 0 9s
[root@k8s-master01 configmap]# kubectl log dapi-test-pod  #view the log
log is DEPRECATED and will be removed in a future version. Use logs instead.
KUBERNETES_PORT=tcp://10.96.0.1:443
KUBERNETES_SERVICE_PORT=443
HOSTNAME=dapi-test-pod
HOME=/root
PKG_RELEASE=1~buster
SPECIAL_TYPE_KEY=charm
KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
NGINX_VERSION=1.17.8
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
KUBERNETES_PORT_443_TCP_PORT=443
NJS_VERSION=0.3.8
KUBERNETES_PORT_443_TCP_PROTO=tcp
SPECIAL_LEVEL_KEY=very
log_level=INFO
KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
KUBERNETES_SERVICE_PORT_HTTPS=443
KUBERNETES_SERVICE_HOST=10.96.0.1
PWD=/
Injecting a ConfigMap into the startup command
cat > pod-command-env.yml <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: dapi-test-commond-pod
spec:
  containers:
  - name: test-container
    image: hub.hdj.com/library/nginx:v1
    command: [ "/bin/sh", "-c", "echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY) $(log_level)" ]
    env:
    - name: SPECIAL_LEVEL_KEY
      valueFrom:
        configMapKeyRef:
          name: special-config
          key: special.how
    - name: SPECIAL_TYPE_KEY
      valueFrom:
        configMapKeyRef:
          name: special-config
          key: special.type
    envFrom:
    - configMapRef:
        name: env-config
  restartPolicy: Never
EOF
kubectl apply -f pod-command-env.yml
Result:
kubectl get pod
NAME READY STATUS RESTARTS AGE
dapi-test-commond-pod 0/1 Completed 0 6s
dapi-test-pod 0/1 Completed 0 13m
[root@k8s-master01 env]# kubectl log dapi-test-commond-pod
log is DEPRECATED and will be removed in a future version. Use logs instead.
very charm INFO
Consuming a ConfigMap through the volume plugin
Create a pod that registers the special-config ConfigMap as a volume named config-volume and mounts it at /etc/config in test-container. The ConfigMap's keys then show up as files under /etc/config.
cat > pod-volumes.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: dapi-test-volume-pod
spec:
  containers:
  - name: test-container
    image: hub.hdj.com/library/nginx:v1
    command: [ "/bin/sh", "-c", "cat /etc/config/special.how;sleep 600s;" ]
    volumeMounts:
    - name: config-volume
      mountPath: /etc/config
  volumes:
  - name: config-volume
    configMap:
      name: special-config
  restartPolicy: Never
EOF
kubectl apply -f pod-volumes.yml
Transcript:
[root@k8s-master01 env]# kubectl get pod
NAME READY STATUS RESTARTS AGE
dapi-test-volume-pod 0/1 Completed 0 4s
[root@k8s-master01 env]# kubectl logs dapi-test-volume-pod
very
[root@k8s-master01 env]# kubectl exec dapi-test-volume-pod -it -- /bin/bash
root@dapi-test-volume-pod:/# cd /etc/config
root@dapi-test-volume-pod:/etc/config# ls
special.how special.type
root@dapi-test-volume-pod:/etc/config# cat special.how
very
root@dapi-test-volume-pod:/etc/config# cat special.type
charm
ConfigMap hot updates
cat > hot-update.yml <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: log-config
  namespace: default
data:
  log_level: INFO
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: my-nginx
spec:
  replicas: 1
  template:
    metadata:
      labels:
        run: my-nginx
    spec:
      containers:
      - name: my-nginx
        image: hub.hdj.com/library/nginx:v1
        ports:
        - containerPort: 80
        volumeMounts:
        - name: config-volume
          mountPath: /etc/config
      volumes:
      - name: config-volume
        configMap:
          name: log-config
EOF
kubectl apply -f hot-update.yml
kubectl exec `kubectl get pods -l run=my-nginx -o=name|cut -d "/" -f2` cat /etc/config/log_level  #check log_level; it is INFO
kubectl edit cm log-config  #edit the log-config cm and change log_level to DEBUG
kubectl exec `kubectl get pods -l run=my-nginx -o=name|cut -d "/" -f2` cat /etc/config/log_level  #check log_level again; it is now DEBUG
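Note that hot updates only reach volume-mounted keys; environment variables injected from a ConfigMap are not refreshed. A common workaround (a sketch; the annotation key and value are arbitrary) is to touch the pod template so the Deployment rolls its pods:
kubectl patch deployment my-nginx --patch '{"spec": {"template": {"metadata": {"annotations": {"version/config": "20200212"}}}}}'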
Secret
Kubernetes' mechanism for storing sensitive values such as passwords.
Creating a Secret
Create a secret with username admin and password 1f2d1e2e67df. The type is Opaque, so the plaintext values are stored base64-encoded.
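The data values used below can be produced with base64 (echo -n keeps the trailing newline out of the encoding):
echo -n 'admin' | base64         #YWRtaW4=
echo -n '1f2d1e2e67df' | base64  #MWYyZDFlMmU2N2Rm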
cat > secrets.yml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: mysecret
type: Opaque
data:
  password: MWYyZDFlMmU2N2Rm
  username: YWRtaW4=
EOF
kubectl create -f secrets.yml
kubectl get secret  #list secrets
Mounting a Secret as a volume
Mount the secret at /etc/mysecret.
cat > pod-secret-volume.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  labels:
    name: seret-test
  name: seret-test
spec:
  volumes:
  - name: secrets
    secret:
      secretName: mysecret
  containers:
  - image: hub.hdj.com/library/nginx:v1
    name: db
    volumeMounts:
    - name: secrets
      mountPath: '/etc/mysecret'
      readOnly: true
EOF
kubectl create -f pod-secret-volume.yml
Transcript:
[root@k8s-master01 secret]# kubectl get pod
NAME READY STATUS RESTARTS AGE
seret-test 1/1 Running 0 7s
[root@k8s-master01 secret]# kubectl exec seret-test -it -- /bin/sh  #inside the container, /etc/mysecret contains password and username; reading them shows the plaintext values
# cd /etc/mysecret
# ls
password username
# cat password
1f2d1e2e67df
# cat username
admin
Injecting a Secret into environment variables
cat > env-secret.yml <<EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: pod-deployment
spec:
  replicas: 2
  template:
    metadata:
      labels:
        app: pod-deployment
    spec:
      containers:
      - name: pod-1
        image: hub.hdj.com/library/nginx:v1
        ports:
        - containerPort: 80
        env:
        - name: TEST_USER
          valueFrom:
            secretKeyRef:
              name: mysecret
              key: username
        - name: TEST_PASSWORD
          valueFrom:
            secretKeyRef:
              name: mysecret
              key: password
EOF
kubectl apply -f env-secret.yml
Result:
[root@k8s-master01 secret]# kubectl get pod
NAME READY STATUS RESTARTS AGE
pod-deployment-5974888d48-dtmhk 1/1 Running 0 9s
pod-deployment-5974888d48-tsldz 1/1 Running 0 9s
[root@k8s-master01 secret]# kubectl exec pod-deployment-5974888d48-tsldz -it -- /bin/sh  #inside the container the environment variables hold the plaintext values
# echo $TEST_USER
admin
# echo $TEST_PASSWORD
1f2d1e2e67df
Accessing a private Harbor registry
To pull from a private docker registry, Kubernetes needs the registry's credentials.
Create a private project named privatelib on hub.hdj.com.
Push an image into the private project.
docker tag wangyanglinux/myapp:v2 hub.hdj.com/privatelib/wangyanglinux:v2  #tag the image
docker push hub.hdj.com/privatelib/wangyanglinux:v2  #push it to the private project
docker logout hub.hdj.com  #log docker out of hub.hdj.com
docker pull hub.hdj.com/privatelib/wangyanglinux:v2  #pulling now requires a login
Create a docker-registry secret holding the credentials
kubectl create secret docker-registry myregistrykey --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL
Transcript:
[root@k8s-master01 secret]# kubectl create secret docker-registry myregistrykey --docker-server=hub.hdj.com --docker-username=admin --docker-password=Harbor12345 --docker-email=huangdengji@163.com
secret/myregistrykey created
Create a pod that pulls from the private project:
cat > privatelib-pod.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: foo
spec:
  containers:
  - name: foo
    image: hub.hdj.com/privatelib/wangyanglinux:v2
  imagePullSecrets:
  - name: myregistrykey
EOF
kubectl create -f privatelib-pod.yml
kubectl get pod  #the pod is Running, i.e. the image was pulled from the private registry successfully
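To avoid repeating imagePullSecrets in every pod spec, the secret can also be attached to the namespace's default service account (a sketch of the standard approach; pods in the namespace then inherit it automatically):
kubectl patch serviceaccount default -p '{"imagePullSecrets": [{"name": "myregistrykey"}]}'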
Volumes
Volumes let the containers in a pod share files and keep files around across container restarts.
Volume types supported by Kubernetes:
awsElasticBlockStore azureDisk azureFile cephfs csi downwardAPI emptyDir
fc flocker gcePersistentDisk gitRepo glusterfs hostPath iscsi local nfs
persistentVolumeClaim projected portworxVolume quobyte rbd scaleIO secret
storageos vsphereVolume
emptyDir
An emptyDir is reset to an empty directory whenever the pod is recreated, but its data survives container crashes within the pod.
Mount one emptyDir volume into two containers: at /cache/c1 in test-container1 and at /cache/c2 in test-container2.
cat > emptyDir.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - image: hub.hdj.com/library/nginx:v1
    name: test-container1
    volumeMounts:
    - mountPath: /cache/c1
      name: cache-volume
  - image: hub.hdj.com/library/busybox:v1
    name: test-container2
    volumeMounts:
    - mountPath: /cache/c2
      name: cache-volume
    command: [ "/bin/sh", "-c", "sleep 6000s" ]
  volumes:
  - name: cache-volume
    emptyDir: {}
EOF
kubectl create -f emptyDir.yml
Transcript:
In test-container1, go to /cache/c1 and append the date to index.html:
kubectl exec test-pd -c test-container1 -it -- /bin/sh
# cd /cache/c1
# date >> index.html
# cat index.html
Tue Feb 11 23:33:17 UTC 2020
In test-container2, go to /cache/c2; index.html shows the timestamp written from test-container1. Append another date:
[root@k8s-master01 ~]# kubectl exec test-pd -c test-container2 -it -- /bin/sh
/ # cd /cache/c2
# cat index.html
Tue Feb 11 23:33:17 UTC 2020
/cache/c2 # date >> index.html
/cache/c2 # cat index.html
Tue Feb 11 23:33:17 UTC 2020
Tue Feb 11 23:34:27 UTC 2020
Back in test-container1 under /cache/c1, index.html now also shows the timestamp written from test-container2:
kubectl exec test-pd -c test-container1 -it -- /bin/sh
# cd /cache/c1
# cat index.html
Tue Feb 11 23:33:17 UTC 2020
Tue Feb 11 23:34:27 UTC 2020
hostPath
A hostPath volume mounts a file or directory from the host node's filesystem into the cluster.
The supported hostPath types are:
Value | Behavior
---|---
(empty string, default) | Backwards compatible: no checks are performed before mounting the hostPath volume.
DirectoryOrCreate | If nothing exists at the given path, an empty directory is created there as needed, with permissions 0755 and the same group and ownership as the kubelet.
Directory | A directory must exist at the given path.
FileOrCreate | If nothing exists at the given path, an empty file is created there as needed, with permissions 0644 and the same group and ownership as the kubelet.
File | A file must exist at the given path.
Socket | A UNIX socket must exist at the given path.
CharDevice | A character device must exist at the given path.
BlockDevice | A block device must exist at the given path.
Mount the host's /data directory into a pod:
cat > hostPath.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - image: hub.hdj.com/library/nginx:v1
    name: test-container
    volumeMounts:
    - mountPath: /test-pd
      name: test-volume
  volumes:
  - name: test-volume
    hostPath:
      # directory location on host
      path: /data
      # this field is optional
      type: Directory
EOF
kubectl create -f hostPath.yml
Transcript:
Inside the pod's /test-pd directory, append the date to index.html:
[root@k8s-master01 volumes]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test-pd 1/1 Running 0 13s 10.244.1.75 k8s-node01 <none> <none>
[root@k8s-master01 volumes]# kubectl exec test-pd -it -- /bin/sh
# cd test-pd
# date>>index.html
# cat index.html
Wed Feb 12 01:01:28 UTC 2020
In /data on k8s-node01, index.html shows the content written inside the container; append another date:
[root@k8s-node01 data]# cat index.html
Wed Feb 12 01:01:28 UTC 2020
[root@k8s-node01 data]# date >> index.html
[root@k8s-node01 data]# cat index.html
Wed Feb 12 01:01:28 UTC 2020
2020年 02月 12日 星期三 09:01:47 CST
Back inside the pod's /test-pd directory, index.html also shows the line written on k8s-node01:
[root@k8s-master01 volumes]# kubectl exec test-pd -it -- /bin/sh
# cd test-pd
# cat index.html
Wed Feb 12 01:01:28 UTC 2020
2020年 02月 12日 星期三 09:01:47 CST
Persistent Volumes (PV)
PersistentVolume (PV): a persistent volume that can be backed by external storage services such as NFS.
PersistentVolumeClaim (PVC): a claim on a persistent volume; claims are matched against available PVs.
PV access modes
A PersistentVolume can be mounted on a host in any way the resource provider supports. Providers differ in capability, and each PV's access modes are set to the modes that specific volume supports. For example, NFS can support multiple read/write clients, but a particular NFS PV might be exported read-only. Each PV gets its own set of access modes describing its capabilities:
- ReadWriteOnce: the volume can be mounted read/write by a single node
- ReadOnlyMany: the volume can be mounted read-only by many nodes
- ReadWriteMany: the volume can be mounted read/write by many nodes
On the command line the access modes are abbreviated as:
- RWO - ReadWriteOnce
- ROX - ReadOnlyMany
- RWX - ReadWriteMany
Reclaim policies
Retain: manual reclamation
Recycle: basic scrub (rm -rf /thevolume/*)
Delete: the associated storage asset (e.g. an AWS EBS, GCE PD, Azure Disk, or OpenStack Cinder volume) is deleted
Currently only NFS and HostPath support Recycle; AWS EBS, GCE PD, Azure Disk, and Cinder volumes support Delete.
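The reclaim policy of an existing PV can be changed with a patch (a sketch; nfspv1 refers to one of the PVs created below):
kubectl patch pv nfspv1 -p '{"spec":{"persistentVolumeReclaimPolicy":"Recycle"}}'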
Phases
- Available: a free resource not yet bound to any claim
- Bound: the volume is bound to a claim
- Released: the claim has been deleted, but the resource has not yet been reclaimed by the cluster
- Failed: automatic reclamation of the volume failed
Next, expose NFS exports to Kubernetes as PVs, then mount them into pods through PVCs.
Install NFS:
yum install -y nfs-utils rpcbind
mkdir /nfsdata
mkdir /nfsdata/nfs{1..3}
ls /nfsdata/
chmod 777 -R /nfsdata/
cat > /etc/exports <<EOF
/nfsdata/nfs1 *(rw,no_root_squash,no_all_squash,sync)
/nfsdata/nfs2 *(rw,no_root_squash,no_all_squash,sync)
/nfsdata/nfs3 *(rw,no_root_squash,no_all_squash,sync)
EOF
systemctl start rpcbind
systemctl start nfs
mkdir /test
mkdir /test/nfs{1..3}
mount -t nfs 192.168.183.200:/nfsdata/nfs1 /test/nfs1  #test nfs1
cd /test/nfs1
echo 1 >> 1.html
cd / && umount /test/nfs1
rm -rf /test/nfs1
mount -t nfs 192.168.183.200:/nfsdata/nfs2 /test/nfs2  #test nfs2
cd /test/nfs2
echo 1 >> 1.html
cd / && umount /test/nfs2
rm -rf /test/nfs2
mount -t nfs 192.168.183.200:/nfsdata/nfs3 /test/nfs3  #test nfs3
cd /test/nfs3
echo 1 >> 1.html
cd / && umount /test/nfs3
rm -rf /test/nfs3
Create three PVs:
cat > pv.yml <<EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfspv1
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfsdata/nfs1
    server: 192.168.183.200
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfspv2
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfsdata/nfs2
    server: 192.168.183.200
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfspv3
spec:
  capacity:
    storage: 2Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfsdata/nfs3
    server: 192.168.183.200
EOF
kubectl apply -f pv.yml
Create the PVCs (a headless Service plus a StatefulSet whose volumeClaimTemplates generate the PVCs):
cat > pvc.yml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: nginx
  serviceName: "nginx"
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: hub.hdj.com/library/nginx:v1
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "nfs"
      resources:
        requests:
          storage: 1Gi
EOF
kubectl apply -f pvc.yml
Result:
[root@k8s-master01 volumes]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 5d4h
nginx ClusterIP None <none> 80/TCP 80m
[root@k8s-master01 volumes]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
nfspv1 10Gi RWO Retain Bound default/www-web-2 nfs 89m
nfspv2 5Gi RWO Retain Bound default/www-web-1 nfs 89m
nfspv3 2Gi RWO Retain Bound default/www-web-0 nfs 89m
[root@k8s-master01 volumes]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
www-web-0 Bound nfspv3 2Gi RWO nfs 77m
www-web-1 Bound nfspv2 5Gi RWO nfs 76m
www-web-2 Bound nfspv1 10Gi RWO nfs 76m
[root@k8s-master01 volumes]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
web-0 1/1 Running 0 78m 10.244.2.77 k8s-node02 <none> <none>
web-1 1/1 Running 0 77m 10.244.1.76 k8s-node01 <none> <none>
web-2 1/1 Running 0 77m 10.244.2.78 k8s-node02 <none> <none>
Write an index.html into each export on the NFS server.
echo "1111111111" >> /nfsdata/nfs1/index.html
echo "22222222" >> /nfsdata/nfs2/index.html
echo "33333333" >> /nfsdata/nfs3/index.html
chmod 777 -R /nfsdata
Then curl each pod; the content written on the NFS server comes back:
[root@k8s-master01 volumes]# curl 10.244.1.76
22222222
[root@k8s-master01 volumes]# curl 10.244.2.77
33333333
[root@k8s-master01 volumes]# curl 10.244.2.78
1111111111
Delete pod web-0, then list the pods again: web-0 has been recreated, and curling the new web-0 returns the same content as before.
[root@k8s-master01 volumes]# kubectl delete pod web-0
pod "web-0" deleted
[root@k8s-master01 volumes]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
web-0 1/1 Running 0 17s 10.244.2.79 k8s-node02 <none> <none>
web-1 1/1 Running 0 84m 10.244.1.76 k8s-node01 <none> <none>
web-2 1/1 Running 0 84m 10.244.2.78 k8s-node02 <none> <none>
[root@k8s-master01 volumes]# curl 10.244.2.79
33333333
PVC characteristics
- Pod names (network identities) follow the pattern $(statefulset name)-$(ordinal); in the example above: web-0, web-1, web-2.
- StatefulSet gives each pod replica a DNS name of the form $(podname).$(headless service name). Services communicate via these pod domain names rather than pod IPs: when a pod's node fails and the pod is moved to another node, its IP changes but its domain name does not.
- StatefulSet uses the headless service to control the pods' domain; the FQDN of that domain is $(servicename).$(namespace).svc.cluster.local, where cluster.local is the cluster domain.
- volumeClaimTemplates create one PVC per pod, named $(volumeClaimTemplates.name)-$(pod name). Above, volumeMounts.name=www and the pod names are web-[0-2], so the PVCs are www-web-0, www-web-1, and www-web-2.
- Deleting a pod does not delete its PVC; deleting a PVC by hand releases its PV automatically.
Create a pod and use $(podname).$(headless service name) to reach the StatefulSet's pods:
cat > test-pd.yml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - image: alpine
    name: test-container1
    command: [ "/bin/sh", "-c", "sleep 6000s" ]
EOF
kubectl create -f test-pd.yml
Enter the pod and ping web-1; it resolves and responds:
[root@k8s-master01 volumes]# kubectl exec test-pd -it -- /bin/sh  #enter the pod
/ # ping web-1.nginx
PING web-1.nginx (10.244.1.76): 56 data bytes
64 bytes from 10.244.1.76: seq=0 ttl=64 time=0.214 ms
64 bytes from 10.244.1.76: seq=1 ttl=64 time=0.066 ms
Delete web-1 and list the pods: web-1 has been recreated with a new IP, yet pinging web-1 still works:
[root@k8s-master01 volumes]# kubectl delete pod web-1  #delete web-1
pod "web-1" deleted
[root@k8s-master01 volumes]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test-pd 1/1 Running 0 8m16s 10.244.1.80 k8s-node01 <none> <none>
web-0 1/1 Running 0 88m 10.244.2.79 k8s-node02 <none> <none>
web-1 1/1 Running 0 11s 10.244.1.81 k8s-node01 <none> <none>
web-2 1/1 Running 0 172m 10.244.2.78 k8s-node02 <none> <none>
[root@k8s-master01 volumes]# kubectl exec test-pd -it -- /bin/sh
/ # ping web-1.nginx
PING web-1.nginx (10.244.1.81): 56 data bytes
64 bytes from 10.244.1.81: seq=0 ttl=64 time=0.064 ms
64 bytes from 10.244.1.81: seq=1 ttl=64 time=0.077 ms
Resolving $(servicename).$(namespace).svc.cluster.local through CoreDNS returns all three StatefulSet pod addresses:
[root@k8s-master01 volumes]# kubectl get pod -n kube-system -o wide|grep coredns  #find the coredns pod IPs
coredns-5c98db65d4-m84ww 1/1 Running 7 5d6h 10.244.0.15 k8s-master01 <none> <none>
coredns-5c98db65d4-mtlqz 1/1 Running 7 5d6h 10.244.0.14 k8s-master01 <none> <none>
[root@k8s-master01 volumes]# dig -t A nginx.default.svc.cluster.local. @10.244.0.14 |grep nginx  #resolve the StatefulSet's headless service through coredns
; <<>> DiG 9.11.4-P2-RedHat-9.11.4-9.P2.el7 <<>> -t A nginx.default.svc.cluster.local. @10.244.0.14
;nginx.default.svc.cluster.local. IN A
nginx.default.svc.cluster.local. 21 IN A 10.244.2.79
nginx.default.svc.cluster.local. 21 IN A 10.244.2.78
nginx.default.svc.cluster.local. 21 IN A 10.244.1.81
StatefulSet start and stop ordering
- Ordered deployment: with multiple pod replicas, pods are created sequentially (from 0 to N-1), and each pod starts only after every pod before it is Running and Ready.
- Ordered deletion: pods are terminated in reverse order, from N-1 down to 0.
- Ordered scaling: as with deployment, a pod is added only when every pod before it is Running and Ready.
StatefulSet use cases
- Stable persistent storage: a pod sees the same persisted data after rescheduling, via PVCs.
- Stable network identity: a pod keeps its PodName and HostName after rescheduling.
- Ordered deployment and scaling, implemented with init containers (see the scaling sketch after this list).
- Ordered scale-down.
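The ordering is easy to observe by scaling the StatefulSet from this section and watching the pods (a sketch; names follow the web StatefulSet above):
kubectl scale statefulset web --replicas=5  #web-3 starts only once web-0..web-2 are Ready, then web-4
kubectl scale statefulset web --replicas=3  #web-4 terminates first, then web-3
kubectl get pod -w                          #watch the ordered creation and termination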
Releasing PVs and PVCs
[root@k8s-master01 volumes]# kubectl delete -f pvc.yml
service "nginx" deleted
statefulset.apps "web" deleted
[root@k8s-master01 volumes]# kubectl get pod
No resources found.
[root@k8s-master01 volumes]# kubectl get statefulset
No resources found.
[root@k8s-master01 volumes]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 5d6h
[root@k8s-master01 volumes]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
www-web-0 Bound nfspv3 2Gi RWO nfs 3h17m
www-web-1 Bound nfspv2 5Gi RWO nfs 3h16m
www-web-2 Bound nfspv1 10Gi RWO nfs 3h16m
[root@k8s-master01 volumes]# kubectl delete pvc --all
persistentvolumeclaim "www-web-0" deleted
persistentvolumeclaim "www-web-1" deleted
persistentvolumeclaim "www-web-2" deleted
[root@k8s-master01 volumes]# kubectl get pvc
No resources found.
[root@k8s-master01 volumes]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
nfspv1 10Gi RWO Retain Released default/www-web-2 nfs 3h30m
nfspv2 5Gi RWO Retain Released default/www-web-1 nfs 3h30m
nfspv3 2Gi RWO Retain Released default/www-web-0 nfs 3h30m
[root@k8s-master01 volumes]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
nfspv1 10Gi RWO Retain Released default/www-web-2 nfs 3h30m
nfspv2 5Gi RWO Retain Released default/www-web-1 nfs 3h30m
nfspv3 2Gi RWO Retain Released default/www-web-0 nfs 3h30m
[root@k8s-master01 volumes]# kubectl get pv nfspv1 -o yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"PersistentVolume","metadata":{"annotations":{},"name":"nfspv1"},"spec":{"accessModes":["ReadWriteOnce"],"capacity":{"storage":"10Gi"},"nfs":{"path":"/nfsdata/nfs1","server":"192.168.183.200"},"persistentVolumeReclaimPolicy":"Retain","storageClassName":"nfs"}}
    pv.kubernetes.io/bound-by-controller: "yes"
  creationTimestamp: "2020-02-12T09:59:11Z"
  finalizers:
  - kubernetes.io/pv-protection
  name: nfspv1
  resourceVersion: "351310"
  selfLink: /api/v1/persistentvolumes/nfspv1
  uid: 0f3aa860-190a-4bfd-835e-0f7258f7f718
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 10Gi
  claimRef:
    apiVersion: v1
    kind: PersistentVolumeClaim
    name: www-web-2
    namespace: default
    resourceVersion: "333994"
    uid: ef02c9a5-1fb4-45f8-b874-9d68e286dfdb
  nfs:
    path: /nfsdata/nfs1
    server: 192.168.183.200
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  volumeMode: Filesystem
status:
  phase: Released
[root@k8s-master01 volumes]# kubectl edit pv nfspv1  #edit nfspv1 and delete the claimRef property and its children
persistentvolume/nfspv1 edited
[root@k8s-master01 volumes]# kubectl get pv  #nfspv1 now shows Available
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
nfspv1 10Gi RWO Retain Available nfs 3h33m
nfspv2 5Gi RWO Retain Released default/www-web-1 nfs 3h33m
nfspv3 2Gi RWO Retain Released default/www-web-0 nfs 3h33m
Modify the other two PVs the same way, deleting claimRef and its children. Final state:
[root@k8s-master01 volumes]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
nfspv1 10Gi RWO Retain Available nfs 3h39m
nfspv2 5Gi RWO Retain Available nfs 3h39m
nfspv3 2Gi RWO Retain Available nfs 3h39m