1、了解k8s亲和性和反亲和性,以及污点和容忍的概念
考虑以下场景:
广州1,深圳2两个zone,现在我们只希望pod实例部署在广州可用区;
创建一个pod,利用污点创建pod到master节点上。
一. 利用污点创建pod到master节点上
vim nginx-deployment.yaml
# Deployment whose Pods tolerate the control-plane taint
# (node-role.kubernetes.io/master:NoSchedule). Combined with tainting the
# worker nodes (see the kubectl taint commands below), the replicas can
# only be scheduled onto the master node.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: pod
          image: nginx:1.7.9
          ports:
            - containerPort: 80
      # operator Exists with no 'effect' tolerates every effect of this
      # taint key, so the master's NoSchedule taint no longer repels the Pods.
      tolerations:
        - key: "node-role.kubernetes.io/master"
          operator: "Exists"
kubectl taint nodes node2 xx1=yy1:NoSchedule
kubectl taint nodes node3 xx1=yy1:NoSchedule
给node节点都设置污点
使用pod的方式创建(二选一)
# Single-Pod variant of the same experiment (choose this OR the Deployment).
# Tolerates the master taint so the Pod may land on the control-plane node.
apiVersion: v1
kind: Pod
metadata:
  name: pod-3
  labels:
    app: pod-3
spec:
  containers:
    - name: nginx-pod3
      image: nginx:1.7.9
      ports:
        - containerPort: 80
  tolerations:
    # With operator Equal and 'value' omitted, the toleration matches a
    # taint whose value is empty — which is exactly the default
    # node-role.kubernetes.io/master:NoSchedule taint on the master.
    - key: "node-role.kubernetes.io/master"
      operator: "Equal"
      effect: "NoSchedule"
一样的效果
创建deployment看是不是都创建到node1 也就是master去了
容忍是根据node1的污点设置的
可以看到都创建到master节点了
删除污点
[root@node1 ~]# kubectl taint nodes node3 xx1=yy1:NoSchedule-
node/node3 untainted
[root@node1 ~]# kubectl taint nodes node2 xx1=yy1:NoSchedule-
node/node2 untainted
删除创建的pod
重新get pod -o wide查看发现在node节点上创建出了新的pod
补充:
如果将docker镜像从一台主机传输到另一台主机
[root@node1 nginx]# docker save -o nginx.tar nginx:1.7.9
[root@node1 nginx]# scp nginx.tar root@node2:/root/
[root@node2 ~]# docker load -i nginx.tar
如a服务器镜像迁移到b服务器:
a:docker save -o 自定义文件名 迁移镜像名:版本
b:scp root@a服务器ip:/保存路径/自定义文件名 /root/自定义文件名2
b:docker load -i 自定义文件名2
只希望pod实例部署在广州可用区
[root@node1 nginx]# kubectl label node node2 zone=guangzhou
node/node2 labeled
[root@node1 nginx]# kubectl get node --show-labels
NAME STATUS ROLES AGE VERSION LABELS
node1 Ready master 26d v1.19.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node1,kubernetes.io/os=linux,node-role.kubernetes.io/master=
node2 Ready <none> 26d v1.19.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node2,kubernetes.io/os=linux,zone=guangzhou
node3 Ready <none> 26d v1.19.4 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node3,kubernetes.io/os=linux
vim nginx-pod.yaml
# Pod pinned to the Guangzhou availability zone via required node affinity:
# it can only be scheduled onto nodes labeled zone=guangzhou (node2 here).
# "requiredDuringSchedulingIgnoredDuringExecution" is a hard constraint at
# scheduling time; already-running Pods are not evicted if labels change.
apiVersion: v1
kind: Pod
metadata:
  name: affinity
  labels:
    app: node-affinity-pod
spec:
  containers:
    - name: with-node-affinity
      image: nginx:1.7.9
      ports:
        - containerPort: 80
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: zone
                operator: In
                values:
                  - guangzhou
[root@node1 nginx]# kubectl apply -f nginx-pod.yaml
[root@node1 nginx]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
affinity 1/1 Running 0 3m21s 10.244.104.39 node2 <none> <none>