1.概述
污点(Taint)是设置在k8s集群节点上的一种属性标记,通过设置污点,来规划资源创建时所在的节点
污点的类型:
PreferNoSchedule:节点设置这个污点类型后表示该节点仍可接受调度,但是会降低调度到该节点的概率
NoSchedule:表示该节点不接受新的调度,之前已调度到该节点的资源,可以继续存在
NoExecute:表示不接受新的调度,同时会驱逐该节点上已有的Pod
2.污点的管理
·查看污点
[root@master job]# kubectl describe nodes |grep -i taint
Taints: node-role.kubernetes.io/master:NoSchedule
Taints: <none>
Taints: <none>
查看Taints所在行及其后两行(-A 2 显示匹配行之后的两行)
[root@master job]# kubectl describe nodes |grep -i taint -A 2
Taints: node-role.kubernetes.io/master:NoSchedule
Unschedulable: false
Lease:
--
Taints: <none>
Unschedulable: false
Lease:
--
Taints: <none>
Unschedulable: false
Lease:
·创建污点
1.创建前查看pod详情
[root@master job]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
dm002-7b46896f67-7q5vd 1/1 Running 0 14s 10.100.2.158 worker2 <none> <none>
dm002-7b46896f67-ckq86 1/1 Running 0 14s 10.100.2.155 worker2 <none> <none>
dm002-7b46896f67-nspxv 1/1 Running 0 14s 10.100.2.156 worker2 <none> <none>
dm002-7b46896f67-r92tj 1/1 Running 0 14s 10.100.2.154 worker2 <none> <none>
dm002-7b46896f67-wmggx 1/1 Running 0 13s 10.100.2.157 worker2 <none> <none>
2.创建污点
创建污点有两种方式:
第一种:key=value:污点类型
第二种:key:污点类型
3.创建污点
[root@master job]# kubectl taint node worker2 k8s=demo:NoExecute
node/worker2 tainted
4.创建污点后,查看pod
[root@master job]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
dm002-7b46896f67-2d64t 0/1 Pending 0 3s <none> <none> <none> <none>
dm002-7b46896f67-2tczc 0/1 Pending 0 3s <none> <none> <none> <none>
dm002-7b46896f67-6lbmn 0/1 Pending 0 4s <none> <none> <none> <none>
dm002-7b46896f67-8xt9t 0/1 Pending 0 4s <none> <none> <none> <none>
dm002-7b46896f67-w8mjs 0/1 Pending 0 3s <none> <none> <none> <none>
·删除污点
[root@master job]# kubectl taint node worker2 k8s=demo:NoExecute-
node/worker2 untainted
[root@master job]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
dm002-7b46896f67-2s8vc 1/1 Running 0 30s 10.100.2.177 worker2 <none> <none>
dm002-7b46896f67-phzcb 1/1 Running 0 30s 10.100.2.178 worker2 <none> <none>
dm002-7b46896f67-q8z6h 1/1 Running 0 30s 10.100.2.174 worker2 <none> <none>
dm002-7b46896f67-x5dzv 1/1 Running 0 30s 10.100.2.175 worker2 <none> <none>
dm002-7b46896f67-xwvg5 1/1 Running 0 30s 10.100.2.176 worker2 <none> <none>
·修改污点
[root@master job]# kubectl taint node worker2 k8s=test:NoExecute
node/worker2 tainted
[root@master job]# kubectl taint node worker2 k8s=demo:NoSchedule --overwrite
node/worker2 modified
3.污点容忍tolerations
通过给k8s集群节点设置不同类型的污点,来控制资源创建的节点范围
污点容忍就是即便节点设置了污点,资源也可以创建在这个节点上;
即在节点上设置了污点之后,在创建资源时,可以无视这个节点上的污点规则,也就可以在有污点的节点上创建资源
假设一个节点上有两个污点,但是你创建pod的时候,还想要在这个节点上创建,那么你就需要在资源清单中,写入容忍这两个污点,才会创建成功
·示例
利用之前给worker2节点创建的两个污点
1.编辑一个正常的资源清单
[root@master job]# cat deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: test001
spec:
replicas: 5
selector:
matchLabels:
k8s: k8s
template:
metadata:
labels:
k8s: k8s
spec:
containers:
- name: test-c
image: nginx:1.20.1-alpine
ports:
- name: port
containerPort: 80
创建并查看资源,由于worker2节点上设置了污点,所以pod都创建在了worker1节点上
[root@master job]# kubectl apply -f deploy.yaml
deployment.apps/test001 created
[root@master job]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test001-f8998c97c-5hwkr 1/1 Running 0 62s 10.100.1.227 worker1 <none> <none>
test001-f8998c97c-hspvt 1/1 Running 0 62s 10.100.1.229 worker1 <none> <none>
test001-f8998c97c-mrxvg 1/1 Running 0 62s 10.100.1.226 worker1 <none> <none>
test001-f8998c97c-q7lhx 1/1 Running 0 62s 10.100.1.225 worker1 <none> <none>
test001-f8998c97c-v85ws 1/1 Running 0 62s 10.100.1.228 worker1 <none> <none>
2. 修改资源清单,设置污点容忍
[root@master job]# cat deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: test001
spec:
replicas: 5
selector:
matchLabels:
k8s: k8s
template:
metadata:
labels:
k8s: k8s
spec:
#设置污点容忍
tolerations:
#指定污点的key
- key: node-role.kubernetes.io/master
#指定污点的类型
effect: NoSchedule
#设置key和value的关系
#--Exists:key匹配所有value
#--Equal:key和value必须全部匹配到
operator: Exists
- key: k8s
value: test
effect: NoExecute
operator: Equal
- key: k8s
value: demo
effect: NoSchedule
operator: Equal
containers:
- name: test-c
image: nginx:1.20.1-alpine
ports:
- name: port
containerPort: 80
重新创建资源
[root@master job]# kubectl apply -f deploy.yaml
deployment.apps/test001 configured
查看pod详情,发现worker1和worker2节点上都有
[root@master job]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test001-7955599844-2lt2x 1/1 Running 0 30s 10.100.2.187 worker2 <none> <none>
test001-7955599844-b56p8 1/1 Running 0 30s 10.100.1.250 worker1 <none> <none>
test001-7955599844-j47bv 1/1 Running 0 30s 10.100.2.188 worker2 <none> <none>
test001-7955599844-ncrqf 1/1 Running 0 30s 10.100.1.249 worker1 <none> <none>
test001-7955599844-zbpmn 1/1 Running 0 30s 10.100.2.189 worker2 <none> <none>
3.修改资源清单,无视所有污点
[root@master job]# cat deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: test001
spec:
replicas: 5
selector:
matchLabels:
k8s: k8s
template:
metadata:
labels:
k8s: k8s
spec:
tolerations:
#不写污点的key的相关属性表示匹配所有key
- operator: Exists
containers:
- name: test-c
image: nginx:1.20.1-alpine
ports:
- name: port
containerPort: 80
重新创建资源
[root@master job]# kubectl apply -f deploy.yaml
deployment.apps/test001 configured
查看pod详情
[root@master job]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test001-6588d66c78-kmzj8 1/1 Running 0 14s 10.100.2.195 worker2 <none> <none>
test001-6588d66c78-rz9rl 1/1 Running 0 14s 10.100.1.10 worker1 <none> <none>
test001-6588d66c78-t4t5f 1/1 Running 0 14s 10.100.0.7 master <none> <none>
test001-6588d66c78-v9pvp 1/1 Running 0 14s 10.100.2.196 worker2 <none> <none>
test001-6588d66c78-xssvd 1/1 Running 0 15s 10.100.1.8 worker1 <none> <none>