1、Node selectors
- Without any affinity rules, the scheduler places Pods on nodes essentially at random.
- With the mechanisms below you can control which node a Pod is scheduled onto.
1、The nodeName approach
Schedule the Pod onto a specific node via the nodeName field.
busybox needs a long-running process to keep the container alive, hence the sleep command below.
apiVersion: v1
kind: Pod
metadata:
  name: p1
  namespace: default
  labels:
    app: n1
spec:
  nodeName: node1
  containers:
  - name: tomocat1
    image: docker.io/library/tomcat:8.5-jre8-alpine
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 8080
  - name: busybox1
    image: docker.io/library/busybox:1.28
    imagePullPolicy: IfNotPresent
    command:
    - "/bin/sh"         # give the container a shell
    - "-c"              # run the following string as a command
    - "sleep 3600000"
#Reachable only from inside the cluster network; a browser outside the cluster cannot access it.
[root@master qinhexing]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
p1 2/2 Running 0 88s 10.244.166.140 node1 <none> <none>
[root@master qinhexing]# curl 10.244.166.140:8080
#Many Pod fields (nodeName, container command and args, volumes, etc.) are immutable once the Pod has been created.
#To change them you must delete the Pod (force-delete with --force --grace-period=0 if necessary) and recreate it, as sketched below.
#The curl access above is cluster-internal only.
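A minimal sketch of that delete-and-recreate cycle, assuming the manifest above was saved as nodename.yaml (adjust the file name to whatever you used):
kubectl delete pod p1 --force --grace-period=0
kubectl apply -f nodename.yaml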
2、The nodeSelector approach
Schedule based on labels carried by the nodes.
#Ask for a node labeled disk=node1 and re=rrr; at first the Pod cannot be scheduled because no node has these labels.
[root@master qinhexing]# cat nodeselector.yaml
apiVersion: v1
kind: Pod
metadata:
  name: tomcat1
  namespace: dev
spec:
  nodeSelector:
    disk: node1
    re: rrr
  containers:
  - name: t1
    image: docker.io/library/tomcat:8.5-jre8-alpine
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 8080
#Because no node carries the requested labels, the Pod sits in Pending.
#None of the 3 nodes matches the Pod's node selector.
[root@master qinhexing]# kubectl describe pod -n dev
Node-Selectors: disk=node1
re=rrr
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 75s default-scheduler 0/3 nodes are available: 1 node(s) had untolerated taint {node-role.kubernetes.io/control-plane: }, 3 node(s) didn't match Pod's node affinity/selector. preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling..
#After labeling a node, the Pod gets scheduled.
[root@master qinhexing]# kubectl label nodes node1 disk=node1 re=rrr
node/node1 labeled
[root@master qinhexing]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
tomcat1 1/1 Running 0 4m5s
#The Pod is now scheduled onto node1.
Normal Scheduled 50s default-scheduler Successfully assigned dev/tomcat1 to node1
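To review or undo the labels used above (appending "-" to a label key removes that label):
kubectl get nodes node1 --show-labels
kubectl label nodes node1 disk- re-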
2、Node affinity
#Hard (required) affinity: the node must satisfy the rule or the Pod will not be scheduled.
matchExpressions match against labels on the node.
[root@master qinhexing]# cat nodeaff-re.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nodeaff-r
  namespace: dev
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:   # matches labels on the node
          - key: a            # only nodes labeled a=q1 qualify
            operator: "In"
            values:
            - q1
  containers:
  - name: nodeaff-r
    image: docker.io/library/nginx:1.9.1
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 80
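If no node carries a=q1 this Pod stays in Pending; labeling a node satisfies the required rule (node1 here is just an example):
kubectl label nodes node1 a=q1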
#Soft (preferred) affinity: the scheduler tries to satisfy the rule, but falls back to other nodes if it cannot.
[root@master qinhexing]# cat nodeaff-per.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nodeaff-pre
  namespace: dev
spec:
  containers:
  - name: nodeaff-pre
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 80
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 80   # this preference carries a weight of 80
        preference:
          matchExpressions:
          - key: a
            operator: "In"
            values:
            - q1
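Besides In, node affinity matchExpressions also accept NotIn, Exists, DoesNotExist, Gt and Lt. A minimal fragment that could sit under nodeSelectorTerms in the required form (the label keys disktype and cpu-count are made-up examples):
- matchExpressions:
  - key: disktype      # node only needs to carry the disktype label, any value
    operator: "Exists"
  - key: cpu-count     # Gt/Lt compare the label value as an integer
    operator: "Gt"
    values:
    - "4"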
3、Pod affinity
- Scheduling based on the relationship between Pods: Pods that share certain characteristics are placed on the same node, which makes calls between them faster.
- Typical case: a database and the web server that depends on it, working together.
- The decision is made from the labels of Pods that already exist.
#Hard (required) pod affinity
[root@master qinhexing]# cat podaff-re.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-first
  namespace: dev
  labels:
    a: qq1
spec:
  containers:
  - name: pod-first
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent
---
apiVersion: v1
kind: Pod
metadata:
  name: podaff-second
  namespace: dev
spec:
  containers:
  - name: podaff-re
    image: docker.io/ikubernetes/myapp:v1
    imagePullPolicy: IfNotPresent
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - topologyKey: kubernetes.io/hostname   # two Pods count as co-located when this node label matches; a custom label works too
        labelSelector:                        # selects the existing Pods to stay close to
          matchExpressions:
          - key: a
            operator: "In"
            values:
            - qq1
#Soft (preferred) pod affinity works the same way via preferredDuringSchedulingIgnoredDuringExecution; see the sketch below.
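A minimal sketch of the preferred form (the Pod name podaff-pre is made up; it reuses the a=qq1 selector from above):
apiVersion: v1
kind: Pod
metadata:
  name: podaff-pre
  namespace: dev
spec:
  containers:
  - name: podaff-pre
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent
  affinity:
    podAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 80
        podAffinityTerm:          # preferred terms wrap the rule in podAffinityTerm
          topologyKey: kubernetes.io/hostname
          labelSelector:
            matchExpressions:
            - key: a
              operator: "In"
              values:
              - qq1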
4、Pod anti-affinity
- If two Pods both consume a lot of resources, anti-affinity keeps them from being scheduled onto the same node.
#With topologyKey: kubernetes.io/hostname, any node that already runs a Pod matching the labelSelector is ruled out: identical hostname means the same node, so the Pod cannot be placed there.
[root@master qinhexing]# cat podanti.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod1-anti
  namespace: dev
spec:
  containers:
  - name: pod1-anti
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - topologyKey: kubernetes.io/hostname
        labelSelector:
          matchExpressions:
          - key: a
            operator: "In"
            values:
            - qq1
#Pay attention to topologyKey: nodes whose topologyKey label matches one already hosting a selected Pod are excluded, so with kubernetes.io/hostname the two Pods always land on different nodes.
5、Taints and tolerations
- A taint lives on a node and controls whether Pods are allowed to run (or keep running) there.
1、Taint effects (see the kubectl taint sketch after this list)
- NoExecute: Pods without a matching toleration are not scheduled here, and Pods already running here are evicted.
- NoSchedule: new Pods without a matching toleration are not scheduled here; Pods already running are left alone.
- PreferNoSchedule: a soft rule; the scheduler tries to avoid this node and only uses it as a last resort.
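A quick sketch of applying and removing taints with kubectl taint (node1 and the key/value a=b are placeholders; each effect is a separate taint):
kubectl taint nodes node1 a=b:NoSchedule         # keep new Pods without a toleration away
kubectl taint nodes node1 a=b:PreferNoSchedule   # soft version: avoid this node if possible
kubectl taint nodes node1 a=b:NoExecute          # also evict running Pods without a toleration
kubectl taint nodes node1 a=b:NoExecute-         # a trailing "-" removes the taint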
[root@master /]# kubectl explain node.spec.taints
#Manually apply a taint
[root@master qinhexing]# kubectl get pod -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-first 1/1 Running 0 25m 10.244.104.17 node2 <none> <none>
pod1-anti 1/1 Running 0 11m 10.244.166.145 node1 <none> <none>
podaff-second 1/1 Running 0 25m 10.244.104.18 node2 <none> <none>
[root@master qinhexing]# kubectl taint node node2 a=b:NoExecute
node/node2 tainted
[root@master qinhexing]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
pod1-anti 1/1 Running 0 11m
2、Tolerations
#Check the taint on node2
[root@master qinhexing]# kubectl describe node node2 | grep -i taint
Taints: a=b:NoExecute
#Remove the taint
[root@master qinhexing]# kubectl taint node node2 a:NoExecute-
node/node2 untainted
[root@master qinhexing]# kubectl describe node node2 | grep -i taint
Taints: <none>
#Tolerate the taint
Equal: the toleration's key and value must both equal the taint's.
Exists: matching the key alone is enough; the value is left empty.
[root@master qinhexing]# cat nodename.yaml
apiVersion: v1
kind: Pod
metadata:
  name: p1
  namespace: dev
  labels:
    app: n1
spec:
  #nodeName: node2
  containers:
  - name: tomocat1
    image: docker.io/library/tomcat:8.5-jre8-alpine
    imagePullPolicy: IfNotPresent
    ports:
    - containerPort: 8080
  - name: busybox1
    image: docker.io/library/busybox:1.28
    imagePullPolicy: IfNotPresent
    command:
    - "/bin/sh"
    - "-c"
    - "sleep 3600000"
  tolerations:          # tolerations for the Pod
  - key: a
    operator: "Equal"   # with Exists, the value must be left empty
    value: b
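For comparison, the Exists form of the toleration (a minimal sketch; effect is optional and, when set, limits the toleration to that one effect):
tolerations:
- key: a
  operator: "Exists"   # key a alone is enough; value must be omitted
  effect: "NoExecute"  # only tolerate the NoExecute effect of this taint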