Kubernetes 运维常用操作笔记
一、pod相关(驱逐、强制删除)
1、kubectl drain 可以在维护期间将节点排空。drain 本意为排水,即把待维护 node 上的 pod 驱逐并转移到其它 node 上运行。
kubectl drain nodename --delete-local-data --ignore-daemonsets --force
（注意:较新版本的 kubectl 中 --delete-local-data 已更名为 --delete-emptydir-data）
2、将node置为SchedulingDisabled不可调度状态
kubectl cordon nodename
将node置为SchedulingDisabled不可调度状态,后续的新创建pod容器时scheduler调度不会考虑该node,旧的pod容器不会受影响,仍可以对外提供正常服务。(特殊情况:pod容器如果跟node绑定的话,容器下次更新就不会回到原宿主机,该情况如何处理呢?可能设置成不可调度状态就不太合适。调度器 预调度策略)
恢复调度
kubectl uncordon nodename
3、Terminating可使用kubectl中的强制删除命令
# 删除POD
kubectl delete pod PODNAME --force --grace-period=0
二、让Master也能当作Node使用
#将 Master 也当作 Node 使用
kubectl taint node nodename node-role.kubernetes.io/master-
[root@app01 home]# kubectl taint node app01 node-role.kubernetes.io/master-
node/app01 untainted
[root@app01 home]#
#将 Master 恢复成 Master Only 状态
kubectl taint node nodename node-role.kubernetes.io/master="":NoSchedule
[root@app01 rabbitmq]# kubectl taint node app01 node-role.kubernetes.io/master="":NoSchedule
node/app01 tainted
[root@app01 rabbitmq]#
三、修改nodeport端口范围(nodePort 端口默认范围为:30000-32767)
- 编辑 kube-apiserver.yaml文件
vim /etc/kubernetes/manifests/kube-apiserver.yaml
- 找到 --service-cluster-ip-range 这一行,在其下一行增加如下内容
- --service-node-port-range=30000-50000
示例如下:
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
component: kube-apiserver
tier: control-plane
name: kube-apiserver
namespace: kube-system
spec:
containers:
- command:
- kube-apiserver
- --authorization-mode=Node,RBAC
- --advertise-address=192.168.180.37
- --allow-privileged=true
- --client-ca-file=/etc/kubernetes/pki/ca.crt
- --disable-admission-plugins=PersistentVolumeLabel
- --enable-admission-plugins=NodeRestriction
- --enable-bootstrap-token-auth=true
- --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
- --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
- --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
- --etcd-servers=https://127.0.0.1:2379
- --insecure-port=0
- --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
- --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
- --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
- --requestheader-allowed-names=front-proxy-client
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
- --requestheader-extra-headers-prefix=X-Remote-Extra-
- --requestheader-group-headers=X-Remote-Group
- --requestheader-username-headers=X-Remote-User
- --secure-port=6443
- --service-account-key-file=/etc/kubernetes/pki/sa.pub
- --service-cluster-ip-range=10.96.0.0/12
- --service-node-port-range=30000-50000
- --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
- --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
- 最后重启 kubelet 使配置生效（kube-apiserver 为静态 Pod,kubelet 检测到 manifest 变更后会自动重建该 Pod）
systemctl daemon-reload
systemctl restart kubelet
四、修改k8sDNS
1、查看coredns配置
[root@dev15 ~]# kubectl get configmap coredns -n kube-system -o yaml
apiVersion: v1
data:
Corefile: |
.:53 {
errors
health
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
proxy . /etc/resolv.conf
cache 30
reload
}
kind: ConfigMap
metadata:
creationTimestamp: "2019-10-21T05:57:32Z"
name: coredns
namespace: kube-system
resourceVersion: "77720660"
selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
uid: ac49f58c-f3c7-11e9-a4a8-005056974fea
[root@dev15 ~]#
2、增加DNS配置
hosts {
192.168.168.168 sft-ap.pfizer.com sft-am.pfizer.com
fallthrough
}
[root@dev15 ~]# kubectl edit configmap coredns -n kube-system
apiVersion: v1
data:
Corefile: |
.:53 {
errors
health
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
hosts {
192.168.168.168 sft-ap.pfizer.com sft-am.pfizer.com
fallthrough
}
prometheus :9153
proxy . /etc/resolv.conf
cache 30
reload
}
kind: ConfigMap
metadata:
creationTimestamp: "2019-10-21T05:57:32Z"
name: coredns
namespace: kube-system
resourceVersion: "77720660"
selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
uid: ac49f58c-f3c7-11e9-a4a8-005056974fea
[root@dev15 ~]#
五、运行nslookup容器
kubectl run dns-test -it --rm --restart=Never --image=busybox:1.28.4 -- sh
六、查看configmap文件
[root@dev15 kubeadm_v1.11.0]# kubectl get cm --all-namespaces
NAMESPACE NAME DATA AGE
filebeat-namespace filebeat-config 1 394d
filebeat-namespace filebeat-inputs 1 394d
kube-public cluster-info 4 626d
kube-system coredns 1 626d
kube-system extension-apiserver-authentication 6 626d
kube-system kube-flannel-cfg 2 395d
kube-system kube-proxy 2 626d
kube-system kubeadm-config 2 626d
kube-system kubelet-config-1.11 1 626d
kube-system kubelet-config-1.12 1 428d
kube-system kubelet-config-1.13 1 428d
kube-system kubernetes-dashboard-settings 1 626d
kube-system prometheus-config 1 220d
[root@dev15 net.d]# kubectl get configmap coredns -n kube-system -o yaml
apiVersion: v1
data:
Corefile: |
.:53 {
errors
health
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
hosts {
192.168.168.12 sft-ap.pfizer.com sft-am.pfizer.com
fallthrough
}
prometheus :9153
proxy . /etc/resolv.conf
cache 30
reload
}
kind: ConfigMap
metadata:
creationTimestamp: "2019-10-21T05:57:32Z"
name: coredns
namespace: kube-system
resourceVersion: "95879867"
selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
uid: ac49f58c-f3c7-11e9-a4a8-005056974fea
[root@dev15 net.d]#