[root@master ~]# kubectl get pods -o wide -n kube-system
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-6955765f44-4qv5c 1/1 Running 0 20h 10.244.2.2 node2 <none> <none>
coredns-6955765f44-bwlps 1/1 Running 0 20h 10.244.2.3 node2 <none> <none>
etcd-master 1/1 Running 1 20h 192.168.70.110 master <none> <none>
kube-apiserver-master 1/1 Running 1 20h 192.168.70.110 master <none> <none>
kube-controller-manager-master 1/1 Running 1 20h 192.168.70.110 master <none> <none>
kube-flannel-ds-amd64-2csdd 1/1 Running 0 9m38s 192.168.70.112 node2 <none> <none>
kube-flannel-ds-amd64-4729t 0/1 Terminating 0 19h 192.168.70.110 master <none> <none>
kube-flannel-ds-amd64-vbbqn 1/1 Running 0 9m37s 192.168.70.111 node1 <none> <none>
kube-proxy-s2tp2 1/1 Running 1 20h 192.168.70.112 node2 <none> <none>
kube-proxy-xcp8q 1/1 Running 1 20h 192.168.70.111 node1 <none> <none>
kube-proxy-zngnw 1/1 Running 1 20h 192.168.70.110 master <none> <none>
kube-scheduler-master 1/1 Running 1 20h 192.168.70.110 master <none> <none>
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady master 20h v1.17.4
node1 Ready <none> 20h v1.17.4
node2 Ready <none> 20h v1.17.4
删除处于 Terminating 状态的 Pod:
[root@master ~]# kubectl delete pod/kube-flannel-ds-amd64-4729t -n kube-system --grace-period=0 --force
warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "kube-flannel-ds-amd64-4729t" force deleted
再次查看 Pod 状态:DaemonSet 在 master 上重建了新的 flannel Pod(名称后缀随之改变),但新 Pod 一直处于 Pending 状态,且 IP 显示为 &lt;none&gt;:
[root@master ~]# kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-6955765f44-4qv5c 1/1 Running 0 20h 10.244.2.2 node2 <none> <none>
coredns-6955765f44-bwlps 1/1 Running 0 20h 10.244.2.3 node2 <none> <none>
etcd-master 1/1 Running 1 20h 192.168.70.110 master <none> <none>
kube-apiserver-master 1/1 Running 1 20h 192.168.70.110 master <none> <none>
kube-controller-manager-master 1/1 Running 1 20h 192.168.70.110 master <none> <none>
kube-flannel-ds-amd64-2csdd 1/1 Running 0 34m 192.168.70.112 node2 <none> <none>
kube-flannel-ds-amd64-rzx7z 0/1 Pending 0 4m54s <none> master <none> <none>
kube-flannel-ds-amd64-vbbqn 1/1 Running 0 34m 192.168.70.111 node1 <none> <none>
kube-proxy-s2tp2 1/1 Running 1 20h 192.168.70.112 node2 <none> <none>
kube-proxy-xcp8q 1/1 Running 1 20h 192.168.70.111 node1 <none> <none>
kube-proxy-zngnw 1/1 Running 1 20h 192.168.70.110 master <none> <none>
kube-scheduler-master 1/1 Running 1 20h 192.168.70.110 master <none> <none>
于是我重新执行了一下kube-flannel.yml
[root@master ~]# kubectl apply -f kube-flannel.yml
还是不行。于是查看 master 节点上 kubelet 的日志(重定向到文件 a 方便查看):
[root@master ~]# journalctl -xeu kubelet > a
看到了这样一句话:error failed to decode: yaml: line 35: could not find expected ':'
8月 04 13:55:46 master kubelet[12245]: F0804 13:55:46.266838 12245 server.go:198] failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to decode: yaml: line 35: could not find expected ':'
于是去查看config.yaml文件:
[root@master ~]# vi /var/lib/kubelet/config.yaml
#/var/lib/kubelet/config.yaml
1 apiVersion: kubelet.config.k8s.io/v1beta1
2 authentication:
3 anonymous:
4 enabled: false
5 webhook:
6 cacheTTL: 0s
7 enabled: true
8 x509:
9 clientCAFile: /etc/kubernetes/pki/ca.crt
10 authorization:
11 mode: Webhook
12 webhook:
13 cacheAuthorizedTTL: 0s
14 cacheUnauthorizedTTL: 0s
15 clusterDNS:
16 - 10.96.0.10
17 clusterDomain: cluster.local
18 cpuManagerReconcilePeriod: 0s
19 evictionPressureTransitionPeriod: 0s
20 fileCheckFrequency: 0s
21 healthzBindAddress: 127.0.0.1
22 healthzPort: 10248
23 httpCheckFrequency: 0s
24 imageMinimumGCAge: 0s
25 kind: KubeletConfiguration
26 nodeStatusReportFrequency: 0s
27 nodeStatusUpdateFrequency: 0s
28 rotateCertificates: true
29 runtimeRequestTimeout: 0s
30 staticPodPath: /etc/kubernetes/manifests
31 streamingConnectionIdleTimeout: 0s
32 syncFrequency: 0s
33 volumeStatsAggPeriod: 0s
34 cgroupDriver: systemd
找到了问题:YAML 格式错误。
YAML 语法要求映射(key: value)的冒号后面必须有一个空格,而原文件第 35 行写成了 cgroupDriver:systemd(冒号后没有空格),kubelet 因此无法解析配置文件,导致 master 节点 NotReady。
修复方法:在 cgroupDriver: 和 systemd 之间补上一个空格,即改为上面所示的 cgroupDriver: systemd。
然后重启kubelet
[root@master ~]# systemctl restart kubelet
再次查看发现问题解决!
[root@master ~]# kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-6955765f44-4qv5c 1/1 Running 0 21h 10.244.2.2 node2 <none> <none>
coredns-6955765f44-bwlps 1/1 Running 0 21h 10.244.2.3 node2 <none> <none>
etcd-master 1/1 Running 1 21h 192.168.70.110 master <none> <none>
kube-apiserver-master 1/1 Running 1 21h 192.168.70.110 master <none> <none>
kube-controller-manager-master 1/1 Running 1 21h 192.168.70.110 master <none> <none>
kube-flannel-ds-amd64-2csdd 1/1 Running 0 82m 192.168.70.112 node2 <none> <none>
kube-flannel-ds-amd64-rzx7z 1/1 Running 0 53m 192.168.70.110 master <none> <none>
kube-flannel-ds-amd64-vbbqn 1/1 Running 0 82m 192.168.70.111 node1 <none> <none>
kube-proxy-s2tp2 1/1 Running 1 21h 192.168.70.112 node2 <none> <none>
kube-proxy-xcp8q 1/1 Running 1 21h 192.168.70.111 node1 <none> <none>
kube-proxy-zngnw 1/1 Running 1 21h 192.168.70.110 master <none> <none>
kube-scheduler-master 1/1 Running 1 21h 192.168.70.110 master <none> <none>
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready master 21h v1.17.4
node1 Ready <none> 21h v1.17.4
node2 Ready <none> 21h v1.17.4
撒花*★,°*:.☆( ̄▽ ̄)/$:*.°★* 。