1. calico 网络部署失败：所有 calico Pod 卡在 Pending / Init:0/2 状态
kubectl get pod --all-namespaces -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system calico-kube-controllers-554647c955-8sxdh 0/1 Pending 0 4m <none> <none> <none> <none>
kube-system calico-node-6mqkf 0/1 Init:0/2 0 4m 192.168.1.15 k8s4 <none> <none>
kube-system calico-node-7bhwx 0/1 Init:0/2 0 4m 192.168.1.17 k8s6 <none> <none>
kube-system calico-node-llwt6 0/1 Init:0/2 0 4m 192.168.1.12 k8s1 <none> <none>
kube-system calico-node-mcb4c 0/1 Init:0/2 0 4m 192.168.1.16 k8s5 <none> <none>
kube-system calico-node-mkdwr 0/1 Init:0/2 0 4m 192.168.1.14 k8s3 <none> <none>
kube-system calico-node-xn2jz 0/1 Init:0/2 0 4m 192.168.1.13 k8s2 <none> <none>
kube-system calico-typha-6454f6cfd7-j7729 0/1 Pending 0 4m <none> <none> <none> <none>
[root@K8S1 work]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-554647c955-p7bjc 0/1 Pending 0 4m36s
kube-system calico-node-j7825 0/1 Init:0/2 0 4m36s
kube-system calico-node-ktmf5 0/1 Init:0/2 0 4m36s
kube-system calico-node-nf5wg 0/1 Init:0/2 0 4m36s
kube-system calico-node-rwpfq 0/1 Init:0/2 0 4m36s
kube-system calico-node-scm7s 0/1 Init:0/2 0 4m36s
kube-system calico-node-tpqbl 0/1 Init:0/2 0 4m36s
kube-system calico-typha-6454f6cfd7-z5bvk 0/1 Pending 0 4m36s
2. 用 kubectl describe 查看异常 Pod 的详细状态和事件
[root@K8S1 work]# kubectl describe pods calico-node-rwpfq --namespace kube-system
Name: calico-node-rwpfq
Namespace: kube-system
Priority: 2000001000
Priority Class Name: system-node-critical
Node: k8s3/192.168.1.14
Start Time: Sat, 20 Jul 2024 21:58:45 +0800
Labels: controller-revision-hash=6cdbc66bf4
k8s-app=calico-node
pod-template-generation=1
Annotations: <none>
Status: Pending
IP: 192.168.1.14
IPs:
IP: 192.168.1.14
Controlled By: DaemonSet/calico-node
Init Containers:
upgrade-ipam:
Container ID:
Image: 192.168.1.11:443/myharbor/cni:v3.22.0
Image ID:
Port: <none>
Host Port: <none>
Command:
/opt/cni/bin/calico-ipam
-upgrade
State: Waiting
Reason: PodInitializing
Ready: False
Restart Count: 0
Environment Variables from:
kubernetes-services-endpoint ConfigMap Optional: true
Environment:
KUBERNETES_NODE_NAME: (v1:spec.nodeName)
CALICO_NETWORKING_BACKEND: <set to the key 'calico_backend' of config map 'calico-config'> Optional: false
Mounts:
/host/opt/cni/bin from cni-bin-dir (rw)
/var/lib/cni/networks from host-local-net-dir (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-54hkd (ro)
install-cni:
Container ID:
Image: 192.168.1.11:443/myharbor/cni:v3.22.0
Image ID:
Port: <none>
Host Port: <none>
Command:
/opt/cni/bin/install
State: Waiting
Reason: PodInitializing
Ready: False
Restart Count: 0
Environment Variables from:
kubernetes-services-endpoint ConfigMap Optional: true
Environment:
CNI_CONF_NAME: 10-calico.conflist
CNI_NETWORK_CONFIG: <set to the key 'cni_network_config' of config map 'calico-config'> Optional: false
KUBERNETES_NODE_NAME: (v1:spec.nodeName)
CNI_MTU: <set to the key 'veth_mtu' of config map 'calico-config'> Optional: false
SLEEP: false
Mounts:
/host/etc/cni/net.d from cni-net-dir (rw)
/host/opt/cni/bin from cni-bin-dir (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-54hkd (ro)
Containers:
calico-node:
Container ID:
Image: 192.168.1.11:443/myharbor/node:v3.22.0
Image ID:
Port: <none>
Host Port: <none>
State: Waiting
Reason: PodInitializing
Ready: False
Restart Count: 0
Requests:
cpu: 250m
Liveness: exec [/bin/calico-node -felix-live -bird-live] delay=10s timeout=10s period=10s #success=1 #failure=6
Readiness: exec [/bin/calico-node -felix-ready -bird-ready] delay=0s timeout=10s period=10s #success=1 #failure=3
Environment Variables from:
kubernetes-services-endpoint ConfigMap Optional: true
Environment:
DATASTORE_TYPE: kubernetes
FELIX_TYPHAK8SSERVICENAME: <set to the key 'typha_service_name' of config map 'calico-config'> Optional: false
WAIT_FOR_DATASTORE: true
NODENAME: (v1:spec.nodeName)
CALICO_NETWORKING_BACKEND: <set to the key 'calico_backend' of config map 'calico-config'> Optional: false
CLUSTER_TYPE: k8s,bgp
IP_AUTODETECTION_METHOD: interface=ens.*
IP: autodetect
CALICO_IPV4POOL_IPIP: Always
CALICO_IPV4POOL_VXLAN: Never
CALICO_IPV6POOL_VXLAN: Never
FELIX_IPINIPMTU: <set to the key 'veth_mtu' of config map 'calico-config'> Optional: false
FELIX_VXLANMTU: <set to the key 'veth_mtu' of config map 'calico-config'> Optional: false
FELIX_WIREGUARDMTU: <set to the key 'veth_mtu' of config map 'calico-config'> Optional: false
CALICO_IPV4POOL_CIDR: 172.16.0.0/16
CALICO_DISABLE_FILE_LOGGING: true
FELIX_DEFAULTENDPOINTTOHOSTACTION: ACCEPT
FELIX_IPV6SUPPORT: false
FELIX_HEALTHENABLED: true
Mounts:
/host/etc/cni/net.d from cni-net-dir (rw)
/lib/modules from lib-modules (ro)
/run/xtables.lock from xtables-lock (rw)
/sys/fs/ from sysfs (rw)
/var/lib/calico from var-lib-calico (rw)
/var/log/calico/cni from cni-log-dir (ro)
/var/run/calico from var-run-calico (rw)
/var/run/nodeagent from policysync (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-54hkd (ro)
Conditions:
Type Status
Initialized False
Ready False
ContainersReady False
PodScheduled True
Volumes:
lib-modules:
Type: HostPath (bare host directory volume)
Path: /lib/modules
HostPathType:
var-run-calico:
Type: HostPath (bare host directory volume)
Path: /var/run/calico
HostPathType:
var-lib-calico:
Type: HostPath (bare host directory volume)
Path: /var/lib/calico
HostPathType:
xtables-lock:
Type: HostPath (bare host directory volume)
Path: /run/xtables.lock
HostPathType: FileOrCreate
sysfs:
Type: HostPath (bare host directory volume)
Path: /sys/fs/
HostPathType: DirectoryOrCreate
cni-bin-dir:
Type: HostPath (bare host directory volume)
Path: /opt/cni/bin
HostPathType:
cni-net-dir:
Type: HostPath (bare host directory volume)
Path: /etc/cni/net.d
HostPathType:
cni-log-dir:
Type: HostPath (bare host directory volume)
Path: /var/log/calico/cni
HostPathType:
host-local-net-dir:
Type: HostPath (bare host directory volume)
Path: /var/lib/cni/networks
HostPathType:
policysync:
Type: HostPath (bare host directory volume)
Path: /var/run/nodeagent
HostPathType: DirectoryOrCreate
kube-api-access-54hkd:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: Burstable
Node-Selectors: kubernetes.io/os=linux
Tolerations: :NoSchedule op=Exists
:NoExecute op=Exists
CriticalAddonsOnly op=Exists
node.kubernetes.io/disk-pressure:NoSchedule op=Exists
node.kubernetes.io/memory-pressure:NoSchedule op=Exists
node.kubernetes.io/network-unavailable:NoSchedule op=Exists
node.kubernetes.io/not-ready:NoExecute op=Exists
node.kubernetes.io/pid-pressure:NoSchedule op=Exists
node.kubernetes.io/unreachable:NoExecute op=Exists
node.kubernetes.io/unschedulable:NoSchedule op=Exists
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 6m22s default-scheduler Successfully assigned kube-system/calico-node-rwpfq to k8s3
Warning FailedCreatePodSandBox 62s (x26 over 6m21s) kubelet Failed to create pod sandbox: rpc error: code = NotFound desc = failed to get sandbox image "192.168.1.11:443/myharbor/pause:3.6": failed to pull image "192.168.1.11:443/myharbor/pause:3.6": failed to pull and unpack image "192.168.1.11:443/myharbor/pause:3.6": failed to resolve reference "192.168.1.11:443/myharbor/pause:3.6": 192.168.1.11:443/myharbor/pause:3.6: not found
--找到原因：kubelet 无法从私有仓库拉取 sandbox（pause）镜像，Pod 沙箱创建失败：
failed to get sandbox image "192.168.1.11:443/myharbor/pause:3.6"
3. 检查镜像
[root@K8S1 soft]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
192.168.1.11:443/myharbor/typha v3.22.0 35c2fa1ee0a1 2 years ago 127MB
192.168.1.11:443/myharbor/kube-controllers v3.22.0 df76d42861ee 2 years ago 132MB
192.168.1.11:443/myharbor/cni v3.22.0 f86797de8afd 2 years ago 236MB
192.168.1.11:443/myharbor/pod2daemon-flexvol v3.22.0 59daef946c8c 2 years ago 21.4MB
192.168.1.11:443/myharbor/node v3.22.0 f109b1742d34 2 years ago 213MB
--果然本地没有 pause 镜像，导入镜像。注意：docker load 只是把镜像导入本地 Docker，
  还必须 docker push 推送到 Harbor 仓库（192.168.1.11:443），各节点的 kubelet 才能拉取到。
docker load < docker-pause_3.6.tar
docker push 192.168.1.11:443/myharbor/pause:3.6
...................
[root@K8S1 soft]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
192.168.1.11:443/myharbor/typha v3.22.0 35c2fa1ee0a1 2 years ago 127MB
192.168.1.11:443/myharbor/kube-controllers v3.22.0 df76d42861ee 2 years ago 132MB
192.168.1.11:443/myharbor/cni v3.22.0 f86797de8afd 2 years ago 236MB
192.168.1.11:443/myharbor/pod2daemon-flexvol v3.22.0 59daef946c8c 2 years ago 21.4MB
192.168.1.11:443/myharbor/node v3.22.0 f109b1742d34 2 years ago 213MB
192.168.1.11:443/myharbor/pause 3.6 6270bb605e12 2 years ago 683kB
4.重建calico网络
[root@K8S1 work]# kubectl delete -f /data/k8s/work/calico.yaml
configmap "calico-config" deleted
customresourcedefinition.apiextensions.k8s.io "bgpconfigurations.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "bgppeers.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "blockaffinities.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "caliconodestatuses.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "clusterinformations.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "felixconfigurations.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "globalnetworkpolicies.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "globalnetworksets.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "hostendpoints.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "ipamblocks.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "ipamconfigs.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "ipamhandles.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "ippools.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "ipreservations.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "kubecontrollersconfigurations.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "networkpolicies.crd.projectcalico.org" deleted
customresourcedefinition.apiextensions.k8s.io "networksets.crd.projectcalico.org" deleted
clusterrole.rbac.authorization.k8s.io "calico-kube-controllers" deleted
clusterrolebinding.rbac.authorization.k8s.io "calico-kube-controllers" deleted
clusterrole.rbac.authorization.k8s.io "calico-node" deleted
clusterrolebinding.rbac.authorization.k8s.io "calico-node" deleted
service "calico-typha" deleted
deployment.apps "calico-typha" deleted
poddisruptionbudget.policy "calico-typha" deleted
daemonset.apps "calico-node" deleted
serviceaccount "calico-node" deleted
deployment.apps "calico-kube-controllers" deleted
serviceaccount "calico-kube-controllers" deleted
poddisruptionbudget.policy "calico-kube-controllers" deleted
[root@K8S1 work]# kubectl apply -f /data/k8s/work/calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
service/calico-typha created
deployment.apps/calico-typha created
poddisruptionbudget.policy/calico-typha created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
poddisruptionbudget.policy/calico-kube-controllers created
5.检查POD
[root@K8S1 work]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-554647c955-rb5fs 1/1 Running 0 33s
kube-system calico-node-7zpwg 1/1 Running 0 33s
kube-system calico-node-9qrhz 1/1 Running 0 33s
kube-system calico-node-bvjnh 1/1 Running 0 33s
kube-system calico-node-vtnsr 1/1 Running 0 33s
kube-system calico-node-vvkdz 1/1 Running 0 33s
kube-system calico-node-zkxkq 1/1 Running 0 33s
kube-system calico-typha-6454f6cfd7-shs92 1/1 Running 0 33s
--至此，calico 网络重新部署成功，所有 Pod 均为 Running 状态。