etcd and the apiserver stop working as soon as externalIPs is set on the ingress-nginx Service...
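The manifest at /opt/nacos-server/ingress-nginx.yaml is not included in the post; below is a minimal sketch of the Service it presumably defines. The name and namespace come from the error output that follows, but the selector, ports, and especially the externalIPs value (the node address 192.168.137.101 that appears in the kubelet logs, which is also the address the apiserver is reached on) are assumptions.

# Sketch only: written to a throwaway path, not applied to the cluster.
cat <<'EOF' > /tmp/nacos-ingress-svc-sketch.yaml
apiVersion: v1
kind: Service
metadata:
  name: nacos-ingress-svc
  namespace: nacos
spec:
  selector:
    app.kubernetes.io/name: ingress-nginx   # assumed label
  ports:
  - name: http
    port: 80
    targetPort: 80
  - name: https
    port: 443
    targetPort: 443
  externalIPs:
  - 192.168.137.101   # assumed: the node's own IP, on which the apiserver also listens (see kubelet logs)
EOF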

[root@node-01 bin]# kubectl apply -f /opt/nacos-server/ingress-nginx.yaml

service/nacos-ingress-svc created

Error from server: error when creating "/opt/nacos-server/ingress-nginx.yaml": etcdserver: request timed out

[root@node-01 bin]# kubectl apply -f /opt/nacos-server/ingress-nginx.yaml

Error from server: error when retrieving current configuration of:

Resource: "/v1, Resource=services", GroupVersionKind: "/v1, Kind=Service"

Name: "nacos-ingress-svc", Namespace: "nacos"

from server for: "/opt/nacos-server/ingress-nginx.yaml": etcdserver: request timed out

Error from server: error when retrieving current configuration of:

Resource: "extensions/v1beta1, Resource=ingresses", GroupVersionKind: "extensions/v1beta1, Kind=Ingress"

Name: "nacos-ingress", Namespace: "nacos"

from server for: "/opt/nacos-server/ingress-nginx.yaml": etcdserver: request timed out

[root@node-01 bin]# kubectl get pod -A -owide

The connection to the server apiserver.cluster.local:6443 was refused - did you specify the right host or port?
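At this point kubectl can no longer reach apiserver.cluster.local:6443 at all. The commands below are generic diagnostics, not steps taken in the original post; run on node-01, they check whether the externalIP overlaps with the address the control plane itself uses and what state the control-plane static pods are in.

getent hosts apiserver.cluster.local          # what the apiserver name resolves to
ss -ltnp | grep -E '6443|2379|2380'           # is anything still listening on the apiserver/etcd ports
iptables-save -t nat | grep 192.168.137.101   # NAT rules kube-proxy installed for the externalIP
docker ps -a | grep -E 'kube-apiserver|etcd'  # container state of the control-plane static pods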

kubelet logs:

12月 24 14:12:23 node-01 kubelet[5322]: 2020-12-24 14:12:23.021 [INFO][10993] k8s.go 362: Calico CNI using IPs: [100.67.79.133/32] ContainerID="22f6d575f706e034797920f0c84972d41b7b468e0d73f79e96067d8ff7dec07e" Namespace="nacos" Pod="nacos-2" WorkloadEndpoint="node--01-k8s-nacos--2-eth0"

12月 24 14:12:23 node-01 kubelet[5322]: 2020-12-24 14:12:23.021 [INFO][10993] network_linux.go 76: Setting the host side veth name to calic5254285b15 ContainerID="22f6d575f706e034797920f0c84972d41b7b468e0d73f79e96067d8ff7dec07e" Namespace="nacos" Pod="nacos-2" WorkloadEndpoint="node--01-k8s-nacos--2-eth0"

12月 24 14:12:23 node-01 kubelet[5322]: 2020-12-24 14:12:23.022 [INFO][10993] network_linux.go 396: Disabling IPv4 forwarding ContainerID="22f6d575f706e034797920f0c84972d41b7b468e0d73f79e96067d8ff7dec07e" Namespace="nacos" Pod="nacos-2" WorkloadEndpoint="node--01-k8s-nacos--2-eth0"

12月 24 14:12:23 node-01 kubelet[5322]: 2020-12-24 14:12:23.033 [INFO][10993] k8s.go 388: Added Mac, interface name, and active container ID to endpoint ContainerID="22f6d575f706e034797920f0c84972d41b7b468e0d73f79e96067d8ff7dec07e" Namespace="nacos" Pod="nacos-2" WorkloadEndpoint="node--01-k8s-nacos--2-eth0" endpoint=&v3.WorkloadEndpoint{TypeMeta:v1.TypeMeta{Kind:"WorkloadEndpoint", APIVersion:"projectcalico.org/v3"}, ObjectMeta:v1.ObjectMeta{Name:"node--01-k8s-nacos--2-eth0", GenerateName:"nacos-", Namespace:"nacos", SelfLink:"", UID:"52ce03db-b5b1-4b82-a4c3-a46994407527", ResourceVersion:"1672", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63744387140, loc:(*time.Location)(0x2346b20)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"projectcalico.org/orchestrator":"k8s", "projectcalico.org/serviceaccount":"nacos-serviceaccount", "app":"nacos", "controller-revision-hash":"nacos-cccf5dfc7", "statefulset.kubernetes.io/pod-name":"nacos-2", "projectcalico.org/namespace":"nacos"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Spec:v3.WorkloadEndpointSpec{Orchestrator:"k8s", Workload:"", Node:"node-01", ContainerID:"22f6d575f706e034797920f0c84972d41b7b468e0d73f79e96067d8ff7dec07e", Pod:"nacos-2", Endpoint:"eth0", IPNetworks:[]string{"100.67.79.133/32"}, IPNATs:[]v3.IPNAT(nil), IPv4Gateway:"", IPv6Gateway:"", Profiles:[]string{"kns.nacos", "ksa.nacos.nacos-serviceaccount"}, InterfaceName:"calic5254285b15", MAC:"1a:43:9a:94:09:f7", Ports:[]v3.EndpointPort{v3.EndpointPort{Name:"client-port", Protocol:numorstring.Protocol{Type:1, NumVal:0x0, StrVal:"TCP"}, Port:0x2290}, v3.EndpointPort{Name:"rpc-port", Protocol:numorstring.Protocol{Type:1, NumVal:0x0, StrVal:"TCP"}, Port:0x1ea8}}}}

12月 24 14:12:23 node-01 kubelet[5322]: 2020-12-24 14:12:23.045 [INFO][10993] k8s.go 420: Wrote updated endpoint to datastore ContainerID="22f6d575f706e034797920f0c84972d41b7b468e0d73f79e96067d8ff7dec07e" Namespace="nacos" Pod="nacos-2" WorkloadEndpoint="node--01-k8s-nacos--2-eth0"

12月 24 14:13:34 node-01 kubelet[5322]: E1224 14:13:34.658068 5322 controller.go:178] failed to update node lease, error: rpc error: code = Unknown desc = context deadline exceeded

12月 24 14:13:38 node-01 kubelet[5322]: I1224 14:13:38.143010 5322 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: d37437367bca99dd04eb93d09213fb879990bd4932d46f69c0fe818dc96bf28e

12月 24 14:13:38 node-01 kubelet[5322]: I1224 14:13:38.143246 5322 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: a7c4d5000c0aec532df111f27ad5224af7c3481ec597c68aa10f6fb5baae0757

12月 24 14:13:38 node-01 kubelet[5322]: E1224 14:13:38.161951 5322 pod_workers.go:191] Error syncing pod 7f46dba1a07570ae9608fc048d969101 ("kube-controller-manager-node-01_kube-system(7f46dba1a07570ae9608fc048d969101)"), skipping: failed to "StartContainer" for "kube-controller-manager" with CrashLoopBackOff: "back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-node-01_kube-system(7f46dba1a07570ae9608fc048d969101)"

12月 24 14:13:39 node-01 kubelet[5322]: E1224 14:13:39.773850 5322 event.go:260] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-apiserver-node-01.165392100aea0ba8", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"593", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"kube-apiserver-node-01", UID:"42ad136983d0ca046f68ffbd0ace394a", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{kube-apiserver}"}, Reason:"Unhealthy", Message:"Liveness probe failed: HTTP probe failed with statuscode: 500", Source:v1.EventSource{Component:"kubelet", Host:"node-01"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63744386889, loc:(*time.Location)(0x7021540)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xbff12a425df8ff60, ext:359448196425, loc:(*time.Location)(0x7021540)}}, Count:2, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'etcdserver: request timed out' (will not retry!)

12月 24 14:13:41 node-01 kubelet[5322]: E1224 14:13:41.661837 5322 controller.go:178] failed to update node lease, error: rpc error: code = Unknown desc = context deadline exceeded

12月 24 14:13:41 node-01 kubelet[5322]: E1224 14:13:41.689846 5322 event.go:269] Unable to write event: 'Patch https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/events/etcd-node-01.1653920e31ade864: read tcp 192.168.137.101:49034->192.168.137.101:6443: use of closed network connection' (may retry after sleeping)

12月 24 14:13:47 node-01 kubelet[5322]: I1224 14:13:47.083628 5322 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: a7c4d5000c0aec532df111f27ad5224af7c3481ec597c68aa10f6fb5baae0757

12月 24 14:13:47 node-01 kubelet[5322]: E1224 14:13:47.084031 5322 pod_workers.go:191] Error syncing pod 7f46dba1a07570ae9608fc048d969101 ("kube-controller-manager-node-01_kube-system(7f46dba1a07570ae9608fc048d969101)"), skipping: failed to "StartContainer" for "kube-controller-manager" with CrashLoopBackOff: "back-off 10s restarting failed container=kube-controller-manager pod=kube-controller-manager-node-01_kube-system(7f46dba1a07570ae9608fc048d969101)"

12月 24 14:13:48 node-01 kubelet[5322]: E1224 14:13:48.739340 5322 controller.go:178] failed to update node lease, error: rpc error: code = Unknown desc = context deadline exceeded

12月 24 14:13:55 node-01 kubelet[5322]: E1224 14:13:55.767823 5322 controller.go:178] failed to update node lease, error: rpc error: code = Unknown desc = context deadline exceeded

12月 24 14:13:55 node-01 kubelet[5322]: E1224 14:13:55.769451 5322 event.go:269] Unable to write event: 'Patch https://apiserver.cluster.local:6443/api/v1/namespaces/kube-system/events/etcd-node-01.1653920e31ade864: read tcp 192.168.137.101:52584->192.168.137.101:6443: use of closed network connection' (may retry after sleeping)

12月 24 14:13:58 node-01 kubelet[5322]: I1224 14:13:58.751374 5322 topology_manager.go:219] [topologymanager] RemoveContainer - Container ID: a7c4d5000c0aec532df111f27ad5224af7c3481ec597c68aa10f6fb5baae0757

12月 24 14:13:59 node-01 kubelet[5322]: W1224 14:13:59.063828 5322 docker_container.go:224] Deleted previously existing symlink file: "/var/log/pods/kube-system_kube-controller-manager-node-01_7f46dba1a07570ae9608fc048d969101/kube-controller-manager/2.log"

12月 24 14:14:02 node-01 kubelet[5322]: E1224 14:14:02.792873 5322 controller.go:178] failed to update node lease, error: rpc error: code = Unknown desc = context deadline exceeded

12月 24 14:14:02 node-01 kubelet[5322]: I1224 14:14:02.792953 5322 controller.go:106] failed to update lease using latest lease, fallback to ensure lease, err: failed 5 attempts to update node lease

12月 24 14:14:12 node-01 kubelet[5322]: E1224 14:14:12.793190 5322 controller.go:136] failed to ensure node lease exists, will retry in 200ms, error: Get https://apiserver.cluster.local:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/node-01?timeout=10s: net/http: request canceled (Client.Timeout exceeded while awaiting headers)

12月 24 14:14:14 node-01 kubelet[5322]: E1224 14:14:14.769586 5322 desired_state_of_world_populator.go:321] Error processing volume "datadir" for pod "nacos-2_nacos(52ce03db-b5b1-4b82-a4c3-a46994407527)": error processing PVC nacos/datadir-nacos-2: failed to fetch PVC from API server: etcdserver: request timed out

12月 24 14:14:14 node-01 kubelet[5322]: E1224 14:14:14.770828 5322 event.go:260] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"etcd-node-01.1653920e31ade864", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"574", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"etcd-node-01", UID:"31cd8d75e2cd7c1618759f92306008e0", APIVersion:"v1", ResourceVersion:"", FieldPath:"spec.containers{etcd}"}, Reason:"Unhealthy", Message:"Liveness probe failed: HTTP probe failed with statuscode: 503", Source:v1.EventSource{Component:"kubelet", Host:"node-01"}, FirstTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63744386881, loc:(*time.Location)(0x7021540)}}, LastTimestamp:v1.Time{Time:time.Time{wall:0xbff12a42e17d5a50, ext:361507202107, loc:(*time.Location)(0x7021540)}}, Count:2, Type:"Warning", EventTime:v1.MicroTime{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'etcdserver: request timed out' (will not retry!)

12月 24 14:14:14 node-01 kubelet[5322]: W1224 14:14:14.772148 5322 status_manager.go:556] Failed to get status for pod "kube-controller-manager-node-01_kube-system(7f46dba1a07570ae9608fc048d969101)": etcdserver: request timed out

12月 24 14:14:21 node-01 kubelet[5322]: E1224 14:14:21.772834 5322 controller.go:136] failed to ensure node lease exists, will retry in 400ms, error: etcdserver: request timed out

12月 24 14:14:21 node-01 kubelet[5322]: E1224 14:14:21.774619 5322 event.go:260] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"kube-controller-manager-node-01.1653925c504d39b2", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", Reso
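The kubelet keeps hitting "etcdserver: request timed out" and the apiserver's liveness probe is failing, so etcd itself is worth checking directly while kubectl is unusable. The sketch below is not from the original post: the certificate paths assume a standard kubeadm layout under /etc/kubernetes/pki/etcd and may differ in this cluster, and the etcd key for the newly created Service follows the usual /registry/services/specs/<namespace>/<name> layout, so inspect it before deleting anything.

# Health of the local etcd member (adjust cert paths to this cluster's layout).
ETCDCTL_API=3 etcdctl \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt \
  --key=/etc/kubernetes/pki/etcd/healthcheck-client.key \
  endpoint health

# Locate the Service object that was just created, in case it has to be removed
# directly from etcd while the apiserver is down.
ETCDCTL_API=3 etcdctl \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt \
  --key=/etc/kubernetes/pki/etcd/healthcheck-client.key \
  get /registry/services/specs/nacos/nacos-ingress-svc --keys-only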
