[toc]
1. Installation Requirements
Before you begin, the machines used to deploy the Kubernetes cluster must meet the following requirements:
- One or more machines running openEuler
- Hardware: 2 GB of RAM or more, 2 or more CPUs, 30 GB of disk or more
- Full network connectivity between all machines in the cluster
- Outbound internet access, needed to pull images
- Swap disabled
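A quick way to sanity-check these requirements on each machine:
# Check CPU count, memory, disk space, and swap status
nproc
free -h
df -h /
swapon --show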
2. Prepare the Environment
| Role | IP |
|---|---|
| k8s-master | 192.168.4.114 |
| k8s-node1 | 192.168.4.115 |
| k8s-node2 | 192.168.4.116 |
| k8s-node3 | 192.168.4.118 |
# Disable the firewall:
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux:
sed -i 's/enforcing/disabled/' /etc/selinux/config # permanent
setenforce 0 # temporary
# Disable swap:
# temporary
swapoff -a
# permanent
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Set the hostname:
hostnamectl set-hostname <hostname>
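For example, using the role names from the table above:
hostnamectl set-hostname k8s-master   # on 192.168.4.114
hostnamectl set-hostname k8s-node1    # on 192.168.4.115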
# Add hosts entries on the master:
cat >> /etc/hosts << EOF
192.168.4.114 k8s-master
192.168.4.115 k8s-node1
192.168.4.116 k8s-node2
192.168.4.118 k8s-node3
EOF
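Verify that the names resolve from the master:
ping -c 1 k8s-node1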
# Pass bridged IPv4 traffic to iptables chains:
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
sysctl --system
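If sysctl --system reports "No such file or directory" for the net.bridge.* keys, the br_netfilter module is not loaded yet; load it and re-run the command:
modprobe br_netfilter
lsmod | grep br_netfilter
sysctl --system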
# Time synchronization:
yum install ntpdate -y
ntpdate time.windows.com
# Set the time zone
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
# Set the locale (note: sudo does not apply to the >> redirection, so run this as root)
echo 'LANG="en_US.UTF-8"' >> /etc/profile; source /etc/profile
3. Install Docker/kubeadm/kubelet [all nodes]
This setup uses Docker as Kubernetes' CRI (container runtime), so install Docker first.
3.1 Install Docker
# Download the repo file
wget -O /etc/yum.repos.d/docker-ce.repo https://repo.huaweicloud.com/docker-ce/linux/centos/docker-ce.repo
# The repo file, pointing at the Huawei Cloud mirror:
$ vim /etc/yum.repos.d/docker-ce.repo
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-stable-debuginfo]
name=Docker CE Stable - Debuginfo $basearch
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/debug-$basearch/stable
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-stable-source]
name=Docker CE Stable - Sources
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/source/stable
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-test]
name=Docker CE Test - $basearch
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-test-debuginfo]
name=Docker CE Test - Debuginfo $basearch
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/debug-$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-test-source]
name=Docker CE Test - Sources
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/source/test
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-nightly]
name=Docker CE Nightly - $basearch
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-nightly-debuginfo]
name=Docker CE Nightly - Debuginfo $basearch
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/debug-$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
[docker-ce-nightly-source]
name=Docker CE Nightly - Sources
baseurl=https://repo.huaweicloud.com/docker-ce/linux/centos/7/source/nightly
enabled=0
gpgcheck=1
gpgkey=https://repo.huaweicloud.com/docker-ce/linux/centos/gpg
# Install Docker (pinned version; for the latest, use the commented line)
#yum -y install docker-ce
yum -y install docker-ce-19.03.3-3.el7
# Start Docker
systemctl daemon-reload
systemctl start docker
systemctl enable docker
docker ps
# Check the current Docker root directory
docker info |grep "Docker Root Dir"
# Configure the Harbor registry address
#vim /lib/systemd/system/docker.service
# Change ExecStart to (the registry is given as host:port, without the https:// scheme):
ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry docker.gayj.cn:444 --containerd=/run/containerd/containerd.sock
# Configure a registry mirror and the Docker data path:
mkdir -p /data/docker
cat >/etc/docker/daemon.json <<EOF
{
  "data-root": "/data/docker",
  "registry-mirrors": ["https://almtd3fa.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
# Remove the old data directory
rm -rvf /var/lib/docker
systemctl daemon-reload
systemctl restart docker
docker ps
# Confirm the data directory has changed
docker info |grep "Docker Root Dir"
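Also confirm that the cgroup driver setting from daemon.json took effect:
docker info | grep -i "cgroup driver"
# Expected output: Cgroup Driver: systemd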
3.2 Add the Alibaba Cloud YUM Repository
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
3.3 Install kubeadm, kubelet, and kubectl
Versions change frequently, so pin the version here:
yum install -y kubelet-1.20.7 kubeadm-1.20.7 kubectl-1.20.7
systemctl enable kubelet.service
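Verify the installed versions:
kubeadm version
kubelet --version
kubectl version --client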
3.4 Change the Data Directory
[Changing the data directory] http://t.zoukankan.com/deny-p-14169356.html
3.5 Reinstall Kubernetes
kubeadm reset -f
rm -rf /var/lib/etcd /var/lib/kubelet /var/lib/dockershim /var/run/kubernetes /var/lib/cni /etc/cni/net.d
4. Deploy the Kubernetes Master
https://kubernetes.io/zh/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#initializing-your-control-plane-node
Run on 192.168.4.114 (the master).
kubeadm init \
--apiserver-advertise-address=192.168.4.114 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.20.7 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
# Add --ignore-preflight-errors=all to ignore preflight errors.
- --apiserver-advertise-address: the address the control plane advertises to the cluster
- --image-repository: the default registry k8s.gcr.io is unreachable from inside China, so use the Alibaba Cloud mirror instead
- --kubernetes-version: the K8s version; must match the version installed above
- --service-cidr: the cluster-internal virtual network that serves as the unified Service entry point for Pods
- --pod-network-cidr: the Pod network; must match the CNI manifest deployed below
Or bootstrap with a configuration file:
$ vi kubeadm.conf
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.20.7
imageRepository: registry.aliyuncs.com/google_containers
networking:
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
$ kubeadm init --config kubeadm.conf --ignore-preflight-errors=all
Copy the kubeconfig that kubectl uses to authenticate to the cluster into the default path:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
localhost.localdomain NotReady control-plane,master 20s v1.20.0
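The master stays NotReady until the CNI network plugin is deployed (section 6). In the meantime, the control-plane pods can be checked with:
kubectl get pods -n kube-system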
5. Join the Kubernetes Nodes
Run on 192.168.4.115/116/118 (the nodes).
To add a new node to the cluster, run the kubeadm join command printed by kubeadm init:
kubeadm join 192.168.4.114:6443 --token 7gqt13.kncw9hg5085iwclx \
--discovery-token-ca-cert-hash sha256:66fbfcf18649a5841474c2dc4b9ff90c02fc05de0798ed690e1754437be35a01
The default token is valid for 24 hours; once it expires it can no longer be used and a new token must be created. Generate one directly with:
kubeadm token create --print-join-command
https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-join/
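After each node joins, confirm on the master that it has registered:
kubectl get nodes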
6. Deploy the Container Network (CNI)
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#pod-network
Note: deploy only one of the options below; Calico is recommended.
Calico is a pure layer-3 data center networking solution that supports a wide range of platforms, including Kubernetes and OpenStack.
On every compute node, Calico uses the Linux kernel to implement an efficient virtual router (vRouter) that handles data forwarding, and each vRouter propagates the routes of the workloads running on it to the rest of the Calico network via the BGP protocol.
Calico also implements Kubernetes network policy, providing ACL functionality.
https://docs.projectcalico.org/getting-started/kubernetes/quickstart
wget -c https://docs.projectcalico.org/v3.20/manifests/calico.yaml --no-check-certificate
After downloading, edit the Pod network definition (CALICO_IPV4POOL_CIDR) so that it matches the --pod-network-cidr passed to kubeadm init above, then apply the manifest:
# Change to:
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
  value: "k8s,bgp"
# Add below (ens192 is the name of the local NIC):
- name: IP_AUTODETECTION_METHOD
  value: "interface=ens192"
# Apply
kubectl apply -f calico.yaml
kubectl get pods -n kube-system
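The nodes only turn Ready once the calico-node pods are Running; watch them with:
kubectl get pods -n kube-system -l k8s-app=calico-node -w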
7. Test the Kubernetes Cluster
1) Remove the taint from the master
#kubectl taint nodes <your-master-node-name> node-role.kubernetes.io/master:NoSchedule-
kubectl taint nodes k8s-master node-role.kubernetes.io/master:NoSchedule-
2) Validate the cluster
- Verify that Pods run
- Verify Pod-to-Pod network communication
- Verify DNS resolution
Create a Pod in the Kubernetes cluster and verify that it runs normally:
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pod,svc
Access: http://NodeIP:Port
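For example, read the assigned NodePort from the Service and curl it (node IP taken from the table in section 2):
NODE_PORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl http://192.168.4.115:$NODE_PORT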
8. Deploy the Dashboard
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.3/aio/deploy/recommended.yaml
By default the Dashboard is reachable only from inside the cluster. Change the Service to type NodePort to expose it externally:
$ vi recommended.yaml
...
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30000
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort
...
$ kubectl apply -f recommended.yaml
$ kubectl get pods -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
dashboard-metrics-scraper-6b4884c9d5-gl8nr 1/1 Running 0 13m
kubernetes-dashboard-7f99b75bf4-89cds 1/1 Running 0 13m
Access: https://NodeIP:30000
Create a service account and bind it to the default cluster-admin role:
# Create the user
kubectl create serviceaccount dashboard-admin -n kube-system
# Grant permissions
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
# Work around errors on the web page (note: this grants cluster-admin to anonymous users; do this only in test environments)
kubectl create clusterrolebinding system:anonymous --clusterrole=cluster-admin --user=system:anonymous
# Access: https://<node IP>:30000/
https://192.168.4.115:30000/
# Get the user token
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
Log in to the Dashboard with the printed token.
9. Fix Chrome Being Unable to Log In to the Dashboard
# Check
kubectl get secrets -n kubernetes-dashboard
# Delete the default secret and create a new one from a self-signed certificate
kubectl delete secret kubernetes-dashboard-certs -n kubernetes-dashboard
# Create a CA
openssl genrsa -out ca.key 2048
openssl req -new -x509 -key ca.key -out ca.crt -days 3650 -subj "/C=CN/ST=HB/L=WH/O=DM/OU=YPT/CN=CA"
openssl x509 -in ca.crt -noout -text
# Issue the Dashboard certificate
openssl genrsa -out dashboard.key 2048
openssl req -new -key dashboard.key -out dashboard.csr -subj "/O=white/CN=dashboard"
openssl x509 -req -in dashboard.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out dashboard.crt -days 3650
# Create the new secret
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.crt=/opt/dashboard/dashboard.crt --from-file=dashboard.key=/opt/dashboard/dashboard.key -n kubernetes-dashboard
# Alternative: create the secret from the apiserver certificate instead
#kubectl create secret generic kubernetes-dashboard-certs \
#--from-file=/etc/kubernetes/pki/apiserver.key \
#--from-file=/etc/kubernetes/pki/apiserver.crt \
#-n kubernetes-dashboard
# vim recommended.yaml, add the certificate paths:
args:
  - --auto-generate-certificates
  - --tls-key-file=dashboard.key
  - --tls-cert-file=dashboard.crt
  # - --tls-key-file=apiserver.key
  # - --tls-cert-file=apiserver.crt
# Re-apply
kubectl apply -f recommended.yaml
# Delete the pods so the change takes effect
kubectl get pod -n kubernetes-dashboard | grep -v NAME | awk '{print "kubectl delete po " $1 " -n kubernetes-dashboard"}' | sh
Access: https://192.168.4.116:30000/#/login
# Get the user token
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
10. Install metrics-server
# Pull the image
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.5.0
# Retag it
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.5.0 k8s.gcr.io/metrics-server/metrics-server:v0.5.0
# Remove the original tag
docker rmi -f registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.5.0
# Create a working directory and write the manifest below into components.yaml (file name assumed)
mkdir -p /opt/k8s/metrics-server
cd /opt/k8s/metrics-server
vim components.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls
        image: k8s.gcr.io/metrics-server/metrics-server:v0.5.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          initialDelaySeconds: 20
          periodSeconds: 10
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
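Apply the manifest (the file name components.yaml is an assumption here) and verify that metrics start flowing after a minute or so:
kubectl apply -f components.yaml
kubectl top nodes
kubectl top pods -n kube-system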
11. Switch the Container Runtime to containerd
https://kubernetes.io/zh/docs/setup/production-environment/container-runtimes/#containerd
1. Configure prerequisites
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter
# Check the modules
lsmod |grep overlay
lsmod |grep br_netfilter
# Set the required sysctl parameters; they persist across reboots.
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
# Apply
sudo sysctl --system
2. Install containerd
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
yum update -y && sudo yum install -y containerd.io
mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml
systemctl restart containerd
3. Edit the configuration file
$ vi /etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.2"
...
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
...
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://b9pmyelo.mirror.aliyuncs.com"]
systemctl restart containerd
systemctl enable containerd
systemctl stop docker
4. Configure kubelet to use containerd
vi /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS=--container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --cgroup-driver=systemd
systemctl restart kubelet
5. Verify
[root@k8s-master ~]# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master Ready control-plane,master 24h v1.20.0 192.168.4.114 <none> CentOS Linux 7 (Core) 3.10.0-1127.19.1.el7.x86_64 docker://20.10.8
k8s-node1 Ready <none> 24h v1.20.0 192.168.4.115 <none> CentOS Linux 7 (Core) 3.10.0-1127.19.1.el7.x86_64 docker://20.10.8
k8s-node2 Ready <none> 24h v1.20.0 192.168.4.116 <none> CentOS Linux 7 (Core) 3.10.0-1127.19.1.el7.x86_64 docker://20.10.8
k8s-node3 Ready <none> 24h v1.22.1 192.168.4.118 <none> CentOS Linux 7 (Core) 3.10.0-1127.19.1.el7.x86_64 containerd://1.4.9
12. Manage Containers with crictl
crictl is a command-line interface for CRI-compatible container runtimes. You can use it to inspect and debug container runtimes and applications on Kubernetes nodes. crictl and its source code live in the cri-tools repository.
[Video] https://asciinema.org/a/179047
[Usage] https://kubernetes.io/zh/docs/tasks/debug-application-cluster/crictl/
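crictl reads its endpoint from /etc/crictl.yaml; point it at the same containerd socket configured for the kubelet above, then the usual commands work:
cat > /etc/crictl.yaml << EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
EOF
# List running containers, images, and pod sandboxes
crictl ps
crictl images
crictl pods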
13. Common Commands
1. Common commands
# Check the status of the master components:
kubectl get cs
# Check node status:
kubectl get node
# Show the URLs proxied by the apiserver:
kubectl cluster-info
# Dump detailed cluster information:
kubectl cluster-info dump
# Describe a resource:
kubectl describe <resource> <name>
# List the API resources known to the cluster:
kubectl api-resources
2. Command walkthrough
[root@k8s-master ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Unhealthy Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
controller-manager Unhealthy Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused
etcd-0 Healthy {"health":"true"}
# Edit the manifests and comment out the --port=0 line
$ vim /etc/kubernetes/manifests/kube-scheduler.yaml
$ vim /etc/kubernetes/manifests/kube-controller-manager.yaml
# - --port=0
# Restart
systemctl restart kubelet
# Check the master components
[root@k8s-master ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health":"true"}