一,分发kube-controller-manager二进制文件
# Copy the kube-controller-manager binary to every master node.
# MASTER_IPS is an array defined in /root/env.sh; quote the expansions so
# that word-splitting/globbing cannot mangle the values (ShellCheck SC2068/SC2086).
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"   # red banner per host
  scp /opt/kubernetes/package/kubernetes/server/bin/kube-controller-manager root@"${master_ip}":/opt/kubernetes/bin
done
# Verify the binary was distributed successfully.
# BUG FIX: in the original, "source /root/env.sh" was glued onto the end of the
# comment line, so it was never executed and MASTER_IPS could be unset here.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  ssh root@"${master_ip}" "ls -ld /opt/kubernetes/bin/kube-controller-manager"
done
二,生成kube-controller-manager的systemd文件
# Generate the systemd unit for kube-controller-manager.
# The heredoc delimiter is quoted ('EOF') so the shell performs NO expansion
# inside the body; line continuations are therefore plain single backslashes
# and any future $VARIABLE added to the unit cannot be expanded by accident.
# The generated file is byte-identical to the original version.
cat > /usr/lib/systemd/system/kube-controller-manager.service << 'EOF'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/opt/kubernetes/bin/kube-controller-manager \
--bind-address=127.0.0.1 \
--master=http://127.0.0.1:8080 \
--allocate-node-cidrs=true \
--service-cluster-ip-range=10.1.0.0/16 \
--cluster-cidr=10.2.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--leader-elect=true \
--v=2 \
--logtostderr=false \
--log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
三,分发kube-controller-manager.service配置文件
# Copy the kube-controller-manager.service unit file to every master node.
# Quote the array and scalar expansions to survive unusual values (SC2068/SC2086).
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  scp /usr/lib/systemd/system/kube-controller-manager.service root@"${master_ip}":/usr/lib/systemd/system
done
# Verify the unit file was distributed successfully.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  ssh root@"${master_ip}" "ls -ld /usr/lib/systemd/system/kube-controller-manager.service"
done
四,启动kube-controller-manager服务
# Reload systemd, (re)start kube-controller-manager and enable it at boot
# on every master node. '&&' chaining stops at the first failing step per host.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  ssh root@"${master_ip}" "systemctl daemon-reload && systemctl restart kube-controller-manager && systemctl enable kube-controller-manager"
done
五,验证kube-controller-manager服务是否启动成功
# Check the service state on every master node; expect "Active: active (running)".
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
  echo -e "\033[31m>>> ${master_ip} \033[0m"
  ssh root@"${master_ip}" "systemctl status kube-controller-manager | grep Active"
done
确保状态为 active (running),否则查看日志,确认原因:
# Inspect the service log if the unit is not active (running):
journalctl -u kube-controller-manager
六,验证集群状态
# Check overall control-plane component health (controller-manager should be Healthy):
kubectl get cs
输出:
NAME STATUS MESSAGE ERROR
scheduler Unhealthy Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused
controller-manager Healthy ok
etcd-1 Healthy {"health":"true"}
etcd-0 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
# Show the RBAC permissions granted to kube-controller-manager:
kubectl describe clusterrole system:kube-controller-manager
#输出:
Name: system:kube-controller-manager
Labels: kubernetes.io/bootstrapping=rbac-defaults
Annotations: rbac.authorization.kubernetes.io/autoupdate: true
PolicyRule:
Resources Non-Resource URLs Resource Names Verbs
--------- ----------------- -------------- -----
secrets [ ] [ ] [create delete get update]
endpoints [ ] [ ] [create get update]
serviceaccounts [ ] [ ] [create get update]
events [ ] [ ] [create patch update]
tokenreviews.authentication.k8s.io [ ] [ ] [create]
subjectaccessreviews.authorization.k8s.io [ ] [ ] [create]
configmaps [ ] [ ] [get]
namespaces [ ] [ ] [get]
*.* [ ] [ ] [list watch]
# Show the current leader (recorded in the leader-election annotation
# on the kube-controller-manager endpoint in kube-system):
kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml
#输出:
apiVersion: v1
kind: Endpoints
metadata:
annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master01_15cefbc8-8145-11e9-b6ae-fa163e67fe51","leaseDurationSeconds":15,"acquireTime":"2019-05-28T12:35:48Z","renewTime":"2019-05-28T12:44:06Z","leaderTransitions":1}'
  creationTimestamp: "2019-05-28T12:33:32Z"
name: kube-controller-manager
namespace: kube-system
resourceVersion: "1184"
selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
uid: cdf39ffe-8144-11e9-a235-fa163e67fe51
#可见,当前的 leader 为k8s-master01节点。
#测试 kube-controller-manager 集群的高可用(可选)
#停掉一个或两个节点的 kube-controller-manager 服务,观察其它节点的日志,看是否获取了 leader 权限。