一,分发kube-proxy二进制文件
# Push the kube-proxy binary to every master node, then confirm it arrived.
# MASTER_IPS is defined in /root/env.sh. Expansions are quoted (SC2068/SC2086).
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
echo -e "\033[31m>>> ${master_ip} \033[0m"
scp /opt/kubernetes/package/kubernetes/node/bin/kube-proxy "root@${master_ip}:/opt/kubernetes/bin"
done
# Verify the binary landed on each master
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
echo -e "\033[31m>>> ${master_ip} \033[0m"
ssh "root@${master_ip}" "ls -ld /opt/kubernetes/bin/kube-proxy"
done
二,配置角色和认证参数
#master01操作
# Grant the kubelet-bootstrap user the system:node-bootstrapper cluster role so
# kubelets can submit CSRs during TLS bootstrapping. Run once, on master01 only.
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
三,创建kubelet bootstrapping kubeconfig文件,设置集群参数
# Record the cluster endpoint and CA into bootstrap.kubeconfig.
# --embed-certs=true inlines the ca.pem content rather than storing its path.
# NOTE(review): 172.27.128.200:6443 is presumably the apiserver VIP — confirm.
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://172.27.128.200:6443 \
--kubeconfig=bootstrap.kubeconfig
四,设置客户端认证参数
# Add the kubelet-bootstrap user with its bootstrap token.
# NOTE(review): this token must match the one in the apiserver's token file
# (token.csv) — verify against the apiserver configuration.
kubectl config set-credentials kubelet-bootstrap \
--token=416569d477d651706738c3b6b8e2023e \
--kubeconfig=bootstrap.kubeconfig
五,设置上下文认证参数
# Tie the cluster and the bootstrap user together in a context named "default".
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
六,选择默认上下文
# Make "default" the active context of bootstrap.kubeconfig.
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
七, 分发bootstrap.kubeconfig的config文件
#执行上面的操作后,会在当前目录生成一个bootstrap.kubeconfig的config文件
# Copy the generated bootstrap.kubeconfig to every master, then verify.
# Expansions quoted to survive unusual values (SC2068/SC2086).
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
echo -e "\033[31m>>> ${master_ip} \033[0m"
scp bootstrap.kubeconfig "root@${master_ip}:/opt/kubernetes/cfg/"
done
# Verify distribution succeeded
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
echo -e "\033[31m>>> ${master_ip} \033[0m"
ssh "root@${master_ip}" "ls -ld /opt/kubernetes/cfg/bootstrap.kubeconfig"
done
八,设置支持CNI
# Write a default flannel CNI config on each node, then read it back to verify.
# The single-quoted remote command keeps the heredoc literal on the remote side.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
echo -e "\033[31m>>> ${master_ip} \033[0m"
ssh "root@${master_ip}" "mkdir -p /etc/cni/net.d"
ssh "root@${master_ip}" 'cat > /etc/cni/net.d/10-default.conf << EOF
{
"name": "flannel",
"type": "flannel",
"delegate": {
"bridge": "docker0",
"isDefaultGateway": true,
"mtu": 1400
}
}
EOF'
done
# Verify the CNI config was written
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
echo -e "\033[31m>>> ${master_ip} \033[0m"
ssh "root@${master_ip}" "cat /etc/cni/net.d/10-default.conf"
done
九,配置kube-proxy使用LVS
# Install the userspace tools kube-proxy's IPVS mode relies on.
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
echo -e "\033[31m>>> ${master_ip} \033[0m"
ssh "root@${master_ip}" "yum install -y ipvsadm ipset conntrack"
done
十,创建 kube-proxy 证书
# Generate the CSR definition for the kube-proxy client certificate.
# CN system:kube-proxy is matched by the predefined system:node-proxier binding.
# Guard the cd so the file is never written into the wrong directory.
cd /opt/kubernetes/ssl || exit 1
# Quote the delimiter: the JSON contains no expansions and must stay literal.
cat > kube-proxy-csr.json << 'EOF'
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
- CN:指定该证书的 User 为 system:kube-proxy;
- 预定义的 RoleBinding system:node-proxier 将 User system:kube-proxy 与 Role system:node-proxier 绑定,该 Role 授予了调用 kube-apiserver Proxy 相关 API 的权限;
- 该证书只会被 kube-proxy 当做 client 证书使用,所以 hosts 字段为空;
十一,生成证书
# Sign the kube-proxy client certificate with the cluster CA using the
# "kubernetes" profile; cfssljson writes kube-proxy.pem / kube-proxy-key.pem.
cd /opt/kubernetes/ssl
cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
十二,分发证书
# Ship the kube-proxy certificate pair to every master, then verify.
cd /opt/kubernetes/ssl || exit 1
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
echo -e "\033[31m>>> ${master_ip} \033[0m"
scp kube-proxy-key.pem kube-proxy.pem "root@${master_ip}:/opt/kubernetes/ssl/"
done
# Verify both files arrived
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
echo -e "\033[31m>>> ${master_ip} \033[0m"
ssh "root@${master_ip}" "ls -ld /opt/kubernetes/ssl/{kube-proxy-key.pem,kube-proxy.pem}"
done
十三,创建kube-proxy配置文件
# Record the cluster endpoint and CA into kube-proxy.kubeconfig.
# --embed-certs=true inlines the ca.pem content rather than storing its path.
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://172.27.128.200:6443 \
--kubeconfig=kube-proxy.kubeconfig
--embed-certs=true:将 ca.pem 证书内容嵌入到生成的 kube-proxy.kubeconfig 文件中(不加时,写入的是证书文件路径)
十四,创建kube-proxy用户
# Add the kube-proxy user, embedding its client certificate and key so the
# kubeconfig is self-contained when copied to other nodes.
kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
--client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
十五,设置默认上下文
# Tie the cluster and the kube-proxy user together in a context named "default".
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
十六,切换上下文为default
# Make "default" the active context of kube-proxy.kubeconfig.
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
十七,分发 kubeconfig 文件
# Copy kube-proxy.kubeconfig to every master's config directory, then verify.
cd /opt/kubernetes/ssl || exit 1
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
echo -e "\033[31m>>> ${master_ip} \033[0m"
scp kube-proxy.kubeconfig "root@${master_ip}:/opt/kubernetes/cfg/"
done
# Verify distribution succeeded
source /root/env.sh
for master_ip in "${MASTER_IPS[@]}"
do
echo -e "\033[31m>>> ${master_ip} \033[0m"
ssh "root@${master_ip}" "ls -ld /opt/kubernetes/cfg/kube-proxy.kubeconfig"
done
十八,创建kube-proxy服务配置文件
# Template systemd unit for kube-proxy; ##NODE_IP## is substituted per node.
# FIX: the original passed both --logtostderr=true and --logtostderr=false;
# with --log-dir set, the intent is file logging, so only =false is kept.
# The heredoc delimiter is deliberately unquoted: \\ must collapse to a single
# backslash so systemd sees proper line continuations.
cat > kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \\
--bind-address=##NODE_IP## \\
--hostname-override=##NODE_IP## \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \\
--masquerade-all \\
--feature-gates=SupportIPVSProxyMode=true \\
--proxy-mode=ipvs \\
--ipvs-min-sync-period=5s \\
--ipvs-sync-period=5s \\
--ipvs-scheduler=rr \\
--v=2 \\
--logtostderr=false \\
--log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
#bindAddress: 监听地址
#hostnameOverride: 参数值必须与 kubelet 的值一致,否则 kube-proxy 启动后会找不到该 Node,从而不会创建任何 ipvs 规则
使用变量创建各节点配置
# Render one unit file per node from the template, substituting ##NODE_IP##.
source /root/env.sh
# Iterate over however many nodes env.sh defines instead of hard-coding 3.
for (( i=0; i < ${#NODE_IPS[@]}; i++ ))
do
sed -e "s/##NODE_IP##/${NODE_IPS[i]}/" kube-proxy.service > "kube-proxy-${NODE_IPS[i]}.service"
done
# Verify the per-node unit files were created
ls -ld kube-proxy-*.service
十九,分发kube-proxy服务配置文件
# Install each node's rendered unit file as kube-proxy.service, then verify.
cd /opt/kubernetes/ssl || exit 1
source /root/env.sh
for node_ip in "${NODE_IPS[@]}"
do
echo -e "\033[31m>>> ${node_ip} \033[0m"
scp "kube-proxy-${node_ip}.service" "root@${node_ip}:/usr/lib/systemd/system/kube-proxy.service"
done
# Verify installation succeeded
source /root/env.sh
for node_ip in "${NODE_IPS[@]}"
do
echo -e "\033[31m>>> ${node_ip} \033[0m"
ssh "root@${node_ip}" "ls -ld /usr/lib/systemd/system/kube-proxy.service"
done
二十,启动kube-proxy服务
#启动服务前必须先创建工作目录
# The working directory must exist before the service starts.
# FIX: mkdir -p so reruns don't fail, and daemon-reload so systemd picks up
# the newly installed unit file before (re)starting it.
source /root/env.sh
for node_ip in "${NODE_IPS[@]}"
do
echo -e "\033[31m>>> ${node_ip} \033[0m"
ssh "root@${node_ip}" "mkdir -p /var/lib/kube-proxy"
ssh "root@${node_ip}" "systemctl daemon-reload && systemctl restart kube-proxy && systemctl enable kube-proxy"
done
二十一,验证kube-proxy服务
# Confirm kube-proxy is active on every node.
source /root/env.sh
for node_ip in "${NODE_IPS[@]}"
do
echo -e "\033[31m>>> ${node_ip} \033[0m"
ssh "root@${node_ip}" "systemctl status kube-proxy | grep Active"
done
#确保状态为 active (running),否则查看日志,确认原因
journalctl -u kube-proxy
二十二,查看监听端口
# Show kube-proxy's listening TCP ports on every node.
source /root/env.sh
for node_ip in "${NODE_IPS[@]}"
do
echo -e "\033[31m>>> ${node_ip} \033[0m"
ssh "root@${node_ip}" "netstat -nltp | grep kube-proxy"
done
二十三,查看 ipvs 路由规则
# Dump the IPVS virtual-server table on every node.
source /root/env.sh
for node_ip in "${NODE_IPS[@]}"
do
echo -e "\033[31m>>> ${node_ip} \033[0m"
ssh "root@${node_ip}" "ipvsadm -ln"
done
#输出:
>>> 172.27.128.11
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.1.0.1:443 rr
-> 172.27.128.11:6443 Masq 1 0 0
-> 172.27.128.12:6443 Masq 1 0 0
-> 172.27.128.13:6443 Masq 1 0 0
>>> 172.27.128.12
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.1.0.1:443 rr
-> 172.27.128.11:6443 Masq 1 0 0
-> 172.27.128.12:6443 Masq 1 0 0
-> 172.27.128.13:6443 Masq 1 0 0
>>> 172.27.128.13
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.1.0.1:443 rr
-> 172.27.128.11:6443 Masq 1 0 0
-> 172.27.128.12:6443 Masq 1 0 0
-> 172.27.128.13:6443 Masq 1 0 0
#可见所有通过 https 访问 K8S SVC kubernetes 的请求都转发到 kube-apiserver 节点的 6443 端口