1. Cluster Planning
Hostname | Role | IP |
h134.host.com | kube-proxy | 192.168.146.134 |
h135.host.com | kube-proxy | 192.168.146.135 |
2. Issuing the kube-proxy Certificate
Target: h136
Create the certificate signing request file /opt/certs/kube-proxy-csr.json:
{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
Sign it:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json | cfssl-json -bare kube-proxy-client
###### The following files now exist; the first three were generated from the fourth (the request file):
-rw-r--r-- 1 root root 1005 Jul 13 01:26 kube-proxy-client.csr
-rw------- 1 root root 1675 Jul 13 01:26 kube-proxy-client-key.pem
-rw-r--r-- 1 root root 1375 Jul 13 01:26 kube-proxy-client.pem
-rw-r--r-- 1 root root 266 Jul 13 01:21 kube-proxy-csr.json
Push the certificates to h134 and h135:
scp kube-proxy-client{,-key}.pem root@h134:/opt/kubernetes/server/bin/cert
scp kube-proxy-client{,-key}.pem root@h135:/opt/kubernetes/server/bin/cert
3. Deployment
3.1 kube-proxy Configuration File
Run the following four steps in /opt/kubernetes/server/bin/conf on either h134 or h135. The generated kube-proxy.kubeconfig file must end up in /opt/kubernetes/server/bin/conf on both proxy nodes (see the copy step after 3.1.4).
3.1.1 set-cluster
kubectl config set-cluster myk8s \
    --certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
    --embed-certs=true \
    --server=https://192.168.146.130:7443 \
    --kubeconfig=kube-proxy.kubeconfig
3.1.2 set-credentials
kubectl config set-credentials kube-proxy \
    --client-certificate=/opt/kubernetes/server/bin/cert/kube-proxy-client.pem \
    --client-key=/opt/kubernetes/server/bin/cert/kube-proxy-client-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-proxy.kubeconfig
3.1.3 set-context
kubectl config set-context myk8s-context \
    --cluster=myk8s \
    --user=kube-proxy \
    --kubeconfig=kube-proxy.kubeconfig
3.1.4 use-context
kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig
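The four steps produce kube-proxy.kubeconfig in the current directory; it can be sanity-checked with kubectl config view --kubeconfig=kube-proxy.kubeconfig. Since it must be present on both proxy nodes, copy it over; a minimal sketch, assuming the file was generated on h134:
scp kube-proxy.kubeconfig root@h135:/opt/kubernetes/server/bin/conf/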
### Unlike the previous article, the role-binding step is omitted here. The reason is that when the kube-proxy certificate was signed, the CN field in kube-proxy-csr.json was set to system:kube-proxy, a user name Kubernetes already recognizes; changing it arbitrarily will cause problems.
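Concretely, Kubernetes ships a built-in ClusterRoleBinding named system:node-proxier that binds the system:node-proxier ClusterRole to the user system:kube-proxy (exactly the CN above), which is why no manual binding is needed. It can be inspected with:
kubectl get clusterrolebinding system:node-proxier -o yaml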
3.2 Loading the ipvs Modules
Targets: h134, h135
These kernel modules are what kube-proxy uses to schedule traffic.
Load the ipvs modules with /root/ipvs.sh:
#!/bin/bash
# Load every ipvs-related module shipped with the running kernel.
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for mod in $(ls $ipvs_mods_dir | grep -o "^[^.]*"); do
    # Only modprobe modules that modinfo can resolve.
    if /sbin/modinfo -F filename $mod &>/dev/null; then
        /sbin/modprobe $mod
    fi
done
After running it, verify that the ipvs modules are loaded:
lsmod | grep ip_vs
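Note that modules loaded this way do not persist across reboots. A minimal sketch to re-run the script at boot via rc.local (assuming a CentOS 7-style layout; adapt to your init system):
chmod +x /root/ipvs.sh /etc/rc.d/rc.local
echo '/root/ipvs.sh' >> /etc/rc.d/rc.local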
3.3 kube-proxy Startup Script
Targets: h134, h135
3.3.1 Script
vim /opt/kubernetes/server/bin/kube-proxy.sh
#!/bin/bash
# On h135, change --hostname-override to h135.host.com.
./kube-proxy \
    --cluster-cidr 172.7.0.0/16 \
    --hostname-override h134.host.com \
    --proxy-mode=ipvs \
    --ipvs-scheduler=nq \
    --kubeconfig ./conf/kube-proxy.kubeconfig
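supervisord can only launch the script if it is executable:
chmod +x /opt/kubernetes/server/bin/kube-proxy.sh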
## PS: The --proxy-mode parameter above selects how kube-proxy schedules traffic; iptables mode is also available. Note that in iptables mode the --ipvs-scheduler option does not apply and scheduling is effectively limited to rr. This is not expanded on here; consult the documentation if needed.
3.3.2 Create the Log Directory
mkdir -p /data/logs/kubernetes/kube-proxy
3.3.3 Create the supervisord Service Entry
Targets: h134, h135. Save the following as, e.g., /etc/supervisord.d/kube-proxy.ini (assuming the standard supervisord include directory); on h135, name the program kube-proxy-h135.
[program:kube-proxy-h134]
command=/opt/kubernetes/server/bin/kube-proxy.sh
numprocs=1
directory=/opt/kubernetes/server/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=4
stdout_capture_maxbytes=1MB
stdout_events_enabled=false
Update supervisord and check the status:
supervisorctl update
supervisorctl status
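Once the status shows RUNNING, the active proxy mode can also be confirmed through kube-proxy's metrics endpoint (assuming the default metrics port 10249):
curl 127.0.0.1:10249/proxyMode
# expected output: ipvs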
3.3.4 Verify the Proxy
ipvsadm does not have to be installed; it is used here only for verification:
yum install -y ipvsadm
ipvsadm -L -n
### Output like the following (the cluster ClusterIP 192.168.0.1:443 is scheduled by ipvs, with the nq scheduler, to the two apiservers):
[root@h134 conf]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.0.1:443 nq
  -> 192.168.146.134:6443         Masq    1      0          0
  -> 192.168.146.135:6443         Masq    1      0          0
kubectl get svc
### Output:
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   192.168.0.1   <none>        443/TCP   12d
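As a further functional check, you can create a throwaway service and watch kube-proxy publish it as a new ipvs virtual server (a sketch; the nginx-test name is illustrative and assumes the nodes can pull the nginx image):
kubectl create deployment nginx-test --image=nginx
kubectl expose deployment nginx-test --port=80
ipvsadm -Ln    # a new TCP entry for the service's ClusterIP should appear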