k8s Binary Installation Part 4: Worker Node Setup

I. kubelet Deployment

1. Sign the certificate
Certificate signing is done on 192.168.1.245.

vim kubelet-csr.json
{
    "CN": "k8s-kubelet",
    "hosts": [
    "127.0.0.1",
    "192.168.1.247",
    "192.168.1.248",
    "192.168.1.250",
    "192.168.1.251",
    "192.168.1.246"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "guangdong",
            "L": "guangzhou",
            "O": "zz",
            "OU": "ops"
        }
    ]
}
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssljson -bare kubelet
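
An optional sanity check (assuming cfssl-certinfo was installed alongside cfssl): inspect the new certificate and confirm the node IPs listed above appear in its SANs.

ls -l kubelet.pem kubelet-key.pem       # both files should have been generated
cfssl-certinfo -cert kubelet.pem        # the sans field should list the node IPs above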

2. Distribute the certificates:

scp kubelet.pem kubelet-key.pem 192.168.1.246:/opt/kubernetes/server/bin/certs/
scp kubelet.pem kubelet-key.pem 192.168.1.247:/opt/kubernetes/server/bin/certs/
# On 192.168.1.248, create the certs directory first:
mkdir -p /opt/kubernetes/server/bin/certs/
scp kubelet.pem kubelet-key.pem ca.pem 192.168.1.248:/opt/kubernetes/server/bin/certs/
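
If in doubt about the copies, a quick integrity check is to compare checksums between 192.168.1.245 and a destination node (illustrative only; adjust the host as needed):

md5sum kubelet.pem kubelet-key.pem
ssh 192.168.1.248 md5sum /opt/kubernetes/server/bin/certs/kubelet.pem /opt/kubernetes/server/bin/certs/kubelet-key.pem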

3. Create the kubelet kubeconfig (on 192.168.1.246)
set-cluster # define the cluster information to connect to; multiple k8s cluster entries can be created

 kubectl config set-cluster myk8s  --certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem  --embed-certs=true  --server=https://192.168.1.242:6443  --kubeconfig=/opt/kubernetes/conf/kubelet.kubeconfig

set-credentials # create the user credentials, i.e. the client private key and certificate used for authentication; multiple credentials can be created

kubectl config set-credentials k8s-node --client-certificate=/opt/kubernetes/server/bin/certs/client.pem --client-key=/opt/kubernetes/server/bin/certs/client-key.pem --embed-certs=true --kubeconfig=/opt/kubernetes/conf/kubelet.kubeconfig

set-context # define a context, i.e. the mapping between a user and a cluster

kubectl config set-context myk8s-context  --cluster=myk8s  --user=k8s-node --kubeconfig=/opt/kubernetes/conf/kubelet.kubeconfig

use-context # select which context is currently in use

kubectl config use-context myk8s-context --kubeconfig=/opt/kubernetes/conf/kubelet.kubeconfig
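
To sanity-check the assembled kubeconfig, it can be dumped (the embedded certificate data is hidden in the output):

kubectl config view --kubeconfig=/opt/kubernetes/conf/kubelet.kubeconfig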

4. Copy this kubeconfig to the other two nodes so the four steps above do not need to be repeated there:

scp /opt/kubernetes/conf/kubelet.kubeconfig 192.168.1.247:/opt/kubernetes/conf/
scp /opt/kubernetes/conf/kubelet.kubeconfig 192.168.1.248:/opt/kubernetes/conf/

5. Authorize the k8s-node user
This step only needs to be executed on one master node.
Bind the k8s-node user to the cluster role system:node so that k8s-node gains the permissions of a worker node.

vim k8s-node.yaml	
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: k8s-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node



kubectl create -f k8s-node.yaml 	
kubectl get clusterrolebinding k8s-node
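
To confirm the binding took effect, describe it; the output should show the system:node ClusterRole bound to the k8s-node user:

kubectl describe clusterrolebinding k8s-node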

6. Prepare the pause image
Push the pause image into the harbor private registry; this only needs to be done on 192.168.1.246:

docker image pull kubernetes/pause
docker tag kubernetes/pause:latest 192.168.1.245:8080/public/pause:latest
docker push 192.168.1.245:8080/public/pause:latest
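
To confirm the image is available from harbor, it can be pulled back on one of the other nodes (assuming docker on that node already trusts the 192.168.1.245:8080 registry):

docker pull 192.168.1.245:8080/public/pause:latest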

7. kubelet startup script
Create the kubelet startup script on each node (192.168.1.246, 192.168.1.247, 192.168.1.248). Note that --hostname-override must be set to the IP of the node the script runs on; the example below uses 192.168.1.248.

vim /etc/init.d/kubelet
#!/bin/bash
# chkconfig: 2345 10 90 
# description: myservice ....
APP_NAME=/opt/kubernetes/server/bin/kubelet
NAME=kubelet
now=`date "+%Y%m%d%H%M%S"`

usage() {
    echo "Usage: sh demo.sh [start|stop|restart|status]"
    exit 1
}

is_exist() { 
    pid=`ps -ef | grep $APP_NAME | grep -v grep | awk '{print $2}' `
    if [ -z "${pid}" ]; then
      return 1
    else
      return 0
    fi
}

start() {
   is_exist
   if [ $? -eq "0" ]; then
     echo "${NAME} is already running. pid=${pid} ."
   else
     cd / && nohup /opt/kubernetes/server/bin/kubelet \
       --anonymous-auth=false \
       --cgroup-driver systemd \
       --cluster-dns 192.168.0.2 \
       --cluster-domain cluster.local \
       --runtime-cgroups=/systemd/system.slice \
       --kubelet-cgroups=/systemd/system.slice \
       --fail-swap-on="false" \
       --client-ca-file /opt/kubernetes/server/bin/certs/ca.pem \
       --tls-cert-file /opt/kubernetes/server/bin/certs/kubelet.pem \
       --tls-private-key-file /opt/kubernetes/server/bin/certs/kubelet-key.pem \
       --hostname-override 192.168.1.248 \
       --image-gc-high-threshold 20 \
       --image-gc-low-threshold 10 \
       --kubeconfig /opt/kubernetes/conf/kubelet.kubeconfig \
       --log-dir /data/logs/kubernetes/kube-kubelet \
       --pod-infra-container-image 192.168.1.245:8080/public/pause:latest \
       --root-dir /data/kubelet &
   fi
}

stop() {
   is_exist
   if [ $? -eq "0" ]; then
     kill -9 $pid
   else
     echo "${NAME} is not running"
   fi
}

status() {
   is_exist
   if [ $? -eq "0" ]; then
     echo "${NAME} is running. Pid is ${pid}"
   else
     echo "${NAME} is not running."
   fi
}

restart() {
   stop
   start
}

case "$1" in
   "start")
     start
     ;;
   "stop")
     stop
     ;;
   "status")
     status
     ;;
   "restart")
     restart
     ;;
   *)
     usage
     ;;
esac

chmod  +x   /etc/init.d/kubelet
mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet 
service kubelet start
service kubelet status
kubectl get nodes
# enable at boot
chkconfig --add kubelet
chkconfig kubelet  on

# Verify
kubectl get nodes
NAME            STATUS   ROLES    AGE     VERSION
192.168.1.246   Ready    <none>   11m     v1.19.14
192.168.1.247   Ready    <none>   9m19s   v1.19.14
192.168.1.248   Ready    <none>   63s     v1.19.14
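
The ROLES column shows <none> because a binary install does not label nodes automatically. If you want a role to appear there, the label can be added by hand (optional, purely cosmetic):

kubectl label node 192.168.1.246 node-role.kubernetes.io/node=
kubectl label node 192.168.1.247 node-role.kubernetes.io/node=
kubectl label node 192.168.1.248 node-role.kubernetes.io/node=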

II. kube-proxy Deployment

1. Sign the certificate
Certificate signing is done on 192.168.1.245.

cd /opt/certs/
vim kube-proxy-csr.json  # the CN here is actually a role name in k8s
{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "guangdong",
            "L": "guangzhou",
            "O": "zz",
            "OU": "ops"
        }
    ]
}

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json |cfssljson -bare kube-proxy-client

2. Distribute the certificates
scp kube-proxy-client-key.pem kube-proxy-client.pem 192.168.1.246:/opt/kubernetes/server/bin/certs/

scp kube-proxy-client-key.pem kube-proxy-client.pem 192.168.1.247:/opt/kubernetes/server/bin/certs/

scp kube-proxy-client-key.pem kube-proxy-client.pem 192.168.1.248:/opt/kubernetes/server/bin/certs/

3. Create the configuration on all node servers: 192.168.1.246, 192.168.1.247, 192.168.1.248
① Create the kube-proxy kubeconfig (on 192.168.1.246)
set-cluster # define the cluster information to connect to; multiple k8s cluster entries can be created

kubectl config set-cluster myk8s --certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem --embed-certs=true --server=https://192.168.1.242:6443 --kubeconfig=/opt/kubernetes/conf/kube-proxy.kubeconfig

set-credentials # create the user credentials, i.e. the client private key and certificate used for authentication; multiple credentials can be created

kubectl config set-credentials kube-proxy --client-certificate=/opt/kubernetes/server/bin/certs/kube-proxy-client.pem --client-key=/opt/kubernetes/server/bin/certs/kube-proxy-client-key.pem --embed-certs=true --kubeconfig=/opt/kubernetes/conf/kube-proxy.kubeconfig

set-context # define a context, i.e. the mapping between a user and a cluster

kubectl config set-context myk8s-context --cluster=myk8s --user=kube-proxy --kubeconfig=/opt/kubernetes/conf/kube-proxy.kubeconfig

use-context # select which context is currently in use

kubectl config use-context myk8s-context --kubeconfig=/opt/kubernetes/conf/kube-proxy.kubeconfig

Copy the generated kubeconfig to the other two machines so they do not need to repeat the four steps above:

scp /opt/kubernetes/conf/kube-proxy.kubeconfig  192.168.1.248:/opt/kubernetes/conf/
scp /opt/kubernetes/conf/kube-proxy.kubeconfig  192.168.1.247:/opt/kubernetes/conf/

② Load the ipvs modules
kube-proxy supports three proxy modes: userspace, iptables, and ipvs; ipvs has the best performance.

for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i;done

lsmod | grep ip_vs  # check that the ipvs modules are loaded
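
The modprobe loop above only lasts until the next reboot. One common way to make it persistent on a CentOS-style system (a sketch; /etc/rc.d/rc.local must be executable for it to run at boot) is to save the loop as a script and call it from rc.local:

cat > /root/ipvs.sh <<'EOF'
#!/bin/bash
# load every ipvs-related kernel module shipped with the running kernel
for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs | grep -o "^[^.]*"); do
    /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i
done
EOF
chmod +x /root/ipvs.sh /etc/rc.d/rc.local
echo '/root/ipvs.sh' >> /etc/rc.d/rc.local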

Create the log directory
mkdir -p /data/logs/kubernetes/kube-proxy

Create the systemd unit file; --hostname-override must be set to each node's own IP (the example below uses 192.168.1.246).
vim /lib/systemd/system/kube-proxy.service
[Unit]
Description=kube-proxy
Documentation=https://github.com/coreos

[Service]
Type=simple
ExecStart=/opt/kubernetes/server/bin/kube-proxy \
  --cluster-cidr 172.7.0.0/16 \
  --hostname-override 192.168.1.246 \
  --proxy-mode=ipvs \
  --ipvs-scheduler=nq \
  --kubeconfig /opt/kubernetes/conf/kube-proxy.kubeconfig


Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target


systemctl enable kube-proxy.service
systemctl start kube-proxy.service
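
A few quick checks after start-up (ipvsadm is usually not installed by default; yum install -y ipvsadm if the command is missing):

systemctl status kube-proxy.service
ipvsadm -Ln                  # should show one TCP virtual server per cluster Service
kubectl get svc kubernetes   # the apiserver ClusterIP that ipvs is proxying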