Kubernetes Notes -- Building a Highly Available K8s Cluster

One-click build script for reference: https://blog.csdn.net/weixin_42480750/article/details/109275738

Environment Preparation

  1. Node overview
IP              HOSTNAME
13.13.51.51/16  master01
13.13.52.52/16  master02
13.13.53.53/16  master03
13.13.71.71/16  node01
  2. Configure /etc/hosts (all nodes)
[root@master01 ~]# tail -4 /etc/hosts
13.13.51.51 master01
13.13.52.52 master02
13.13.53.53 master03
13.13.71.71 node01
[root@master01 ~]# 
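To confirm name resolution works the same way on every node, a quick check like the following can be run (a small verification sketch, not part of the original notes; the hostnames are the ones added above):

# run on each node: every hostname should resolve and answer one ping
for h in master01 master02 master03 node01; do
    ping -c 1 -W 1 $h > /dev/null && echo "$h OK" || echo "$h FAILED"
done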
  3. Prerequisite system changes (all nodes)
[root@master01 ~]# setenforce 0
[root@master01 ~]# cp /etc/selinux/config{,.bak}
[root@master01 ~]# sed -i 's/enforcing$/disabled/' /etc/selinux/config
[root@master01 ~]# 
[root@master01 ~]# swapoff -a
[root@master01 ~]# cp /etc/fstab{,.bak}
[root@master01 ~]# sed -i '/swap/ s/\(.*\)/#\1/' /etc/fstab 
[root@master01 ~]# 
[root@master01 ~]# cat > /etc/sysctl.d/k8s.conf << EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> EOF
[root@master01 ~]# sysctl --system
[root@master01 ~]# 
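Note that the two bridge sysctls above only take effect once the br_netfilter kernel module is loaded. A minimal sketch for loading it now and on boot, then confirming the values (the modules-load.d file name is my own choice, not from the original post):

# load br_netfilter immediately and on every boot
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
# both keys should now print "= 1"
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables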

Software Installation

HOSTNAME  IP              SOFTWARE
master01  13.13.13.51/16  docker, k8s, haproxy, keepalived
master02  13.13.13.52/16  docker, k8s, haproxy, keepalived
master03  13.13.13.53/16  docker, k8s, haproxy, keepalived
node01    13.13.13.71/16  docker, k8s
  1. Docker and Kubernetes (all nodes)
[root@master01 ~]# curl https://download.docker.com/linux/centos/docker-ce.repo -o /etc/yum.repos.d/docker-ce.repo
[root@master01 ~]# sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
[root@master01 ~]# dnf install docker-ce -y
[root@master01 ~]# systemctl enable docker.service
[root@master01 ~]# 
[root@master01 ~]# cat << EOF > /etc/yum.repos.d/kubernetes.repo
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
> enabled=1
> gpgcheck=1
> repo_gpgcheck=1
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@master01 ~]# dnf install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
[root@master01 ~]# 
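Before going further, it is worth confirming that the same versions landed on all four nodes; a quick check (not in the original write-up):

# verify installed versions match across all nodes
docker --version
kubeadm version -o short
kubectl version --client --short
rpm -q kubelet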
  2. haproxy and keepalived (master01-03)
[root@master01 ~]# dnf install keepalived -y                         
[root@master01 ~]# dnf install haproxy -y
[root@master01 ~]# 

Configuring High Availability

Reference: https://github.com/kubernetes/kubeadm/blob/master/docs/ha-considerations.md#options-for-software-load-balancing

  1. Configure keepalived
[root@master01 ~]# cp /etc/keepalived/keepalived.conf{,.bak}
[root@master01 ~]# vi /etc/keepalived/keepalived.conf
[root@master01 ~]# cat /etc/keepalived/keepalived.conf
vrrp_script check_apiserver {
  script "/etc/keepalived/check_apiserver.sh"
  interval 3
  weight -2
  fall 10
  rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens160
    virtual_router_id 51
    priority 101
    authentication {
        auth_type PASS
        auth_pass 42
    }
    virtual_ipaddress {
        13.13.13.50/16
    }
    track_script {
        check_apiserver
    }
}
[root@master01 ~]# vi /etc/keepalived/check_apiserver.sh 
[root@master01 ~]# chmod +x /etc/keepalived/check_apiserver.sh 
[root@master01 ~]# cat /etc/keepalived/check_apiserver.sh 
#!/bin/sh

APISERVER_VIP=13.13.13.50
APISERVER_DEST_PORT=16443

errorExit() {
    echo "*** $*" 1>&2
    exit 1
}

curl --silent --max-time 2 --insecure https://localhost:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://localhost:${APISERVER_DEST_PORT}/"
if ip addr | grep -q ${APISERVER_VIP}; then
    curl --silent --max-time 2 --insecure https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/ -o /dev/null || errorExit "Error GET https://${APISERVER_VIP}:${APISERVER_DEST_PORT}/"
fi
[root@master01 ~]# 
[root@master02 ~]# cat /etc/keepalived/keepalived.conf
....
    state BACKUP
    priority 100
....
[root@master03 ~]# cat /etc/keepalived/keepalived.conf
....
    state BACKUP
    priority 100
....
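Once keepalived is started later on, the health-check script and the VIP can be verified by hand. A rough sketch, assuming the VIP and interface from the config above (13.13.13.50 on ens160):

# the check script should exit 0 once the apiserver answers on 16443
/etc/keepalived/check_apiserver.sh; echo "exit code: $?"

# the VIP should be held only by the current MASTER (master01, priority 101)
ip addr show ens160 | grep 13.13.13.50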
  2. Configure haproxy
[root@master01 ~]# cp /etc/haproxy/haproxy.cfg{,.bak}
[root@master01 ~]# vi /etc/haproxy/haproxy.cfg
[root@master01 ~]# cat /etc/haproxy/haproxy.cfg
global
    log /dev/log local0
    log /dev/log local1 notice
    daemon
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 1
    timeout http-request    10s
    timeout queue           20s
    timeout connect         5s
    timeout client          20s
    timeout server          20s
    timeout http-keep-alive 10s
    timeout check           10s
frontend apiserver
    bind *:16443
    mode tcp
    option tcplog
    default_backend apiserver
backend apiserver
    option httpchk GET /healthz
    http-check expect status 200
    mode tcp
    option ssl-hello-chk
    balance     roundrobin
        server master01 master01:6443 check
        server master02 master02:6443 check
        server master03 master03:6443 check
[root@master01 ~]# 
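The file can be syntax-checked before the service is started; the -c flag only validates the configuration and does not start haproxy:

# check the configuration for errors; it should report the file as valid
haproxy -c -f /etc/haproxy/haproxy.cfg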

Cluster Setup

  1. Configure firewall and services (masters)
[root@master01 ~]# firewall-cmd --zone=public --permanent --add-rich-rule='rule protocol value="vrrp" accept'
[root@master01 ~]# firewall-cmd --permanent --add-port=6443/tcp --add-port=16443/tcp --add-port=2379-2380/tcp --add-port=10250-10252/tcp
[root@master01 ~]# firewall-cmd --reload
[root@master01 ~]# systemctl start docker.service;systemctl enable docker.service
[root@master01 ~]# systemctl start keepalived.service;systemctl enable keepalived.service
[root@master01 ~]# systemctl start haproxy.service;systemctl enable haproxy.service
[root@master01 ~]# systemctl enable kubelet.service
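To double-check that the rules were persisted and the services came up on each master, something like the following can be used (a verification step, not in the original notes):

# list the ports and rich rules now active in the public zone
firewall-cmd --zone=public --list-ports
firewall-cmd --zone=public --list-rich-rules
# all three should report "active"
systemctl is-active docker keepalived haproxy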
  2. Configure firewall and services (node)
[root@node01 ~]# firewall-cmd --permanent --add-port=10251-10252/tcp --add-port=30000-32767/tcp
[root@node01 ~]# firewall-cmd --reload
[root@node01 ~]# systemctl start docker.service;systemctl enable docker.service
[root@node01 ~]# systemctl enable kubelet.service
  3. Initialize the cluster (master01)
[root@master01 ~]# kubeadm init --control-plane-endpoint "13.13.13.50:16443" --upload-certs --image-repository registry.aliyuncs.com/google_containers 

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 13.13.13.50:16443 --token gjmj2c.a5npinfgxrtz32nj \
    --discovery-token-ca-cert-hash sha256:39a808fd1208abee57e9d9ccb7ba52554c42fd4977fa3ad0d3d517f37eff13c5 \
    --control-plane --certificate-key 9af25409b58438bb91a73fd7bf48509d538b9cd11dd1b8ca901263157adcc92c

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 13.13.13.50:16443 --token gjmj2c.a5npinfgxrtz32nj \
    --discovery-token-ca-cert-hash sha256:39a808fd1208abee57e9d9ccb7ba52554c42fd4977fa3ad0d3d517f37eff13c5 
[root@master01 ~]#   mkdir -p $HOME/.kube
[root@master01 ~]#   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master01 ~]#   sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master01 ~]# 
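At this point the control plane should already be reachable through the VIP; a quick hedged check (on a default install /healthz is served to anonymous requests, so curl does not need credentials):

# the load-balanced endpoint should answer "ok" on /healthz
curl -k https://13.13.13.50:16443/healthz; echo

# the control-plane components run as static pods in kube-system
kubectl get pods -n kube-system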
  4. Deploy the CNI network plugin
[root@master01 ~]# kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
serviceaccount/weave-net created
clusterrole.rbac.authorization.k8s.io/weave-net created
clusterrolebinding.rbac.authorization.k8s.io/weave-net created
role.rbac.authorization.k8s.io/weave-net created
rolebinding.rbac.authorization.k8s.io/weave-net created
daemonset.apps/weave-net created
[root@master01 ~]# 
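Once the DaemonSet rolls out, one weave-net pod should end up on every node and the nodes should turn Ready; a short check using the label of the objects created above:

# wait for the weave-net pods to become Running, then confirm node status
kubectl get pods -n kube-system -l name=weave-net -o wide
kubectl get nodes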
  5. Join the remaining nodes to the cluster
[root@master02 ~]# kubeadm join 13.13.13.50:16443 --token gjmj2c.a5npinfgxrtz32nj \
>     --discovery-token-ca-cert-hash sha256:39a808fd1208abee57e9d9ccb7ba52554c42fd4977fa3ad0d3d517f37eff13c5 \
>     --control-plane --certificate-key 9af25409b58438bb91a73fd7bf48509d538b9cd11dd1b8ca901263157adcc92c
[root@master03 ~]# kubeadm join 13.13.13.50:16443 --token gjmj2c.a5npinfgxrtz32nj \
>     --discovery-token-ca-cert-hash sha256:39a808fd1208abee57e9d9ccb7ba52554c42fd4977fa3ad0d3d517f37eff13c5 \
>     --control-plane --certificate-key 9af25409b58438bb91a73fd7bf48509d538b9cd11dd1b8ca901263157adcc92c
[root@node01 ~]# kubeadm join 13.13.13.50:16443 --token gjmj2c.a5npinfgxrtz32nj \
>     --discovery-token-ca-cert-hash sha256:39a808fd1208abee57e9d9ccb7ba52554c42fd4977fa3ad0d3d517f37eff13c5 
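The bootstrap token and the uploaded certificates expire (24 hours and 2 hours respectively, as the init output notes). If a node is joined later, fresh values can be generated on master01 roughly as follows:

# print a fresh worker join command (new token, same CA cert hash)
kubeadm token create --print-join-command

# re-upload the control-plane certificates and print a new certificate key
kubeadm init phase upload-certs --upload-certs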
  6. Check cluster node status
[root@master01 ~]# kubectl get nodes
NAME       STATUS   ROLES    AGE   VERSION
master01   Ready    master   18m   v1.19.3
master02   Ready    master   17m   v1.19.3
master03   Ready    master   16m   v1.19.3
node01     Ready    <none>   16m   v1.19.3
[root@master01 ~]# 
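As a final sanity check of the HA setup, the VIP failover can be exercised; a rough sketch (stopping keepalived on the current MASTER should move 13.13.13.50 to one of the other masters within a few seconds, and kubectl should keep working against the VIP):

# on master01: give up the VIP
systemctl stop keepalived.service

# on master02/master03: the VIP should appear on one of them
ip addr show ens160 | grep 13.13.13.50
kubectl get nodes

# on master01: restore keepalived afterwards
systemctl start keepalived.service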