Installing a highly available Kubernetes cluster with kubeadm (3 masters)


Introduction

  • A production environment needs a highly available Kubernetes cluster. We deploy the core components (kube-apiserver and etcd) across multiple nodes and load-balance them to achieve high availability.

Architecture

  • etcd deployed as a highly available cluster
  • kube-apiserver load-balanced with haproxy + keepalived

Resource preparation

hostname   Node type                     OS version   Spec   Pre-assigned IP
master1    master                        CentOS 7.8   2C4G   192.168.21.128
master2    master                        CentOS 7.8   2C4G   192.168.21.129
master3    master                        CentOS 7.8   2C4G   192.168.21.130
vip        API server VIP (keepalived)   -            -      192.168.21.127

Installation

Initialize the nodes (run all of the following on every machine in the cluster)

  1. Install the OS and configure static IP addresses (omitted)
  2. Set the hostname (change the hostname for each node accordingly)
    hostname master1
    cat > /etc/hostname << EOF
    master1
    EOF
    
  3. Edit the hosts file and add hostname-to-IP mappings (adjust the IP addresses and hostnames for your environment; on the other machines add the corresponding mappings in the same way)
    cat >> /etc/hosts << EOF
    192.168.21.128 master1
    192.168.21.129 master2
    192.168.21.130 master3
    EOF
    
  4. Disable the firewall
    iptables -F
    systemctl stop firewalld && systemctl disable firewalld
    
  5. Disable SELinux
    setenforce 0
    sed -i 's/^SELINUX=.*$/SELINUX=disabled/' /etc/selinux/config
    
  6. Disable swap
    swapoff -a && sysctl -w vm.swappiness=0
    sed -i 's/.*swap.*/#&/g' /etc/fstab
    
  7. Set the kernel parameters Docker/Kubernetes need and enable IPv4 forwarding
    cat > /etc/sysctl.d/k8s.conf << EOF
    net.ipv4.ip_forward = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    EOF
    modprobe br_netfilter
    sysctl -p /etc/sysctl.d/k8s.conf
    
  8. Load the ip_vs kernel modules
    cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    #!/bin/bash
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF
    chmod 755 /etc/sysconfig/modules/ipvs.modules && bash  /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
    
  9. Install Docker (latest version by default)
    yum -y install yum-utils device-mapper-persistent-data lvm2 wget epel-release ipvsadm ntpdate
    yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
    
    yum install -y docker-ce 
    
    # Alternatively, install a specific Docker version
    # list all installable Docker versions:
    # yum list docker-ce --showduplicates | sort -r
    # yum -y install docker-ce-20.10.5  # (use the three-part version number that appears after "3:" and before ".el" in the list output)
    
    
    mkdir -p /etc/docker/
    # "exec-opts": ["native.cgroupdriver=systemd"] 为kubelete启动必须
    cat > /etc/docker/daemon.json <<EOF
    {
      "exec-opts": ["native.cgroupdriver=systemd"],
      "registry-mirrors": ["https://gco4rcsp.mirror.aliyuncs.com"]
    }
    EOF
    
    systemctl daemon-reload && systemctl enable docker && systemctl restart docker
    
    
  10. Install kubeadm, kubelet, and kubectl
    cat > /etc/yum.repos.d/kubernetes.repo <<EOF
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg  https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    
    yum install -y kubelet kubeadm kubectl 
    
    # You can also install other versions of kubelet/kubeadm/kubectl, but make sure they are compatible with your Docker version
    # list the installable kubelet/kubeadm/kubectl versions:
    # yum list kubeadm kubelet kubectl --showduplicates | sort -r
    # yum -y install kubectl-1.20.13-0 kubelet-1.20.13-0 kubeadm-1.20.13-0
    
    
  11. Enable docker and kubelet to start on boot
    systemctl enable --now docker kubelet
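
Optional check (not part of the original steps): a minimal verification sketch to confirm each node is ready before bootstrapping, assuming the defaults installed above.

    # Docker must report the systemd cgroup driver, matching the kubelet configuration
    docker info 2>/dev/null | grep -i 'cgroup driver'

    # kubeadm / kubelet / kubectl should be installed and at the same version
    kubeadm version -o short
    kubelet --version
    kubectl version --client

    # swap must be off, and the forwarding/bridge sysctls must all be 1
    free -h | grep -i swap
    sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables

    # the ip_vs modules should be loaded
    lsmod | grep -e ip_vs -e nf_conntrack_ipv4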
    

ETCD high-availability configuration (all master nodes)

  1. Download cfssl (run on one master only)

    wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
    wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O    /usr/local/bin/cfssljson
    chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
    
  2. Generate the etcd CA and etcd certificate (certificates go under /etc/etcd/ssl) (replace the IP addresses in -hostname= with the IPs of all your masters)

    mkdir -p /etc/etcd/ssl && cd /etc/etcd/ssl
    cat > ca-config.json <<EOF
    {"signing":{"default":{"expiry":"87600h"},"profiles":{"kubernetes":{"usages":["signing","key encipherment","server auth","client auth"],"expiry":"87600h"}}}}
    EOF
    
    cat > etcd-ca-csr.json <<EOF 
    {"CN":"etcd","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"etcd","OU":"etcd"}]}
    EOF
    
    cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca
    cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json -hostname=127.0.0.1,192.168.21.128,192.168.21.129,192.168.21.130 -profile=kubernetes etcd-ca-csr.json | cfssljson -bare etcd
    # rm -rf *.json *.csr
    
    
  3. Copy everything under /etc/etcd/ssl to the same directory on the remaining master nodes

    # example: run once per remaining master (192.168.21.129, 192.168.21.130);
    # the target directory must exist on those nodes first
    ssh 192.168.21.129 "mkdir -p /etc/etcd/ssl"
    scp /etc/etcd/ssl/* 192.168.21.129:/etc/etcd/ssl/
    
  4. Download etcd (run on all master nodes) (version 3.3.12)

    wget https://github.com/etcd-io/etcd/releases/download/v3.3.12/etcd-v3.3.12-linux-amd64.tar.gz
    tar zxvf etcd-v3.3.12-linux-amd64.tar.gz && cd etcd-v3.3.12-linux-amd64
    cp etcd* /usr/local/bin/ 
    
  5. Create the etcd config file and systemd service (run on all master nodes) (replace 192.168.21.128 with the local node's IP, and set ETCD_NAME to etcd01/etcd02/etcd03 to match ETCD_INITIAL_CLUSTER)

    cat > /etc/etcd/config << EOF 
    #[Member]
    ETCD_NAME="etcd01"
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    ETCD_LISTEN_PEER_URLS="https://192.168.21.128:2380"
    ETCD_LISTEN_CLIENT_URLS="https://192.168.21.128:2379"
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.21.128:2380"
    ETCD_ADVERTISE_CLIENT_URLS="https://192.168.21.128:2379"
    ETCD_INITIAL_CLUSTER="etcd01=https://192.168.21.128:2380,etcd02=https://192.168.21.129:2380,etcd03=https://192.168.21.130:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_INITIAL_CLUSTER_STATE="new"
    EOF
    
    # quote the heredoc delimiter so the ${...} references below are written literally
    # into the unit file and expanded by systemd from EnvironmentFile, not by the shell
    cat > /usr/lib/systemd/system/etcd.service << 'EOF'
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target
    [Service]
    Type=notify
    EnvironmentFile=/etc/etcd/config
    ExecStart=/usr/local/bin/etcd \
    --name=${ETCD_NAME} \
    --data-dir=${ETCD_DATA_DIR} \
    --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
    --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
    --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
    --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
    --initial-cluster=${ETCD_INITIAL_CLUSTER} \
    --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
    --initial-cluster-state=new \
    --cert-file=/etc/etcd/ssl/etcd.pem \
    --key-file=/etc/etcd/ssl/etcd-key.pem \
    --peer-cert-file=/etc/etcd/ssl/etcd.pem \
    --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
    --trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem \
    --peer-trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem
    Restart=on-failure
    LimitNOFILE=65536
    [Install]
    WantedBy=multi-user.target
    EOF
    
    
  6. Start etcd & check etcd cluster status

    # start etcd (on all masters)
    systemctl enable --now etcd
    
    # check etcd cluster health
    etcdctl \
    --ca-file=/etc/etcd/ssl/etcd-ca.pem \
    --cert-file=/etc/etcd/ssl/etcd.pem \
    --key-file=/etc/etcd/ssl/etcd-key.pem \
    --endpoints="https://192.168.21.128:2379,\
    https://192.168.21.129:2379,\
    https://192.168.21.130:2379" cluster-health
    
    

Making apiserver port 6443 highly available with haproxy + keepalived (run on all master nodes)

  1. Install haproxy and keepalived

    yum -y install haproxy keepalived
    
  2. Write the keepalived configuration file and the health-check script (adjust the NIC name and VIP; on master2/master3 you would typically set state BACKUP and a lower priority)

    cat > /etc/keepalived/keepalived.conf << EOF 
    vrrp_script check_haproxy {
        script "/etc/keepalived/check_haproxy.sh"
        interval 3
    }
    vrrp_instance VI_1 {
        state MASTER
        # change to the local NIC name
        interface ens33
        virtual_router_id 51
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        virtual_ipaddress {
            # replace with the virtual IP (VIP)
            192.168.21.127
        }
         track_script {
            check_haproxy
         }
    }
    EOF
    # health-check script: if haproxy is down, stop keepalived so the VIP fails over
    cat > /etc/keepalived/check_haproxy.sh << EOF
    #!/bin/bash
    systemctl status haproxy > /dev/null
    if [[ \$? != 0 ]];then
            echo "haproxy is down,close the keepalived"
            systemctl stop keepalived
    fi
    
    EOF
    chmod +x /etc/keepalived/check_haproxy.sh
    
    
  3. Add the haproxy load-balancing configuration (adjust the IP addresses) (haproxy listens on port 8443 and proxies port 6443 on all master nodes)

    cat >> /etc/haproxy/haproxy.cfg <<EOF
    frontend  k8s-api
       bind *:8443
       mode tcp
       default_backend             apiserver
    #---------------------------------------------------------------------
    backend apiserver
        balance     roundrobin
        mode tcp
        server  k8s-m1 192.168.21.128:6443 check inter 2000 rise 2 fall 3 weight 1 maxconn 2000
        server  k8s-m2 192.168.21.129:6443 check inter 2000 rise 2 fall 3 weight 1 maxconn 2000
        server  k8s-m3 192.168.21.130:6443 check inter 2000 rise 2 fall 3 weight 1 maxconn 2000
    
    EOF
    
    
  4. Start haproxy and keepalived

    systemctl enable --now haproxy keepalived
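
Optional verification (not part of the original steps): a quick sketch to confirm the VIP and the proxy are behaving, assuming the ens33 NIC name used above.

    # exactly one master should currently hold the VIP
    ip addr show ens33 | grep 192.168.21.127

    # every master should have haproxy listening on the frontend port
    ss -lntp | grep 8443

    # failover test: on the node holding the VIP, stop haproxy; the check script
    # then stops keepalived and the VIP should move to another master within seconds
    systemctl stop haproxy
    # ...verify `ip addr` on the other masters, then restore this node:
    systemctl start haproxy keepalived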
    

Deploy the Kubernetes cluster

  1. Write the kubeadm-config.yaml file (run on master1)

    cat > kubeadm-config.yaml << EOF
    apiVersion: kubeadm.k8s.io/v1beta2
    kind: ClusterConfiguration
    kubernetesVersion: v1.20.0
    # API server virtual address (VIP + haproxy port)
    controlPlaneEndpoint: "192.168.21.127:8443"
    # use the Aliyun registry mirror for the control-plane images
    imageRepository: "registry.aliyuncs.com/google_containers"
    etcd:
      external:
        endpoints:
        - https://192.168.21.128:2379
        - https://192.168.21.129:2379
        - https://192.168.21.130:2379
        caFile: /etc/etcd/ssl/etcd-ca.pem
        certFile: /etc/etcd/ssl/etcd.pem
        keyFile: /etc/etcd/ssl/etcd-key.pem
    networking:
      podSubnet: 10.244.0.0/16
    
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: ipvs
    
    EOF
    
    
  2. Run the bootstrap (on master1)

    kubeadm init --config=kubeadm-config.yaml --upload-certs
    
  3. Set up the kubectl config

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
  4. Join the other master nodes using the command printed by kubeadm init (run on master2 and master3)

    # You can now join any number of the control-plane node running the following command on each as root:
    
      kubeadm join 192.168.21.127:8443 --token ec75jo.co8nzmrlps8gvsav \
        --discovery-token-ca-cert-hash sha256:e770f99e6d831fc46ffb4dda2c010e7dbb082511fc42ed40c4ec4b67a6f8b3a2 \
        --control-plane --certificate-key 1e519a14851376e20c0a5ff8e6993eb459d3d0af2f2cc7847c078ba4b4a32a65
    
  5. Join worker nodes using the command printed by kubeadm init

    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 192.168.21.127:8443 --token ec75jo.co8nzmrlps8gvsav \
        --discovery-token-ca-cert-hash sha256:e770f99e6d831fc46ffb4dda2c010e7dbb082511fc42ed40c4ec4b67a6f8b3a2
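
Once all three masters have joined, a short verification sketch (run on any master with kubectl configured). Note that the token and certificate key shown above are specific to that run and expire; the two kubeadm commands below print fresh ones if needed.

    # all three masters should be listed; they stay NotReady until a CNI plugin is installed
    kubectl get nodes -o wide

    # core control-plane pods should be Running (no etcd pods here, since etcd is external)
    kubectl get pods -n kube-system

    # regenerate a worker join command / a new certificate key for extra control-plane nodes
    kubeadm token create --print-join-command
    kubeadm init phase upload-certs --upload-certs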
    

Next steps

  1. Install a CNI network plugin (see the sketch below)
  2. Install an ingress controller
  3. Install the dashboard
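
For step 1, any CNI plugin whose pod CIDR matches podSubnet: 10.244.0.0/16 will do; below is a minimal sketch using flannel. The manifest URL is the upstream flannel project's and is an assumption here (it has moved between releases), so check it before applying.

    # run on one master with kubectl configured; flannel's default network is 10.244.0.0/16
    kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml

    # nodes should become Ready once the flannel pods are running
    kubectl get pods -A | grep flannel
    kubectl get nodes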