One-Click Deployment Script for a Highly Available kubeadm Cluster

1. Script Overview

        This deploys a highly available cluster with three masters and three workers; additional nodes can join on their own when you scale out. It uses kubeadm, the officially recommended deployment method, so the result is suitable for production use.

        Every command in the script is annotated with its purpose, so you can also follow the script step by step and deploy by hand to experience the process.

        Minimum requirements: each master needs roughly 2 CPU cores and 2 GB of RAM; worker nodes can get by with 1 core and 1 GB.

2. Prerequisites

        The script runs all commands over SSH, so you must generate a key pair on master01 and set up passwordless login to every node (including master01 itself).

#Press Enter through every prompt to generate the key pair
ssh-keygen
#Copy the public key to every server; type yes and enter the password when prompted
ssh-copy-id root@192.168.0.***
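If you would rather not run ssh-copy-id once per host, a short loop over the node IPs used in the script works just as well (a minimal sketch; substitute your own addresses):

#Push the key to every node, including master01 itself
for ip in 192.168.0.181 192.168.0.182 192.168.0.183 192.168.0.191 192.168.0.192 192.168.0.193
do
  ssh-copy-id root@$ip
done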

3. The Script

#!/bin/bash
#Each server needs at least 2 CPU cores and 2 GB of RAM
#Prerequisite: generate a key pair on master01 and enable passwordless login to the other nodes
#Press Enter through every prompt to generate the key pair
#ssh-keygen
#Copy the public key to every server; type yes and enter the password when prompted
#ssh-copy-id root@192.168.0.***

set -x

#Change the IPs below to match your own environment
vip=192.168.0.188
master01=192.168.0.181
master02=192.168.0.182
master03=192.168.0.183
node01=192.168.0.191
node02=192.168.0.192
node03=192.168.0.193
podip="10.10.0.0/16"
serviceip="10.20.0.0/16"

all_node="
  $master01
  $master02
  $master03
  $node01
  $node02
  $node03"
master_node="
  $master01
  $master02
  $master03"
nodes="
  $node01
  $node02
  $node03"
kubelet_version=1.20.15
kubeadm_version=1.20.15
kubectl_version=1.20.15

#Helper that runs a command on a group of hosts: $1 names the host-list variable, $2 is the command
function all_cmd() {
  node="${!1}"
  for i in $node
  do
    ssh "$i" "$2" &
  done
  echo -e "\033[32m Waiting for the parallel commands to finish \033[0m"
  wait
  echo -e "\033[32m Commands completed \033[0m"
}
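#Example usage (illustrative, not called by the script): run "uptime" on every
#master; ${!1} is indirect expansion, so "master_node" resolves to the three master IPs
#  all_cmd master_node "uptime"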
#Helper that copies a file to a group of hosts (master01, the source host, is skipped): $1 names the host-list variable, $2 is the file path
function scp_file() {
  node="${!1}"
  for i in $node
  do
    if [ $i != $master01 ];then
      scp "$2" "$i:$2" &
    fi
  done
  echo -e "\033[32m Waiting for the parallel file transfers to finish \033[0m"
  wait
  echo -e "\033[32m File transfer completed \033[0m"
}
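#Example usage (illustrative, not called by the script): push a file to every node except master01
#  scp_file all_node /etc/hosts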


#1. Initialization: (1) set hostnames (all_node)
echo -e "\033[31m #1. Initialization: (1) set hostnames (all_node) \033[0m"
ssh $master01 "hostnamectl set-hostname master01"
ssh $master02 "hostnamectl set-hostname master02"
ssh $master03 "hostnamectl set-hostname master03"
ssh $node01 "hostnamectl set-hostname node01"
ssh $node02 "hostnamectl set-hostname node02"
ssh $node03 "hostnamectl set-hostname node03"

#1. Initialization: (2) add hosts entries (all_node)
echo -e "\033[31m #1. Initialization: (2) add hosts entries (all_node) \033[0m"
all_cmd all_node "echo $master01 master01 >> /etc/hosts"
all_cmd all_node "echo $master02 master02 >> /etc/hosts"
all_cmd all_node "echo $master03 master03 >> /etc/hosts"
all_cmd all_node "echo $node01 node01 >> /etc/hosts"
all_cmd all_node "echo $node02 node02 >> /etc/hosts"
all_cmd all_node "echo $node03 node03 >> /etc/hosts"

#1. Initialization: (3) install required tools (all_node)
echo -e "\033[31m #1. Initialization: (3) install required tools (all_node) \033[0m"
all_cmd all_node "yum -y install vim tree lrzsz wget psmisc net-tools telnet yum-utils device-mapper-persistent-data lvm2 git rsync"

#1. Initialization: (4) disable firewalld, SELinux, and swap (all_node)
echo -e "\033[31m #1. Initialization: (4) disable firewalld, SELinux, and swap (all_node) \033[0m"
all_cmd all_node "systemctl stop firewalld && systemctl disable firewalld"
all_cmd all_node "setenforce 0 && sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config "
all_cmd all_node "sed -ri 's/.*swap.*/#&/' /etc/fstab && swapoff -a"

#1. Initialization: (5) kernel parameters (all_node)
echo -e "\033[31m #1. Initialization: (5) kernel parameters (all_node) \033[0m"
cat > /etc/sysctl.d/k8s.conf <<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720

net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
#Copy the file to every node and apply it
scp_file all_node /etc/sysctl.d/k8s.conf
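#The bridge-nf-call keys above only exist once the br_netfilter kernel module is
#loaded; load it everywhere first or sysctl --system will report those keys as missing
all_cmd all_node "modprobe br_netfilter"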
all_cmd all_node "sysctl --system"
#Raise the Linux resource limits on every node
cat >> /etc/security/limits.conf <<EOF
* soft nofile 65536
* hard nofile 131072
* soft nproc 65535
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
scp_file all_node /etc/security/limits.conf


#2. Install Docker (all_node)
echo -e "\033[31m #2. Install Docker (all_node) \033[0m"
all_cmd all_node "yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo"
all_cmd all_node "rpm -import https://mirrors.aliyun.com/docker-ce/linux/centos/gpg"
all_cmd all_node "yum install -y docker-ce docker-ce-cli containerd.io"
#Create the Docker config: registry-mirrors adds a pull mirror, exec-opts switches the cgroup driver to systemd, log-driver/log-opts set the log format and rotation
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://k2anw3oh.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "500m", "max-file": "3"
  }
}
EOF
#Copy the config to the other hosts
scp_file all_node /etc/docker/daemon.json
#Start Docker and enable it at boot
all_cmd all_node "systemctl daemon-reload && systemctl restart docker && systemctl enable docker"


#3. Install kubeadm, kubelet, and kubectl (all_node)
echo -e "\033[31m #3. Install kubeadm, kubelet, and kubectl \033[0m"
#Create the yum repo file
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#Copy the repo file to the other servers
scp_file all_node /etc/yum.repos.d/kubernetes.repo
#Import the repo signing keys, then install kubeadm, kubelet, and kubectl
all_cmd all_node "rpm --import https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg"
all_cmd all_node "rpm --import https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg"
all_cmd all_node "yum install -y kubelet-$kubelet_version kubeadm-$kubeadm_version kubectl-$kubectl_version"
#Write the kubelet config, copy it to every server, and start kubelet
cat > /etc/sysconfig/kubelet <<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.2"
EOF
scp_file all_node /etc/sysconfig/kubelet
all_cmd all_node "systemctl enable --now kubelet"


#4. Install and configure the HA components (masters): (1) haproxy
echo -e "\033[31m #4. Install and configure the HA components (masters): (1) haproxy \033[0m"
all_cmd master_node "yum -y install haproxy"
cat > /etc/haproxy/haproxy.cfg << EOF
global
    log         127.0.0.1 local0 info
    log         127.0.0.1 local1 warning
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    mode                    tcp
    log                     global
    option                  tcplog
    option                  dontlognull
    option                  redispatch
    retries                 3
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout check           10s
    maxconn                 3000

frontend monitor-in
    bind *:33305
    mode http
    option httplog
    monitor-uri /monitor

frontend Kube_master
    bind *:6444
    mode tcp
    option tcplog
    default_backend Kube_master

backend Kube_master
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    server master01 $master01:6443  check inter 10000 fall 2 rise 2 weight 1
    server master02 $master02:6443  check inter 10000 fall 2 rise 2 weight 1
    server master03 $master03:6443  check inter 10000 fall 2 rise 2 weight 1
EOF
scp_file master_node /etc/haproxy/haproxy.cfg
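#Why port 6444: haproxy runs on the masters themselves, where the apiserver already
#occupies 6443, so the load balancer listens on 6444 and forwards to 6443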

#4. Install and configure the HA components (masters): (2) keepalived
echo -e "\033[31m #4. Install and configure the HA components (masters): (2) keepalived \033[0m"
all_cmd master_node "yum -y install keepalived"
rm -f /etc/keepalived/keepalived.conf
cat > /etc/keepalived/keepalived.conf << EOF
global_defs {
    router_id master01
}

vrrp_script chk_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 2
    weight 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 50
    priority 100
    advert_int 1
    virtual_ipaddress {
        $vip
    }
    track_script {
        chk_haproxy
    }
}
EOF
scp_file master_node /etc/keepalived/keepalived.conf
#Adjust the config on the other two masters (router_id, BACKUP state, and a distinct priority for each backup)
all_cmd master02 "sed -i s/master01/master02/g /etc/keepalived/keepalived.conf"
all_cmd master02 "sed -i s/MASTER/BACKUP/g /etc/keepalived/keepalived.conf"
all_cmd master02 "sed -i 's/priority 100/priority 90/g' /etc/keepalived/keepalived.conf"

all_cmd master03 "sed -i s/master01/master03/g /etc/keepalived/keepalived.conf"
all_cmd master03 "sed -i s/MASTER/BACKUP/g /etc/keepalived/keepalived.conf"
all_cmd master03 "sed -i 's/priority 100/priority 90/g' /etc/keepalived/keepalived.conf"
#Write the haproxy health-check script, then start haproxy and keepalived
cat > /etc/keepalived/check_haproxy.sh << EOF
#!/bin/bash
if ! killall -0 haproxy; then
    systemctl stop keepalived
fi
EOF
scp_file master_node /etc/keepalived/check_haproxy.sh
#keepalived only runs the check script if it is executable
all_cmd master_node "chmod +x /etc/keepalived/check_haproxy.sh"
all_cmd master_node "systemctl enable --now haproxy && systemctl enable --now keepalived"


#5. Deploy the Kubernetes cluster: (1) build the init configuration (master01)
echo -e "\033[31m #5. Deploy the Kubernetes cluster: (1) build the init configuration (master01) \033[0m"
#Generate the default config
kubeadm config print init-defaults > /opt/config_old.yaml
#Edit the config
#Point advertiseAddress at this master
sed -i s/1.2.3.4/$master01/g /opt/config_old.yaml
#Add a certSANs list under apiServer holding the VIP and every master IP
sed -i "/apiServer/a\  certSANs:" /opt/config_old.yaml
sed -i "/certSANs:/a\    - $vip" /opt/config_old.yaml
sed -i "/- $vip/a\    - $master01" /opt/config_old.yaml
sed -i "/- $master01/a\    - $master02" /opt/config_old.yaml
sed -i "/- $master02/a\    - $master03" /opt/config_old.yaml
#Point the control-plane endpoint at the VIP (haproxy front end on port 6444)
sed -i "/clusterName/a\controlPlaneEndpoint: $vip:6444" /opt/config_old.yaml
#Swap the image repository for the Aliyun mirror
sed -i s#k8s.gcr.io#registry.cn-hangzhou.aliyuncs.com/google_containers#g /opt/config_old.yaml
#Set the Kubernetes version
sed -i s#1.20.0#$kubeadm_version#g /opt/config_old.yaml
#Define the pod and service CIDRs
sed -i "/dnsDomain/a\  podSubnet: $podip" /opt/config_old.yaml
sed -i /serviceSubnet/d /opt/config_old.yaml
sed -i "/podSubnet/a\  serviceSubnet: $serviceip" /opt/config_old.yaml
#Switch kube-proxy from the default iptables mode to ipvs
cat >> /opt/config_old.yaml <<EOF
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF

kubeadm config migrate --old-config /opt/config_old.yaml --new-config /opt/kubeadm-config.yaml
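#Optional check: list the images the final config resolves to before pulling them
#  kubeadm config images list --config /opt/kubeadm-config.yaml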

#5. Deploy the Kubernetes cluster: (2) pull images on every node (all_node)
echo -e "\033[31m #5. Deploy the Kubernetes cluster: (2) pull images on every node (all_node) \033[0m"
#Copy the config to every node, then pull the images it references
scp_file all_node /opt/kubeadm-config.yaml
all_cmd all_node "kubeadm config images pull --config /opt/kubeadm-config.yaml"

#5. Deploy the Kubernetes cluster: (3) initialize master01 and configure its environment (master01)
echo -e "\033[31m #5. Deploy the Kubernetes cluster: (3) initialize master01 and configure its environment (master01) \033[0m"
#Initialize the cluster
kubeadm init --config /opt/kubeadm-config.yaml --upload-certs | tee /opt/kubeadm-init.log
#Set up the kubectl config, then comment out --port=0 so the scheduler and controller-manager health ports come back (fixes "kubectl get cs" showing unhealthy)
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
sed -i 's/- --port=0/#&/g' /etc/kubernetes/manifests/kube-scheduler.yaml
sed -i 's/- --port=0/#&/g' /etc/kubernetes/manifests/kube-controller-manager.yaml
systemctl restart kubelet
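#Optional check: with --port=0 commented out, componentstatuses should report Healthy
#  kubectl get cs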

#5. Deploy the Kubernetes cluster: (4) install the flannel network plugin (master01)
echo -e "\033[31m #5. Deploy the Kubernetes cluster: (4) install the flannel network plugin (master01) \033[0m"
#Download the manifest (retrying until it succeeds), patch it, then apply it
cd /opt
while [ ! -f /opt/kube-flannel.yml ]
do
  wget https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
done
sed -i "s#10.244.0.0/16#$podip#g" /opt/kube-flannel.yml
sed -i 's#.*flannel:v.*#        image: registry.cn-guangzhou.aliyuncs.com/mtactor/flannel:v0.25.3#g' /opt/kube-flannel.yml
sed -i 's#.*flannel-cni-plugin.*#        image: registry.cn-guangzhou.aliyuncs.com/mtactor/flannel-cni-plugin:v1.4.1-flannel1#g' /opt/kube-flannel.yml
kubectl apply -f /opt/kube-flannel.yml
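#Optional check: the flannel pods should reach Running before the other nodes join
#  kubectl get pods -A | grep flannel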

#5. Deploy the Kubernetes cluster: (5) join the remaining nodes (all_node)
echo -e "\033[31m #5. Deploy the Kubernetes cluster: (5) join the remaining nodes (all_node) \033[0m"
#Extract the two join commands (control plane and worker) from the init log
master_line=`cat -n /opt/kubeadm-init.log | grep "kubeadm join" | awk '{print $1}' | head -1`
node_line=`cat -n /opt/kubeadm-init.log | grep "kubeadm join" | awk '{print $1}' | tail -1`
master_line=$((master_line+2))
node_line=$((node_line+1))
master_init=`cat /opt/kubeadm-init.log | head -$master_line | tail -3 | tr '\\' ' '`
node_init=`cat /opt/kubeadm-init.log | head -$node_line | tail -2 | tr '\\' ' '`
#Join the other two masters as control-plane nodes (master01 is already initialized) and set up kubectl for them
other_masters="
  $master02
  $master03"
all_cmd other_masters "$master_init"
all_cmd other_masters "mkdir -p \$HOME/.kube"
all_cmd other_masters "cp -i /etc/kubernetes/admin.conf \$HOME/.kube/config"
all_cmd other_masters "chown \$(id -u):\$(id -g) \$HOME/.kube/config"
#Join the worker nodes
all_cmd nodes "$node_init"


#6. Install the dashboard (the Kubernetes web UI)
echo -e "\033[31m #6. Install the dashboard (the Kubernetes web UI) \033[0m"
cd /opt
while [ ! -f /opt/recommended.yaml ]
do
  wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.6.1/aio/deploy/recommended.yaml
done
#Patch the manifest to expose the Service on a NodePort
sed -i "/targetPort: 8443/a\      nodePort: 30001" /opt/recommended.yaml
sed -i "/nodePort/a\  type: NodePort" /opt/recommended.yaml
sed -i 's#.*dashboard:v.*#          image: registry.cn-guangzhou.aliyuncs.com/mtactor/kubernetes-dashboard:v2.6.1#g' /opt/recommended.yaml
sed -i 's#.*metrics-scraper:v.*#          image: registry.cn-guangzhou.aliyuncs.com/mtactor/metrics-scraper:v1.0.8#g' /opt/recommended.yaml
#Deploy the dashboard
kubectl apply -f recommended.yaml
#Create an admin service account and collect the login token
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
token=`kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}') | grep "token:"`
#Find the node the dashboard pod landed on; ${!runnode} below resolves that hostname to its IP via the variables defined at the top
runnode=`kubectl get pod -n kubernetes-dashboard -o wide | awk '{print $7}' | tail -1`

echo -e "\033[32m kubernetes安装成功 \033[0m"
echo -e "\033[32m 节点运行情况 \033[0m"
kubectl get nodes
echo -e "\033[32m 仪表盘信息已存入/opt/dashboard.txt \033[0m"
echo -e "\033[32m 登录链接:https://${!runnode}:30001 \033[0m" >> /opt/dashboard.txt
echo -e "\033[32m $token \033[0m" >> /opt/dashboard.txt
cat /opt/dashboard.txt
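#To add more worker nodes later (the scale-out case mentioned in the overview),
#generate a fresh join command on any master and run it on the new machine:
#  kubeadm token create --print-join-command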
