kubeadm搭建高可用+网络组件calico

Kubeadm 1.18.17部署方案

1.提前准备镜像(有网机器下载),提前导入

# Load every pre-downloaded image tarball in the current directory.
# Fix: never parse `ls -l` output — use a glob, skip non-files, quote the variable.
for k in ./*; do
  [ -f "$k" ] || continue
  sudo docker load -i "$k"
done

2.提前准备calico kube-config 文件

#安装docker
有网安装

sudo yum install -y yum-utils \
  device-mapper-persistent-data \
  lvm2

sudo yum-config-manager \
    --add-repo \
    http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
	
sudo yum install docker-ce-20.10.08 docker-ce-cli-20.10.08 containerd.io

无网安装
sudo yum -y localinstall ./*

#基础环境设置

关闭防火墙 如果是minimal安装,默认没有装firewalld

systemctl stop firewalld
systemctl disable firewalld

关闭selinux

sed -i 's/enforcing/disabled/' /etc/selinux/config  # 永久
setenforce 0  # 临时

关闭swap

swapoff -a  # 临时
sed -ri 's/.*swap.*/#&/' /etc/fstab    # 永久

根据规划设置主机名

hostnamectl set-hostname <hostname> #分别设置为master1、master2、master3、node1、node2、node3

在master添加hosts

# Append cluster name resolution (run once on each master; fill in real IPs).
# Fix: node2/node3 previously repeated node01.k8s.io — each node needs its own FQDN.
cat >> /etc/hosts << EOF
192.168.xxx.xxx    master.k8s.io   k8s-vip
192.168.xxx.xxx    master01.k8s.io master1
192.168.xxx.xxx    master02.k8s.io master2
192.168.xxx.xxx    master03.k8s.io master3
192.168.xxx.xxx    node01.k8s.io   node1
192.168.xxx.xxx    node02.k8s.io   node2
192.168.xxx.xxx    node03.k8s.io   node3
EOF
ping node1或ping node01.k8s.io #确认配置生效

将桥接的IPv4流量传递到iptables的链

# Pass bridged IPv4/IPv6 traffic to the iptables chains and enable IP forwarding.
# Fixes: the original line "net.ipv4.ip_forward=1= 1" was garbled, the promised
# bridge-nf-call settings were missing, and `> /etc/sysctl.conf` would wipe the
# system file — use a dedicated drop-in instead.
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system  # apply all sysctl drop-ins

#所有master节点部署keepalived

yum install -y conntrack-tools libseccomp libtool-ltdl
yum install -y keepalived

#master1节点配置
! Configuration File for keepalived — master1 (priority 250, starts as MASTER and holds the VIP)

global_defs {
   router_id k8s
}

# Health probe: "killall -0 haproxy" exits 0 while an haproxy process is alive.
# After 10 consecutive failures (fall) the node's priority drops by 2 (weight),
# letting a healthy BACKUP node take over the VIP; 2 successes (rise) restore it.
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state MASTER 
    # NOTE(review): ens33 must match this host's actual NIC name — confirm
    interface ens33
    # Must be identical on all three masters so they form one VRRP group
    virtual_router_id 51
    priority 250
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    # Floating cluster VIP (resolved as master.k8s.io / k8s-vip in /etc/hosts)
    virtual_ipaddress {
        192.168.78.99
    }
    track_script {
        check_haproxy
    }
}
#Master2节点配置
! Configuration File for keepalived — master2 (priority 200, BACKUP; same VRID/VIP as master1)

global_defs {
   router_id k8s
}

# Same haproxy liveness probe as master1 (see comments there).
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP 
    # NOTE(review): confirm NIC name on this host
    interface ens33
    virtual_router_id 51
    priority 200
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        192.168.78.99
    }
    track_script {
        check_haproxy
    }
}
#Master3节点配置
! Configuration File for keepalived — master3 (priority 150, BACKUP; lowest-preference holder of the VIP)

global_defs {
   router_id k8s
}

# Same haproxy liveness probe as master1 (see comments there).
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP 
    # NOTE(review): confirm NIC name on this host
    interface ens33
    virtual_router_id 51
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        192.168.78.99
    }
    track_script {
        check_haproxy
    }
}

启动keepalived

$ systemctl start keepalived.service
设置开机启动
$ systemctl enable keepalived.service

查看启动状态

$ systemctl status keepalived.service

#部署haproxy

yum install -y haproxy
#master节点的配置均相同
# Write the haproxy config (identical on all three masters).
# Fixes: the backend previously listed master02.k8s.io twice and omitted master03;
# the last two `server` lines also lost their indentation.
cat > /etc/haproxy/haproxy.cfg << EOF
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxys to the backends
#---------------------------------------------------------------------
frontend kubernetes-apiserver
    mode                 tcp
    bind                 *:16443
    option               tcplog
    default_backend      kubernetes-apiserver
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode        tcp
    balance     roundrobin
    server      master01.k8s.io   192.168.xxx.xxx:6443 check
    server      master02.k8s.io   192.168.xxx.xxx:6443 check
    server      master03.k8s.io   192.168.xxx.xxx:6443 check
#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind                 *:1080
    stats auth           admin:awesomePassword
    stats refresh        5s
    stats realm          HAProxy\ Statistics
    stats uri            /admin?stats
EOF

设置开机启动

$ systemctl enable haproxy
# 开启haproxy
$ systemctl start haproxy

#添加阿里云YUM软件源

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

#安装kubeadm,kubelet和kubectl

# Pin all three components to the same release.
# Fix: "kubelet-1.18.17.3" is not a valid version — it must match kubeadm/kubectl 1.18.17.
$ yum install -y kubelet-1.18.17 kubeadm-1.18.17 kubectl-1.18.17
$ systemctl enable kubelet

#部署Kubernetes Master

#在具有vip的master上操作
$ mkdir /usr/local/kubernetes/manifests -p

$ cd /usr/local/kubernetes/manifests/

$ vi kubeadm-config.yaml
apiServer:
  certSANs:                     # every name/IP the apiserver cert must cover
    - master1
    - master2
    - master3
    - master.k8s.io
    - 192.168.xxx.xxx           # VIP
    - 192.168.xxx.xxx           # master1
    - 192.168.xxx.xxx           # master2
    - 192.168.xxx.xxx           # master3
    - 127.0.0.1
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 24m0s
# Fix: list items were flush-left (invalid YAML); v1beta2 is the kubeadm
# config schema for Kubernetes 1.18.
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "master.k8s.io:16443"   # the haproxy/keepalived VIP endpoint
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.18.17
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16      # must match the pod CIDR used by calico
  serviceSubnet: 10.96.0.0/16
scheduler: {}

#在master1节点执行

$ kubeadm init --config kubeadm-config.yaml

#按照提示保存以下内容,一会要使用(kubeadm init中的回显内容):

$ kubeadm join master.k8s.io:16443 --token so98j9.uv20vfmsmaffx3fz \
    --discovery-token-ca-cert-hash sha256:9233a9cdc38366a708f9bfaacabdc6626a606c36f973cca6c65924dc4b87d013 \
--control-plane 

#按照提示配置环境变量,使用kubectl工具:

$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config

#安装集群网络

$ curl https://docs.projectcalico.org/v3.11/manifests/calico-typha.yaml -o calico.yaml

# Apply the cluster network.
# Fixes: the command is lowercase `kubectl`, and the file downloaded above
# is calico.yaml (the original applied a non-existent calico.yml).
$ kubectl apply -f calico.yaml

#master2节点加入集群

$ ssh root@192.168.xxx.xxx mkdir -p /etc/kubernetes/pki/etcd

$ scp /etc/kubernetes/admin.conf root@192.168.xxx.xxx:/etc/kubernetes
   
$ scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@192.168.xxx.xxx:/etc/kubernetes/pki
   
$ scp /etc/kubernetes/pki/etcd/ca.* root@192.168.xxx.xxx:/etc/kubernetes/pki/etcd


$ kubeadm join master.k8s.io:16443 --token a8r4cl.ipnc8uwnwg35alhn \
    --discovery-token-ca-cert-hash sha256:2686517c55d2093a7e59ca34ecf72a1a44b36b416e1c2d9ac15565e5b2affb39 \
    --control-plane

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

#Master3节点加入集群

$ ssh root@192.168.xxx.xxx mkdir -p /etc/kubernetes/pki/etcd

$ scp /etc/kubernetes/admin.conf root@192.168.xxx.xxx:/etc/kubernetes
   
$ scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@192.168.xxx.xxx:/etc/kubernetes/pki
   
$ scp /etc/kubernetes/pki/etcd/ca.* root@192.168.xxx.xxx:/etc/kubernetes/pki/etcd

$ kubeadm join master.k8s.io:16443 --token a8r4cl.ipnc8uwnwg35alhn \
    --discovery-token-ca-cert-hash sha256:2686517c55d2093a7e59ca34ecf72a1a44b36b416e1c2d9ac15565e5b2affb39 \
    --control-plane

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

#加入Kubernetes Node

在node1上执行
向集群添加新节点,执行在kubeadm init输出的kubeadm join命令(之前kubeadm init回显内容,注意不加--control-plane):
$ kubeadm join master.k8s.io:16443 --token a8r4cl.ipnc8uwnwg35alhn \
    --discovery-token-ca-cert-hash sha256:2686517c55d2093a7e59ca34ecf72a1a44b36b416e1c2d9ac15565e5b2affb39


$ kubectl get nodes
$ kubectl get pods -n kube-system
$ kubectl get cs

附相关包和组件

QQ:1279971605
获取,阿里云盘无法分享
  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

人生不过大梦一场

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值