Building a Kubernetes High-Availability Cluster (v1.27.2)

1. Environment Preparation (run on all hosts)

Hostname     IP Address         Role
master160    192.168.110.160    control-plane node
master161    192.168.110.161    control-plane node
master162    192.168.110.162    control-plane node
worker163    192.168.110.163    worker node
worker164    192.168.110.164    worker node

1.1 Configure Host Name Resolution

vim /etc/hosts

# Append the following entries
192.168.110.160 master160
192.168.110.161 master161
192.168.110.162 master162
192.168.110.163 worker163
192.168.110.164 worker164

1.2 Host Security Settings

# Disable the firewall
systemctl disable --now firewalld

# Disable SELinux (the sed change takes effect after reboot; setenforce disables it immediately)
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0

1.3 Configure Time Synchronization

# Install ntpdate and edit the crontab
yum -y install ntpdate
crontab -e

# Sync from an Aliyun time server once per hour
0 */1 * * * ntpdate time1.aliyun.com

1.4 Upgrade the OS Kernel

# Import the elrepo GPG key
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org

# Install the elrepo yum repository
yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm

# Install kernel-lt (lt = long-term support; ml would be the mainline kernel)
yum --enablerepo="elrepo-kernel" -y install kernel-lt.x86_64

# Make entry 0 (the newly installed kernel) the default GRUB2 boot entry
grub2-set-default 0

# Regenerate the GRUB2 config
grub2-mkconfig -o /boot/grub2/grub.cfg

# Reboot into the new kernel
reboot
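
After the reboot, confirm that the new kernel is actually the one running:

# Should print the kernel-lt version installed above, not the stock 3.10 kernel
uname -r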

1.5 Add Bridge Filtering and Kernel Forwarding Settings

# Create the k8s.conf file
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF

# Load the br_netfilter module
modprobe br_netfilter

# Apply the settings
sysctl -p /etc/sysctl.d/k8s.conf

# Confirm the module is loaded
lsmod | grep br_netfilter

1.6 Install ipset and ipvsadm

# Install ipset and ipvsadm
yum -y install ipset ipvsadm

# Configure IPVS module loading: list the modules that need to be loaded
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

# Make the script executable, run it, and check that the modules are loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
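
Note that on a systemd-based system the /etc/sysconfig/modules script is not guaranteed to run at boot, and modprobe by itself does not persist. A sketch of the systemd-native way to load br_netfilter and the IPVS modules automatically, via modules-load.d:

# Load the required kernel modules at every boot
cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF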

1.7 Disable the Swap Partition

# Turn swap off immediately
swapoff -a

# Make it permanent: comment out the swap entry in /etc/fstab
vim /etc/fstab

1.8 Configure Passwordless SSH (run on master160)

ssh-keygen

# Press Enter at every prompt

Copy the key files to the other servers:

# Go to /root/.ssh
cd /root/.ssh

# List the files
ls

# Copy the public key as authorized_keys
cp id_rsa.pub authorized_keys

# Copy the whole .ssh directory to the other servers
for i in 161 162 163 164
do
scp -r /root/.ssh 192.168.110.$i:/root/
done

Type yes when prompted, then enter each server's password.
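
Alternatively, ssh-copy-id appends the key to each host's authorized_keys without overwriting its .ssh directory:

# Equivalent approach: push the public key host by host
for i in 161 162 163 164
do
ssh-copy-id root@192.168.110.$i
done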

2. Deploy the etcd Cluster (run on the master nodes: 160, 161, 162)

2.1 Install etcd

# Install etcd
yum -y install etcd

2.2 Generate the etcd Configuration Files (master160)

# Create the script file
vim etcd_install.sh

# Paste the following content
etcd1=192.168.110.160
etcd2=192.168.110.161
etcd3=192.168.110.162

TOKEN=smartgo
ETCDHOSTS=($etcd1 $etcd2 $etcd3)
NAMES=("master160" "master161" "master162")
for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
NAME=${NAMES[$i]}
cat << EOF > /tmp/$NAME.conf
# [member]
ETCD_NAME=$NAME
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://$HOST:2380"
ETCD_LISTEN_CLIENT_URLS="http://$HOST:2379,http://127.0.0.1:2379"
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://$HOST:2380"
ETCD_INITIAL_CLUSTER="${NAMES[0]}=http://${ETCDHOSTS[0]}:2380,${NAMES[1]}=http://${ETCDHOSTS[1]}:2380,${NAMES[2]}=http://${ETCDHOSTS[2]}:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="$TOKEN"
ETCD_ADVERTISE_CLIENT_URLS="http://$HOST:2379"
EOF
done
ls /tmp/master*
scp /tmp/master161.conf $etcd2:/etc/etcd/etcd.conf
scp /tmp/master162.conf $etcd3:/etc/etcd/etcd.conf
cp /tmp/master160.conf /etc/etcd/etcd.conf
rm -f /tmp/master*.conf


# Run the script
sh etcd_install.sh

2.3 Start etcd

# Start etcd and enable it at boot (run on all three master nodes)
systemctl enable --now etcd

Check that the cluster is healthy:

etcdctl member list

etcdctl cluster-health
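
The etcd shipped in the CentOS 7 repos is the 3.3 series, whose etcdctl speaks the v2 API by default (which is why cluster-health works above). The same health check through the v3 API, assuming the endpoints configured earlier, would be:

# Health check via the etcd v3 API
ETCDCTL_API=3 etcdctl --endpoints="http://192.168.110.160:2379,http://192.168.110.161:2379,http://192.168.110.162:2379" endpoint health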

3. Install the Load Balancers: HAProxy + Keepalived (160, 161)

3.1 Install

yum -y install haproxy keepalived

3.2 Create the HAProxy Configuration File

cat > /etc/haproxy/haproxy.cfg << EOF
global
  maxconn 2000
  ulimit-n 16384
  log 127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode http
  option httplog
  timeout connect 5000
  timeout client 50000
  timeout server 50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

frontend k8s-master
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-masters

backend k8s-masters
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server  master160  192.168.110.160:6443 check
  server  master161  192.168.110.161:6443 check
  server  master162  192.168.110.162:6443 check
EOF
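
Before starting the service, the file can be syntax-checked with HAProxy's built-in check mode:

# Validate the configuration; it should report that the file is valid
haproxy -c -f /etc/haproxy/haproxy.cfg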

3.3 Create the Keepalived Configuration Files

The configuration differs between the two servers.

# master160 configuration
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    mcast_src_ip 192.168.110.160
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.110.200
    }
    track_script {
        chk_apiserver
    }
}
EOF
# master161 configuration

cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    mcast_src_ip 192.168.110.161
    virtual_router_id 51
    priority 99
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.110.200
    }
    track_script {
        chk_apiserver
    }
}
EOF
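
Both files assume the NIC is named ens33. If yours differs, adjust the interface and mcast_src_ip lines; interface names and addresses can be listed with:

# One line per interface with its IPv4 address
ip -o -4 address show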

3.4 Health Check Script

cat > /etc/keepalived/check_apiserver.sh <<"EOF"
#!/bin/bash
# Check up to three times whether haproxy is running; if it never is,
# stop keepalived so the VIP fails over to the backup node.
err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF


# Make the script executable
chmod +x /etc/keepalived/check_apiserver.sh

3.5 Start the Services and Verify

systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived

# Verify: the VIP 192.168.110.200 should now appear on master160
ip address show
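
To confirm failover actually works, stop haproxy on master160: the check script will stop keepalived and the VIP should move to master161 within a few seconds (a quick test, assuming the setup above):

# On master160: simulate an haproxy failure
systemctl stop haproxy

# On master161: the VIP should now be present
ip address show | grep 192.168.110.200

# On master160: restore both services afterwards
systemctl start haproxy
systemctl start keepalived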

4. Install Docker (run on all nodes)

4.1 Install

# Add the Docker CE yum repository (Aliyun mirror)
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

# Install
yum -y install docker-ce

4.2 Configure Docker

# Create the Docker data directory
mkdir /data/docker -p

# Configure
cat > /etc/docker/daemon.json << EOF
{
     "registry-mirrors": [
         "https://ebkn7ykm.mirror.aliyuncs.com",
         "https://docker.mirrors.ustc.edu.cn",
         "http://f1361db2.m.daocloud.io",
         "https://registry.docker-cn.com",
         "http://hub-mirror.c.163.com",
         "https://registry.cn-hangzhou.aliyuncs.com"
     ],
     "data-root":"/data/docker",
     "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

其中"exec-opts": ["native.cgroupdriver=systemd"]为cgroup方式 

# 重启docker
systemctl start docker
systemctl enable docker
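
A quick way to confirm the daemon picked up both the data root and the cgroup driver:

# Should report "Cgroup Driver: systemd" and "Docker Root Dir: /data/docker"
docker info | grep -iE "cgroup driver|docker root dir"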

5. Install cri-dockerd (run on all nodes)

5.1 Download cri-dockerd

# Create the directory
mkdir /data/cri-dockerd -p
cd /data/cri-dockerd

# Upload the RPM (downloaded from the Mirantis/cri-dockerd GitHub releases page), then check
ll

5.2 Install

yum -y install cri-dockerd-0.3.1-3.el7.x86_64.rpm

5.3 Configure

# Edit the service unit
vim /usr/lib/systemd/system/cri-docker.service

# Change the ExecStart line (line 10) to:
ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image=registry.k8s.io/pause:3.9 --container-runtime-endpoint fd://

# Reload systemd, then start and enable the service
systemctl daemon-reload
systemctl start cri-docker
systemctl enable cri-docker
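
cri-dockerd exposes the CRI socket that kubeadm will talk to later, so it is worth confirming the service is up and the socket exists:

# Service state and the CRI socket path used in section 6
systemctl is-active cri-docker
ls -l /var/run/cri-dockerd.sock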

6. Create the Kubernetes Cluster

6.1 Configure an Aliyun Yum Mirror (all nodes)

cat > /etc/yum.repos.d/k8s.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

6.2 Install kubeadm, kubectl, and kubelet (all nodes)

# Install the pinned 1.27.2 versions
yum -y install kubeadm-1.27.2-0 kubelet-1.27.2-0 kubectl-1.27.2-0

6.3 Configure the kubelet (all nodes)

vim /etc/sysconfig/kubelet

# Replace the existing content with the following
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"

# Enable kubelet at boot (kubeadm will start it during init/join)
systemctl enable kubelet

6.4 Prepare the Images (all nodes)

Method 1 (requires direct access to registry.k8s.io, e.g. through a proxy):

# List the required images
kubeadm config images list --kubernetes-version=v1.27.2

# Create images.sh
vim images.sh

## Paste the following; images_list contains the images returned by the command above

#!/bin/bash
images_list='
registry.k8s.io/kube-apiserver:v1.27.2
registry.k8s.io/kube-controller-manager:v1.27.2
registry.k8s.io/kube-scheduler:v1.27.2
registry.k8s.io/kube-proxy:v1.27.2
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.7-0
registry.k8s.io/coredns/coredns:v1.10.1'

for i in $images_list
do
   docker pull $i
done

docker save -o k8s-1-27-2.tar $images_list

# Run the script
sh images.sh
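
Since every node needs these images, the saved archive can be copied to the other hosts and loaded there instead of pulling five times (a sketch, assuming the /data/k8s directory from Method 2 exists on each node):

# Distribute the archive, then run docker load -i k8s-1-27-2.tar on each node
for i in 161 162 163 164
do
scp k8s-1-27-2.tar 192.168.110.$i:/data/k8s/
done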

Method 2:

Use a pre-downloaded image archive for 1.27.2:

# Create the k8s directory
mkdir /data/k8s -p
cd /data/k8s

# Upload the downloaded files, then enter the directory
cd 1.27.2
ll

# Load the images
for i in $(ls); do docker load -i $i; done

# Check
docker images

6.5 Initialize the Cluster (node 160)

# Prepare the configuration file
cat > kubeadm-config.yaml << EOF
---
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.110.160
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: 1.27.2
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
apiServer:
  certSANs:
  - 192.168.110.200
controlPlaneEndpoint: "192.168.110.200:16443"
etcd:
  external:
    endpoints:
      - http://192.168.110.160:2379
      - http://192.168.110.161:2379
      - http://192.168.110.162:2379
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
  verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
EOF

# Initialize the cluster from the config file
kubeadm init --config kubeadm-config.yaml --upload-certs --v=9

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
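
At this point the control plane should answer, although the node will stay NotReady until the Calico CNI is installed in section 7:

# master160 should be listed (NotReady for now: no CNI yet)
kubectl get nodes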

6.6 Copy Certificates to the Other Master Nodes (run on 160)

# Run on master160: copy the certificates to the other master nodes

vim cp-k8s-cert.sh

# Paste the following
for host in 161 162; do
    scp -r /etc/kubernetes/pki 192.168.110.$host:/etc/kubernetes/
done


# Run the script
sh cp-k8s-cert.sh

6.7 Remove the Redundant Certificates and Join the Cluster (161, 162)

# Run on master161 and master162

cd /etc/kubernetes/pki/
rm -rf api*
rm -rf front-proxy-client.*

ls

Join the cluster. Be sure to replace the token, discovery hash, and certificate key with the values printed by your own kubeadm init:

# Join as an additional control-plane node
kubeadm join 192.168.110.200:16443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:6669cf9c6b7dd401dd9c2c317ccd6faec6a0035acf6828105ad7d7cddf0dcec0 \
        --control-plane --certificate-key 412bd02f4c4cf6855c27a973f176057c10dee21202f87a8b4bc3a352cdd54e53 \
        --cri-socket unix:///var/run/cri-dockerd.sock

Note: omitting the trailing --cri-socket unix:///var/run/cri-dockerd.sock flag makes the join fail, since kubeadm finds more than one candidate CRI socket on the host and cannot pick one automatically.
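
If the token or certificate key has expired (the bootstrap token lives 24 hours per the config above; the key uploaded by --upload-certs only 2 hours), fresh values can be generated on master160:

# Print a new worker join command (new token plus CA cert hash)
kubeadm token create --print-join-command

# Re-upload the control-plane certificates and print a new certificate key
kubeadm init phase upload-certs --upload-certs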

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

6.8 Verify (160)

kubectl get pods -n kube-system

6.9 Join the Worker Nodes (163, 164)

kubeadm join 192.168.110.200:16443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:6669cf9c6b7dd401dd9c2c317ccd6faec6a0035acf6828105ad7d7cddf0dcec0 \
        --cri-socket unix:///var/run/cri-dockerd.sock
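
Back on master160, all five nodes should now be registered:

# Masters and workers listed (still NotReady until Calico is up)
kubectl get nodes -o wide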

7. Install the Calico Network Plugin

7.1 Speed Up GitHub Access

# Look up the current IP of raw.githubusercontent.com online,
# then add it to /etc/hosts

vim /etc/hosts

# Add
185.199.108.133 raw.githubusercontent.com

185.199.108.133 is the IP I resolved; replace it with the one your own lookup returns.

7.2 Deploy Calico (v3.26.0)

Copy the install command from the official Calico site:

# Install the Tigera operator
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/tigera-operator.yaml

Download the custom resources manifest:

# Download the manifest
wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/custom-resources.yaml

# Edit the manifest
vim custom-resources.yaml

Change the cidr field on line 13 so that it matches the podSubnet in the kubeadm-config.yaml from section 6.5 (10.244.0.0/16).

# Apply the manifest
kubectl create -f custom-resources.yaml

Then wait for the pods to come up.

# Verify the namespaces
kubectl get ns

# Verify the Calico pods
kubectl get pods -n calico-system
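
Once every pod in calico-system is Running, the nodes should flip to Ready:

# All five nodes should now report Ready
kubectl get nodes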

Done.
