Installing a Highly Available Kubernetes Cluster with Sealos


Cluster design

Role      IP address        OS                                   Spec
master1   192.168.233.81    CentOS 7.9 infrastructure server     2 CPUs, 2 GB RAM, 50 GB disk
master2   192.168.233.82    CentOS 7.9 infrastructure server     2 CPUs, 2 GB RAM, 50 GB disk
master3   192.168.233.83    CentOS 7.9 infrastructure server     2 CPUs, 2 GB RAM, 50 GB disk
node1     192.168.233.71    CentOS 7.9 infrastructure server     2 CPUs, 2 GB RAM, 50 GB disk

Installation package download

The software packages used in this installation have been prepared for you; if needed, they can be downloaded from the URL below.

https://www.xsoftnet.com/share/a000Qgph5Wg3m.html

Upgrade the system kernel to 4.4

  • master1
  • master2
  • master3
  • node1

The 3.10.x kernel that ships with CentOS 7.x has a number of bugs that make Docker and Kubernetes unstable at runtime, so upgrade it. First add the ELRepo repository:

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
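
If you prefer to pull the kernel from the repository instead of the bundled rpm used below, you can list the long-term kernels that ELRepo currently publishes (a quick sketch; the exact versions on offer today will no longer match the 4.4.222 build used in this article):

# list kernel packages available from the elrepo-kernel repository
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available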

Check the current system kernel version

[root@basecentos ~]# uname -r
3.10.0-1160.el7.x86_64

Install the 4.4 kernel

[root@basecentos ~]# yum -y install kernel-lt-4.4.222-1.el7.elrepo.x86_64.rpm

Check the grub2 menu entries

[root@basecentos ~]# cat /boot/grub2/grub.cfg | grep 4.4
menuentry 'CentOS Linux (4.4.222-1.el7.elrepo.x86_64) 7 (Core)' --class centos --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-1160.el7.x86_64-advanced-79b4406a-06e4-4cb0-96e9-e879f1aa4aef' {
        linux16 /vmlinuz-4.4.222-1.el7.elrepo.x86_64 root=UUID=79b4406a-06e4-4cb0-96e9-e879f1aa4aef ro crashkernel=auto rhgb quiet LANG=en_US.UTF-8
        initrd16 /initramfs-4.4.222-1.el7.elrepo.x86_64.img
menuentry 'CentOS Linux (3.10.0-1160.el7.x86_64) 7 (Core)' --class centos --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-1160.el7.x86_64-advanced-79b4406a-06e4-4cb0-96e9-e879f1aa4aef' {
        linux16 /vmlinuz-3.10.0-1160.el7.x86_64 root=UUID=79b4406a-06e4-4cb0-96e9-e879f1aa4aef ro crashkernel=auto rhgb quiet LANG=en_US.UTF-8
menuentry 'CentOS Linux (0-rescue-e31c04fc11eb409581d9be03fff3987a) 7 (Core)' --class centos --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-0-rescue-e31c04fc11eb409581d9be03fff3987a-advanced-79b4406a-06e4-4cb0-96e9-e879f1aa4aef' {
        linux16 /vmlinuz-0-rescue-e31c04fc11eb409581d9be03fff3987a root=UUID=79b4406a-06e4-4cb0-96e9-e879f1aa4aef ro crashkernel=auto rhgb quiet

Set it as the default kernel at boot

[root@basecentos ~]# grub2-set-default 'CentOS Linux (4.4.222-1.el7.elrepo.x86_64) 7 (Core)'
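
Before rebooting, you can confirm which entry grub2 has recorded as the default:

# the saved_entry value should name the 4.4 kernel set above
grub2-editenv list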

Set the system hostnames

  • master1
hostnamectl  set-hostname  master1
  • master2
hostnamectl  set-hostname  master2
  • master3
hostnamectl  set-hostname  master3
  • node1
hostnamectl  set-hostname  node1
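
On each machine you can confirm the change took effect:

# the static hostname should match the name just set
hostnamectl status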

Set up mutual resolution in the hosts file

  • master1
  • master2
  • master3
  • node1
[root@master1 ~]# vim /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.233.81 master1 m1
192.168.233.82 master2 m2
192.168.233.83 master3 m3
192.168.233.71 node1 n1
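
After copying the same entries to every node, a quick check that the short names resolve (using the m1/m2/m3/n1 aliases added above):

# each alias should answer from the expected IP
ping -c 1 m1
ping -c 1 m2
ping -c 1 m3
ping -c 1 n1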

Install dependency packages

  • master1
  • master2
  • master3
  • node1
yum install -y conntrack ntpdate ntp ipvsadm ipset  iptables curl sysstat libseccomp wget  vim net-tools git

Configure the time synchronization servers

  • master1
[root@master1 ~]# vim /etc/chrony.conf

# 1.
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst
change to
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp3.aliyun.com iburst

# 2.
#allow 192.168.8.0/16
change to
allow 192.168.233.0/24

# 3.
#local stratum 10
change to
local stratum 11

[root@master1 ~]# systemctl restart chronyd
[root@master1 ~]# systemctl enable chronyd
  • master2
[root@master2 ~]# vim /etc/chrony.conf

# 1.
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst
change to
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp3.aliyun.com iburst

# 2.
#allow 192.168.8.0/16
change to
allow 192.168.233.0/24

# 3.
#local stratum 10
change to
local stratum 12

[root@master2 ~]# systemctl restart chronyd
[root@master2 ~]# systemctl enable chronyd
  • master3
[root@master3 ~]# vim /etc/chrony.conf

# 1.
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst
change to
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp3.aliyun.com iburst

# 2.
#allow 192.168.8.0/16
change to
allow 192.168.233.0/24

# 3.
#local stratum 10
change to
local stratum 13

[root@master3 ~]# systemctl restart chronyd
[root@master3 ~]# systemctl enable chronyd
  • node1
[root@node1 ~]# vim /etc/chrony.conf

# 1.
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst
change to
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server 192.168.233.81 iburst
server 192.168.233.82 iburst
server 192.168.233.83 iburst

[root@node1 ~]# systemctl restart chronyd
[root@node1 ~]# systemctl enable chronyd
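
Once chronyd has been restarted everywhere, verify that each node is syncing from its configured sources (on node1 the list should show the three masters):

# list NTP sources with their reachability and offset
chronyc sources -v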

Switch the firewall to iptables and flush the rules

  • master1
  • master2
  • master3
  • node1
systemctl  stop firewalld  &&  systemctl  disable firewalld

yum -y install iptables-services  &&  systemctl  start iptables  &&  systemctl  enable iptables  &&  iptables -F  &&  service iptables save
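
To confirm the rule set really is empty and has been persisted, a quick check:

# INPUT/FORWARD/OUTPUT should contain no rules
iptables -nL
# rules saved by "service iptables save"
cat /etc/sysconfig/iptables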

Disable swap and SELinux

  • master1
  • master2
  • master3
  • node1
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

Verify

# swap partition disabled (the swap line in fstab is commented out)
[root@master1 ~]# cat /etc/fstab

#
# /etc/fstab
# Created by anaconda on Tue Jun 14 00:13:14 2022
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
UUID=79b4406a-06e4-4cb0-96e9-e879f1aa4aef /                       xfs     defaults        0 0
UUID=234dd2af-4f8e-4eb2-836c-8540e9be4586 /boot                   xfs     defaults        0 0
#UUID=9e0834c3-574a-4826-a6ff-8c89cc136d26 swap                    swap    defaults        0 0

# SELinux disabled
[root@master1 ~]# cat /etc/selinux/config 

# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected. 
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted

Tune kernel parameters

  • master1
  • master2
  • master3
  • node1
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# do not use swap unless the system is out of memory (OOM)
vm.swappiness=0
# do not check whether enough physical memory is available before committing
vm.overcommit_memory=1
# do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

cp kubernetes.conf  /etc/sysctl.d/kubernetes.conf

sysctl -p /etc/sysctl.d/kubernetes.conf
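
Note that the two net.bridge.* keys only exist once the br_netfilter module is loaded (that happens in the IPVS step below), so sysctl -p may complain about them until then. You can spot-check keys that are always present:

# both should print the values set above (1 and 0)
sysctl net.ipv4.ip_forward vm.swappiness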

Stop unneeded system services

  • master1
  • master2
  • master3
  • node1
systemctl stop postfix && systemctl disable postfix

Configure rsyslogd and systemd journald

  • master1
  • master2
  • master3
  • node1
# directory for persistent journal storage
mkdir /var/log/journal

mkdir /etc/systemd/journald.conf.d

cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# persist journals to disk
Storage=persistent

# compress archived journals
Compress=yes

SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

# cap total disk usage at 10G
SystemMaxUse=10G

# cap individual journal files at 200M
SystemMaxFileSize=200M

# keep journals for 2 weeks
MaxRetentionSec=2week

# do not forward journals to syslog
ForwardToSyslog=no
EOF

systemctl restart systemd-journald
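
To confirm that journals are now persisted to disk rather than the volatile runtime directory:

# shows how much space the persistent journal occupies
journalctl --disk-usage
ls /var/log/journal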

Prerequisites for enabling IPVS in kube-proxy

  • master1
  • master2
  • master3
modprobe br_netfilter

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
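
ipvsadm, installed with the dependency packages earlier, gives another quick confirmation that IPVS is usable; before kube-proxy starts, the virtual server table is simply empty:

# print the (currently empty) IPVS virtual server table
ipvsadm -Ln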

Reboot the servers

  • master1
  • master2
  • master3
  • node1
[root@basecentos ~]# reboot

Verify

  • master1
  • master2
  • master3
  • node1
[root@node1 ~]# getenforce
Disabled

[root@node1 ~]# uname -r
4.4.222-1.el7.elrepo.x86_64

Install the cluster with Sealos

Copy the two files, kube1.16.0.tar.gz and sealos, to the master1 machine.

[root@master1 ~]# mv sealos /usr/local/bin/

[root@master1 ~]# chmod a+x /usr/local/bin/sealos 

# the value of --passwd is the root password of the servers
sealos init --passwd 'www.tingyinhu.com' \
--master 192.168.233.81  --master 192.168.233.82  --master 192.168.233.83 \
--node 192.168.233.71 \
--pkg-url /root/kube1.16.0.tar.gz \
--version v1.16.0
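
When sealos init completes, a quick sanity check from master1 should list the three masters and node1 as Ready (the full node listing appears again in the test section below):

kubectl get nodes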
Add master nodes
$ sealos join --master 192.168.0.6 --master 192.168.0.7

# or a contiguous range of IPs
$ sealos join --master 192.168.0.6-192.168.0.9
Add nodes
$ sealos join --node 192.168.0.6 --node 192.168.0.7

# or a contiguous range of IPs
$ sealos join --node 192.168.0.6-192.168.0.9
Remove specific master nodes
$ sealos clean --master 192.168.0.6 --master 192.168.0.7

# or a contiguous range of IPs
$ sealos clean --master 192.168.0.6-192.168.0.9
Remove specific worker nodes
$ sealos clean --node 192.168.0.6 --node 192.168.0.7

# or a contiguous range of IPs
$ sealos clean --node 192.168.0.6-192.168.0.9
Tear down the whole cluster
$ sealos clean --all
Back up the cluster (etcd)
$ sealos etcd save

Post-installation test

[root@master1 ~]# kubectl create deployment nginx --image=nginx:1.20.0
deployment.apps/nginx created
[root@master1 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP               NODE    NOMINATED NODE   READINESS GATES
nginx-56fb6c6b68-6lslp   1/1     Running   0          13s   100.66.209.194   node1   <none>           <none>
[root@master1 ~]# curl 100.66.209.194
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@master1 ~]# kubectl create svc nodeport nginx --tcp=80:80
service/nginx created
[root@master1 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        6m3s
nginx        NodePort    10.101.14.158   <none>        80:31542/TCP   5s

From an external browser, open http://192.168.233.81:31542; the nginx welcome page should appear.
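
If no browser is handy, the same check works from the command line against any node IP and the NodePort shown above (31542 here; the port on your cluster will differ):

curl http://192.168.233.81:31542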

High-availability test

Control-plane component status
Check kube-scheduler status
[root@master1 ~]# kubectl  get endpoints kube-scheduler -n kube-system -o yaml

apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"master3_1ab988a2-2f23-4cba-a08a-43ed9e9246e5","leaseDurationSeconds":15,"acquireTime":"2022-11-09T03:20:17Z","renewTime":"2022-11-09T03:20:51Z","leaderTransitions":2}'
  creationTimestamp: "2022-11-09T02:50:14Z"
  name: kube-scheduler
  namespace: kube-system
  resourceVersion: "4010"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
  uid: 2eea717f-5d5f-424e-bd32-ce67fa93a3f9
Check kube-controller-manager status
[root@master1 ~]# kubectl  get endpoints kube-controller-manager -n kube-system -o yaml

apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"master3_8035b83a-9e61-44cf-a24c-6ad59e780a12","leaseDurationSeconds":15,"acquireTime":"2022-11-09T02:51:12Z","renewTime":"2022-11-09T03:17:51Z","leaderTransitions":1}'
  creationTimestamp: "2022-11-09T02:50:14Z"
  name: kube-controller-manager
  namespace: kube-system
  resourceVersion: "3695"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
  uid: abfdbbb2-c1a8-4c47-8471-dff52964921c

Now shut down the master3 server and check the leader status again

[root@master1 ~]# kubectl get endpoints kube-scheduler -n kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"master2_d32950e7-a643-4edf-9991-0c67783f64b7","leaseDurationSeconds":15,"acquireTime":"2022-11-09T03:23:28Z","renewTime":"2022-11-09T03:23:30Z","leaderTransitions":3}'
  creationTimestamp: "2022-11-09T02:50:14Z"
  name: kube-scheduler
  namespace: kube-system
  resourceVersion: "4339"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
  uid: 2eea717f-5d5f-424e-bd32-ce67fa93a3f9
[root@master1 ~]# kubectl get endpoints kube-controller-manager -n kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"master1_a41e91d9-1ea5-4bf9-976c-e8d55e3809d4","leaseDurationSeconds":15,"acquireTime":"2022-11-09T03:23:27Z","renewTime":"2022-11-09T03:23:43Z","leaderTransitions":2}'
  creationTimestamp: "2022-11-09T02:50:14Z"
  name: kube-controller-manager
  namespace: kube-system
  resourceVersion: "4362"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
  uid: abfdbbb2-c1a8-4c47-8471-dff52964921c

Check the node status and deploy nginx-test

[root@master1 ~]# kubectl get nodes
NAME      STATUS     ROLES    AGE   VERSION
master1   Ready      master   35m   v1.16.0
master2   Ready      master   35m   v1.16.0
master3   NotReady   master   35m   v1.16.0
node1     Ready      <none>   34m   v1.16.0
[root@master1 ~]# kubectl create deployment nginx-test --image=nginx:1.20.0
deployment.apps/nginx-test created
[root@master1 ~]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
nginx-56fb6c6b68-6lslp      1/1     Running   0          17m
nginx-test-584898dc-2fdv7   1/1     Running   0          9s

You can see that the newly deployed nginx-test is running normally.
The highly available cluster has been set up successfully.
