kubernetes (k8s): deploying a highly available cluster


In a production environment, a Kubernetes cluster must be highly available:

the Master nodes and their core components need to be highly available; if the Master node fails, the whole cluster loses control.

How it works:

  • etcd cluster: three Master nodes are deployed, and the etcd instances on the three Masters form an etcd cluster
  • APIServer cluster: a load balancer sits in front of the APIServers on the three Master nodes; worker nodes and clients talk to the APIServer through this load balancer (see the kubeadm sketch below)
  • pod-master ensures that only the primary master is active: scheduler and controller-manager have multiple instances in the cluster, but only one of each works at a time; the others are standbys
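
Putting these together: every request to the control plane goes to the VIP managed by keepalived, and haproxy on the node holding the VIP forwards it to one of the three apiservers. As a minimal sketch (not part of the original post), this is how kubeadm would later be pointed at that VIP and the haproxy port configured below; the flannel pod CIDR is an assumption:

# run on the first master only; 172.25.60.100 is the keepalived VIP, 8443 the haproxy frontend port
kubeadm init \
    --control-plane-endpoint "172.25.60.100:8443" \
    --upload-certs \
    --pod-network-cidr 10.244.0.0/16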

Background: two high-availability cluster architectures

(Architecture diagram 1 omitted.)
2. Distributed etcd storage: three separate host nodes each act as an etcd member.
(Architecture diagram 2 omitted.)

1. Lab environment

  • Prepare three fresh virtual machines, server2, server3 and server4, and install haproxy and keepalived on each of them

Install keepalived and haproxy: keepalived monitors the state of each service node in the cluster; haproxy is an open-source TCP/HTTP reverse proxy and load balancer well suited to high-availability environments.

1.1 Load balancer deployment: configure haproxy and keepalived on all three hosts

[root@server2 ~]# yum install -y haproxy keepalived -y
[root@server3 ~]# yum install -y haproxy keepalived -y
[root@server4 ~]# yum install -y haproxy keepalived -y

[root@server2 haproxy]# pwd
/etc/haproxy
[root@server2 haproxy]# vim haproxy.cfg 
#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend  main *:8443    // the apiserver listens on TLS port 6443, so the frontend port must not clash with the apiserver port
    mode tcp  // forward in TCP mode
    default_backend             apiserver  // backend is "apiserver", i.e. the k8s control-plane entry point
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend apiserver
    mode tcp        // TCP mode
    balance     roundrobin
    server  app1 172.25.60.2:6443 check     // backend servers: the k8s master nodes
    server  app2 172.25.60.3:6443 check
    server  app3 172.25.60.4:6443 check
[root@server2 ~]# systemctl start haproxy
[root@server2 ~]# systemctl enable haproxy    // start at boot

[root@server2 ~]# netstat -antlpe|grep haproxy
tcp        0      0 0.0.0.0:8443            0.0.0.0:*               LISTEN      0          31732      3668/haproxy 
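
Optionally, a stats listener can be appended to haproxy.cfg to watch the health of the apiserver backends from a browser. This block is a hedged sketch that is not in the original config; the port 9000 and the URI are arbitrary choices:

listen stats
    bind *:9000         # stats page; any free port works
    mode http
    stats enable
    stats uri /stats    # browse http://<node-ip>:9000/stats
    stats refresh 5s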


[root@server2 keepalived]# pwd
/etc/keepalived
[root@server2 keepalived]# cat check_haproxy.sh   // write check_haproxy.sh to test haproxy's status, tying haproxy and keepalived together
#!/bin/bash
systemctl status haproxy &> /dev/null  
if [ $? != 0 ];then
	systemctl stop keepalived
fi
[root@server2 keepalived]# chmod +x check_haproxy.sh     // make it executable
[root@server2 keepalived]# ./check_haproxy.sh        // make sure it runs cleanly

[root@server2 keepalived]# cat keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"  // the health-check script written above
    interval 5      // run every 5 seconds
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100  // priority (the higher the value, the higher the priority)
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        check_haproxy
    }
    virtual_ipaddress {
        172.25.60.100      // the VIP
    }
} 

[root@server2 keepalived]# systemctl stop haproxy  // stop haproxy
[root@server2 keepalived]# systemctl status keepalived  // keepalived stops as well, because the check script detects that haproxy is down
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: inactive (dead) since Thu 2020-05-14 17:06:47 CST; 8s ago

[root@server2 keepalived]# systemctl start haproxy
[root@server2 keepalived]# systemctl start keepalived
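
With haproxy and keepalived both running again, the VIP should be held by server2, the highest-priority node; a quick check (an addition, not in the original post):

[root@server2 keepalived]# ip addr show eth0 | grep 172.25.60.100   // the VIP appears on the MASTER node's eth0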
[root@server2 keepalived]# ssh-keygen  // set up passwordless ssh between the three master-node hosts
[root@server2 keepalived]# ssh-copy-id server3
[root@server2 keepalived]# ssh-copy-id server4

[root@server2 keepalived]# scp /etc/haproxy/haproxy.cfg server3:/etc/haproxy/
haproxy.cfg                                                                                                     100% 2658     3.0MB/s   00:00    
[root@server2 keepalived]# scp /etc/haproxy/haproxy.cfg server4:/etc/haproxy/
haproxy.cfg                                                                                                     100% 2658     3.1MB/s   00:00    
[root@server2 keepalived]# scp /etc/keepalived/keepalived.conf server3:/etc/keepalived/
keepalived.conf                                                                                                 100%  607   913.9KB/s   00:00    
[root@server2 keepalived]# scp /etc/keepalived/keepalived.conf server4:/etc/keepalived/
keepalived.conf    
[root@server3 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 5
}

vrrp_instance VI_1 {
    state BACKUP      // this node is a backup
    interface eth0
    virtual_router_id 51
    priority 90       // lower priority than the master
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        check_haproxy
    }
    virtual_ipaddress {
        172.25.60.100
    }
} 

[root@server3 ~]# systemctl start haproxy
[root@server3 ~]# systemctl enable haproxy
[root@server3 ~]# systemctl start keepalived
[root@server3 ~]# systemctl enable keepalived
[root@server4 ~]# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 5
}

vrrp_instance VI_1 {
    state BACKUP        // backup as well
    interface eth0
    virtual_router_id 51
    priority 80         // lowest priority of the three
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        check_haproxy
    }
    virtual_ipaddress {
        172.25.60.100
    }
} 

[root@server4 ~]# systemctl start haproxy
[root@server4 ~]# systemctl enable haproxy
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@server4 ~]# systemctl start keepalived
[root@server4 ~]# systemctl enable keepalived
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
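
At this point a simple failover test can be done (a sketch, not in the original post): stopping haproxy on server2 makes the check script stop keepalived there, so the VIP should move to server3, the next-highest priority node; restarting the services on server2 lets it preempt the VIP back (keepalived preempts by default).

[root@server2 ~]# systemctl stop haproxy                                  // keepalived on server2 stops via check_haproxy.sh
[root@server3 ~]# ip addr show eth0 | grep 172.25.60.100                  // the VIP should now be on server3
[root@server2 ~]# systemctl start haproxy && systemctl start keepalived   // recover; the VIP moves back to server2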

1.2 Deploy k8s and docker on each node

(This must be done on all nodes.)
Time synchronization (omitted in the original; a hedged sketch follows below)
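
As a sketch only (the original post skips this step), chrony, the default NTP client on CentOS 7, could be used; ntp.aliyun.com is an assumed time source and any reachable NTP server works:

[root@server2 ~]# yum install -y chrony
[root@server2 ~]# echo "server ntp.aliyun.com iburst" >> /etc/chrony.conf   // assumed time source
[root@server2 ~]# systemctl start chronyd
[root@server2 ~]# systemctl enable chronyd
[root@server2 ~]# chronyc sources    // verify the node is syncing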

[root@server2 ~]# cat /etc/yum.repos.d/docker-ce.repo // yum repository used to install docker
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/$basearch/stable
enabled=1
gpgcheck=0
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[root@server2 ~]# yum install docker-ce container-selinux-2.77-1.el7.noarch.rpm -y # the container-selinux-2.77-1.el7.noarch.rpm package is needed to satisfy a dependency; download it yourself

[root@server2 ~]# scp /etc/yum.repos.d/docker-ce.repo root@172.25.60.3:/etc/yum.repos.d/
docker-ce.repo                                                                                                  100%  213   145.9KB/s   00:00    
[root@server2 ~]# scp /etc/yum.repos.d/docker-ce.repo root@172.25.60.4:/etc/yum.repos.d/
docker-ce.repo
[root@server2 ~]# scp container-selinux-2.77-1.el7.noarch.rpm server3:/root/
container-selinux-2.77-1.el7.noarch.rpm                                                                         100%   37KB  12.4MB/s   00:00    
[root@server2 ~]# scp container-selinux-2.77-1.el7.noarch.rpm server4:/root/
container-selinux-2.77-1.el7.noarch.rpm
[root@server3 ~]# yum install docker-ce container-selinux-2.77-1.el7.noarch.rpm -y
[root@server4 ~]# yum install docker-ce container-selinux-2.77-1.el7.noarch.rpm -y

[root@server2 ~]# systemctl start docker
[root@server2 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@server2 ~]# docker info
// if docker info prints warnings here (typically the bridge-nf-call-iptables / bridge-nf-call-ip6tables warnings), follow the troubleshooting steps below

Troubleshooting:
See also this k8s deployment post: https://blog.csdn.net/weixin_43936969/article/details/105773756#3_Kubernetes_57

[root@server2 sysctl.d]# pwd
/etc/sysctl.d
[root@server2 sysctl.d]# vim k8s.conf
[root@server2 sysctl.d]# cat k8s.conf 
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@server2 sysctl.d]# sysctl --system  // apply the settings
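
Note (an addition, not in the original post): these bridge sysctls only exist once the br_netfilter kernel module is loaded, so if sysctl --system complains about unknown keys, load the module first and re-apply:

[root@server2 sysctl.d]# modprobe br_netfilter
[root@server2 sysctl.d]# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf   // load it on every boot
[root@server2 sysctl.d]# sysctl --system
[root@server2 sysctl.d]# sysctl net.bridge.bridge-nf-call-iptables                   // should print 1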

Change docker's cgroup driver to systemd:

[root@server2 docker]# pwd
/etc/docker
[root@server2 docker]# cat daemon.json 
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}