Kubernetes -- High-Availability Cluster Setup

Right now our cluster has only one master node, but a real cluster needs at least three masters to be highly available. Note that each master node needs at least two CPUs.

Prepare three brand-new machines.

There are two deployment topologies:

  1. Stacked etcd topology
    (figure: stacked etcd topology)
    The three nodes at the bottom are the master nodes. Each master runs the complete control plane, etcd included, so each has its own local storage, and the LB above them performs the load balancing.
  2. External etcd topology
    (figure: external etcd cluster topology)
    A distributed etcd cluster: the storage is moved out of the master nodes, which gives the data high availability and redundancy control.

Stacked etcd cluster

The first thing to do is deploy the load balancer:
Install haproxy and keepalived on all three nodes.

[root@server2 ~]# yum install -y keepalived haproxy
[root@server3 ~]# yum install -y keepalived haproxy
[root@server4 ~]# yum install -y keepalived haproxy

haproxy and keepalived configuration

Configure haproxy

[root@server2 ~]# vim /etc/haproxy/haproxy.cfg 

#---------------------------------------------------------------------
defaults
    mode                    tcp			use TCP mode; it load-balances the apiserver

...
#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------

listen admin_stats
    bind *:80
    mode http				/ only added on server2; this just gives us a web status page for testing the connection, and can be omitted
    stats uri /status

frontend  main *:8443		/ the load balancer and the master share the same host, so we cannot occupy the apiserver's port 6443
    mode tcp
    default_backend             apiserver		/ backend name

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend apiserver
    mode        tcp		/ TCP mode
    balance     roundrobin			/ round-robin load balancing
    server  app1 172.25.254.2:6443 check			/ the backend apiserver addresses
    server  app2 172.25.254.3:6443 check
    server  app3 172.25.254.4:6443 check


[root@server2 ~]# systemctl enable --now haproxy.service 		enable at boot
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@server2 ~]# netstat -tnlp 
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      3119/sshd           
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      3213/master         
tcp        0      0 0.0.0.0:8443            0.0.0.0:*               LISTEN      4105/haproxy        
tcp6       0      0 :::22                   :::*                    LISTEN      3119/sshd           
tcp6       0      0 ::1:25                  :::*                    LISTEN      3213/master         

(figure: haproxy stats page)
We can see our three backends on the stats page.
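The stats page can also be checked from the command line (optional; a sketch based on the listen section above, not output captured from this environment):

[root@server2 ~]# curl -s http://172.25.254.2/status | grep app1		the backend rows should appear in the HTML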

[root@server2 ~]# ssh-keygen 
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:

[root@server2 ~]# ssh-copy-id server3
[root@server2 ~]# ssh-copy-id server4

[root@server2 ~]# scp /etc/haproxy/haproxy.cfg server3:/etc/haproxy/haproxy.cfg 
haproxy.cfg                                                                                                                                                                      100% 2650     2.4MB/s   00:00    
[root@server2 ~]# scp /etc/haproxy/haproxy.cfg server4:/etc/haproxy/haproxy.cfg 
haproxy.cfg   		copy the config file over to the other nodes

[root@server3 ~]# systemctl enable --now haproxy.service
[root@server4 ~]# systemctl enable --now haproxy.service		enable at boot

Keepalived configuration for high availability:

[root@server2 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
      root@localhost		/ notification mail address
   }
   notification_email_from keepalived@localhost		/ sender address
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
#   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER			/ MASTER node
    interface ens3		/ eth0 on RHEL8
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.25.254.100		/ VIP
    }
}

Note that keepalived cannot health-check haproxy on its own, so we also need a script to do the health check:

[root@server2 ~]# cat check_haproxy.sh 
#!/bin/bash
# if haproxy is not running, stop keepalived so the VIP fails over to a backup node
systemctl status haproxy &> /dev/null
if [ $? != 0 ]; then
  systemctl stop keepalived
fi

A return value of 0 means the service is running; a non-zero value means the service is unhealthy, in which case keepalived is stopped so that a failover happens and a backup node takes over. Now put the script in place:

[root@server2 ~]# chmod +x check_haproxy.sh
[root@server2 ~]# mv check_haproxy.sh /etc/keepalived/

[root@server2 ~]# vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
      root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
#   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"		/ script path
    interval 5		/ run the check every 5 seconds
}

vrrp_instance VI_1 {
    state MASTER
    interface ens3
    virtual_router_id 51
    priority 100		/ priority; set lower on the backup nodes
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        check_haproxy		/ invoke the script
    }
    virtual_ipaddress {
        172.25.254.100
    }
}
[root@server2 ~]# systemctl enable --now keepalived.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@server2 ~]# scp /etc/keepalived/* server3:/etc/keepalived/
check_haproxy.sh                                                                                                                                                                 100%  101     0.4KB/s   00:00    
keepalived.conf                                                                                                                                                                  100%  702     1.2MB/s   00:00    
[root@server2 ~]# scp /etc/keepalived/* server4:/etc/keepalived/
check_haproxy.sh                                                                                                                                                                 100%  101    68.9KB/s   00:00    
keepalived.conf   

server3 and server4 are backups, so their configuration differs slightly:

[root@server3 ~]# vim /etc/keepalived/keepalived.conf 

vrrp_instance VI_1 {
    state BACKUP			/ state is BACKUP
    interface ens3
    virtual_router_id 51
    priority 90		/ priority 90

[root@server4 ~]# vim /etc/keepalived/keepalived.conf 

vrrp_instance VI_1 {
    state BACKUP		/ state is BACKUP
    interface ens3
    virtual_router_id 51
    priority 80		/ priority 80

[root@server3 ~]# systemctl enable --now keepalived.service 		start the service
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@server4 ~]# systemctl enable --now keepalived.service
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.

server3 has a higher priority than server4, so it is preferred when taking over.
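To see at a glance which node currently holds the VIP, a quick check using the passwordless ssh set up above:

[root@server2 ~]# ip addr show ens3 | grep 172.25.254.100		the node that prints the VIP is the current MASTER
[root@server2 ~]# ssh server3 'ip addr show ens3 | grep 172.25.254.100'
[root@server2 ~]# ssh server4 'ip addr show ens3 | grep 172.25.254.100'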

Test:

[root@server2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:df:b5:2f brd ff:ff:ff:ff:ff:ff
    inet 172.25.254.2/24 brd 172.25.254.255 scope global ens3
       valid_lft forever preferred_lft forever
    inet 172.25.254.100/32 scope global ens3
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fedf:b52f/64 scope link 
       valid_lft forever preferred_lft forever
At this point server2 is the MASTER and holds the VIP.

[root@server2 ~]# systemctl stop haproxy.service 
[root@server2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:df:b5:2f brd ff:ff:ff:ff:ff:ff
    inet 172.25.254.2/24 brd 172.25.254.255 scope global ens3
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fedf:b52f/64 scope link 
       valid_lft forever preferred_lft forever
After stopping the service, the VIP disappears from server2:
[root@server3 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:fa:bc:3a brd ff:ff:ff:ff:ff:ff
    inet 172.25.254.3/24 brd 172.25.254.255 scope global ens3
       valid_lft forever preferred_lft forever
    inet 172.25.254.100/32 scope global ens3
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fefa:bc3a/64 scope link 
       valid_lft forever preferred_lft forever

Looking at the log on server3 we can see:
(figure: keepalived log on server3)
It transitions to the MASTER state and sets the VIP.

Then we restore the services on server2:

[root@server2 ~]# systemctl start haproxy.service 
[root@server2 ~]# systemctl start keepalived.service 
[root@server2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:df:b5:2f brd ff:ff:ff:ff:ff:ff
    inet 172.25.254.2/24 brd 172.25.254.255 scope global ens3
       valid_lft forever preferred_lft forever
    inet 172.25.254.100/32 scope global ens3
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fedf:b52f/64 scope link 
       valid_lft forever preferred_lft forever

The VIP comes back to server2.

Docker and k8s cluster configuration

Installing docker
Still the same four packages as before; install them on all three nodes.

[root@server2 ~]# ls
containerd.io-1.2.5-3.1.el7.x86_64.rpm  container-selinux-2.77-1.el7.noarch.rpm  docker-ce-18.09.6-3.el7.x86_64.rpm  docker-ce-cli-18.09.6-3.el7.x86_64.rpm
[root@server2 ~]# yum install ./*
[root@server2 ~]# systemctl enable --now docker		enable at boot

Configure the kernel parameters and disable swap:

[root@server2 docker]# vim /etc/sysctl.d/k8s.conf
[root@server2 docker]# cat /etc/sysctl.d/k8s.conf 
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1		/ kernel parameters required by k8s
net.ipv4.ip_forward = 1
vm.swappiness=0		
[root@server2 docker]# sysctl --system

[root@server2 docker]# swapoff -a
[root@server2 docker]# vim /etc/fstab 

/dev/mapper/rhel-root   /                       xfs     defaults        0 0
UUID=004d1dd6-221a-4763-a5eb-c75e18655041 /boot                   xfs     defaults        0 0
#/dev/mapper/rhel-swap   swap                    swap    defaults        0 0		/ comment out swap so it stays off after a reboot
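After swapoff and the fstab edit it is worth confirming that the changes took effect (standard checks):

[root@server2 docker]# swapon -s		no output means swap is completely off
[root@server2 docker]# free -m | grep -i swap		the Swap line should show 0 total
[root@server2 docker]# sysctl net.bridge.bridge-nf-call-iptables		should report 1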
[root@server2 sysctl.d]# scp k8s.conf  server3:/etc/sysctl.d/k8s.conf 
k8s.conf                                                                                                                                                                         100%  119   113.7KB/s   00:00    
[root@server2 sysctl.d]# scp k8s.conf  server4:/etc/sysctl.d/k8s.conf 
k8s.conf                                                                                                                                                                         100%  119   107.5KB/s   00:00    
[root@server2 sysctl.d]# ssh server3 'sysctl --system'
[root@server2 sysctl.d]# ssh server4 'sysctl --system'
[root@server2 sysctl.d]# ssh server4 'swapoff -a'
[root@server2 sysctl.d]# ssh server3 'swapoff -a'
[root@server2 sysctl.d]# scp /etc/fstab server3:/etc/fstab 
fstab                                                                                                                                                                            100%  466   506.8KB/s   00:00    
[root@server2 sysctl.d]# scp /etc/fstab server4:/etc/fstab 
fstab 

[root@server2 sysctl.d]# ssh server4 'docker info'
[root@server2 sysctl.d]# ssh server3 'docker info'
[root@server2 sysctl.d]# docker info			check docker status on each node; all normal

Change the cgroup driver:

[root@server2 docker]# cat daemon.json 
{
  "registry-mirrors": ["https://reg.caoaoyuan.org"],		/ private registry mirror
  "exec-opts": ["native.cgroupdriver=systemd"],		/ use the systemd cgroup driver, consistent with k8s
  "log-driver": "json-file",
  "log-opts": {
   "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
  "overlay2.override_kernel_check=true"
  ]
}
[root@server2 docker]# scp daemon.json server3:/etc/docker/
daemon.json                                                                                                                                                                      100%  272   216.8KB/s   00:00    
[root@server2 docker]# scp daemon.json server4:/etc/docker/
daemon.json                 
                                                                                                                                                 
[root@server2 docker]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
172.25.254.67 	rhel7host
172.25.254.1	server1 reg.caoaoyuan.org			/ registry name resolution
172.25.254.2	server2
172.25.254.3	server3
172.25.254.4	server4
172.25.254.5	server5
172.25.254.6	server6
172.25.254.7	server7
172.25.254.8	server8
[root@server2 docker]# scp /etc/hosts server3:/etc/hosts
hosts                                                                                                                                                                            100%  369   319.0KB/s   00:00    
[root@server2 docker]# scp /etc/hosts server4:/etc/hosts
hosts 

Registry certificates:

[root@server1 docker]# scp -r certs.d/ server2:/etc/docker/
root@server2's password: 
ca.crt      
[root@server2 docker]# scp -r certs.d/ server3:/etc/docker/
ca.crt                                                                                                                                                                           100% 2114     1.4MB/s   00:00    
[root@server2 docker]# scp -r certs.d/ server4:/etc/docker/
ca.crt   
[root@server2 docker]# systemctl restart docker.service 
[root@server2 docker]# ssh server3 'systemctl restart docker'
[root@server2 docker]# ssh server4 'systemctl restart docker'
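Once docker has been restarted with the new daemon.json, the driver change can be verified (a standard check, the same on all three nodes):

[root@server2 docker]# docker info | grep -i 'cgroup driver'		should now report systemd
[root@server2 docker]# ssh server3 "docker info | grep -i 'cgroup driver'"
[root@server2 docker]# ssh server4 "docker info | grep -i 'cgroup driver'"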

Test:

[root@server2 docker]# docker pull busyboxplus
Using default tag: latest
latest: Pulling from library/busyboxplus
4f4fb700ef54: Pull complete 
84a9463f104a: Pull complete 
737bafab9ae7: Pull complete 
Digest: sha256:9d1c242c1fd588a1b8ec4461d33a9ba08071f0cc5bb2d50d4ca49e430014ab06
Status: Downloaded newer image for busyboxplus:latest

(figure: docker images output)
The image was pulled from our harbor registry.

By default kube-proxy uses NAT (iptables); we switch it to IPVS mode:

[root@server2 docker]# yum install -y ipvsadm
[root@server2 docker]# ssh server3 'yum install -y ipvsadm'
[root@server2 docker]# ssh server4 'yum install -y ipvsadm'

[root@server2 docker]# modprobe ip_vs_rr
[root@server2 docker]# modprobe ip_vs_sh
[root@server2 docker]# modprobe ip_vs_wrr		load a few scheduling algorithms; do the same on the other two nodes
[root@server2 docker]# lsmod |grep ip_vs
ip_vs_wrr              12697  0 
ip_vs_sh               12688  0 
ip_vs_rr               12600  0 
ip_vs                 145497  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          133095  7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
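The modprobe calls do not persist across reboots; to have the modules loaded automatically at boot, a modules-load.d file can be added (a sketch using the standard systemd-modules-load mechanism; the file name is arbitrary):

[root@server2 docker]# cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
EOF
[root@server2 docker]# scp /etc/modules-load.d/ipvs.conf server3:/etc/modules-load.d/
[root@server2 docker]# scp /etc/modules-load.d/ipvs.conf server4:/etc/modules-load.d/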

Deploying with kubeadm
Download the packages from the Aliyun mirror:

[root@server2 yum.repos.d]# vim k8s.repo
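The contents of k8s.repo are not shown above; a typical definition pointing at the Aliyun Kubernetes mirror looks roughly like this (an assumption; adjust the baseurl and gpgcheck policy to your own environment):

[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0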
[root@server2 yum.repos.d]# yum repolist 
Loaded plugins: product-id, search-disabled-repos, subscription-manager
This system is not registered with an entitlement server. You can use subscription-manager to register.
kubernetes                                                                                                                                                                                  | 1.4 kB  00:00:00     
kubernetes/primary                                                                                                                                                                          |  73 kB  00:00:00     
kubernetes                                                                                                                                                                                                 533/533
repo id                                                                                               repo name                                                                                              status
kubernetes                                                                                            Kubernetes                                                                                               533
westos                                                                                                westos                                                                                                 5,152
repolist: 5,685
[root@server2 yum.repos.d]# yum install kubeadm-1.18.3-0 kubelet-1.18.3-0 kubectl-1.18.3-0 -y
[root@server3 yum.repos.d]# yum install kubeadm-1.18.3-0 kubelet-1.18.3-0 kubectl-1.18.3-0 -y
[root@server4 yum.repos.d]# yum install kubeadm-1.18.3-0 kubelet-1.18.3-0 kubectl-1.18.3-0 -y
[root@server2 yum.repos.d]# systemctl enable --now kubelet.service 
[root@server3 yum.repos.d]# systemctl enable --now kubelet.service 
[root@server4 yum.repos.d]# systemctl enable --now kubelet.service 
[root@server2 yum.repos.d]# echo "source <(kubectl completion bash)" >> ~/.bashrc	/ enable kubectl bash completion
[root@server2 yum.repos.d]# cd
[root@server2 ~]# source .bashrc 


[root@server2 ~]# kubeadm config print init-defaults > kubeadm-init.yaml
W0712 14:49:18.294453   19847 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[root@server2 ~]# ls
 kubeadm-init.yaml		/ a default init yaml file is generated
List the images that are needed:
[root@server2 ~]# kubeadm config images list --image-repository reg.caoaoyuan.org
Pull the images:
[root@server2 ~]# kubeadm config images pull --image-repository reg.caoaoyuan.org/library --kubernetes-version v1.18.3
W0712 14:55:43.760930   20514 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[config/images] Pulled reg.caoaoyuan.org/library/kube-apiserver:v1.18.3
[config/images] Pulled reg.caoaoyuan.org/library/kube-controller-manager:v1.18.3
[config/images] Pulled reg.caoaoyuan.org/library/kube-scheduler:v1.18.3
[config/images] Pulled reg.caoaoyuan.org/library/kube-proxy:v1.18.3
[config/images] Pulled reg.caoaoyuan.org/library/pause:3.2
[config/images] Pulled reg.caoaoyuan.org/library/etcd:3.4.3-0
[config/images] Pulled reg.caoaoyuan.org/library/coredns:1.6.7

[root@server3 ~]# kubeadm config images pull --image-repository reg.caoaoyuan.org/library --kubernetes-version v1.18.3

[root@server4 ~]# kubeadm config images pull --image-repository reg.caoaoyuan.org/library --kubernetes-version v1.18.3

Edit the init file:

[root@server2 ~]# vim kubeadm-init.yaml 
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.25.254.2		/ the apiserver address of this node
  bindPort: 6443			/ port
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: server2	/ hostname
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "172.25.254.100:8443"		/ control-plane endpoint: clients come in through the VIP, which balances across the three masters
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: reg.caoaoyuan.org/library			/ registry to pull the images from
kind: ClusterConfiguration
kubernetesVersion: v1.18.3   / version
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16			/ the flannel network add-on requires the pod subnet to be defined at init time
  serviceSubnet: 10.96.0.0/12
scheduler: {}

---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration		/ use the ipvs proxy mode
mode: ipvs

Finally, just run the initialization (the images were already pulled):

[root@server2 ~]# kubeadm init --config kubeadm-init.yaml --upload-certs
 --upload-certs		this flag uploads the certificates into the cluster so that all three control-plane nodes can use the same certificates.

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:
The following command joins additional control-plane (master) nodes:
  kubeadm join 172.25.254.100:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:042a8381284cca227c16c844058b172882f51b1c00dfcfa4586647ed8629e5d5 \
    --control-plane --certificate-key 4424178e60934c7a43be5e8480bf84bf7e8524b34521048492eb29a3e589e5f2

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:
The following command joins worker nodes:
kubeadm join 172.25.254.100:8443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:042a8381284cca227c16c844058b172882f51b1c00dfcfa4586647ed8629e5d5

Done.

[root@server2 ~]# mkdir ./.kube
[root@server2 ~]# cp /etc/kubernetes/admin.conf ./.kube/config	/ now kubectl can talk to the API
[root@server2 .kube]# kubectl get nodes
NAME      STATUS     ROLES    AGE     VERSION
server2   NotReady   master   3m44s   v1.18.3		the current node is listed

(figure: haproxy stats page)
app1, i.e. server2, has turned green on the haproxy stats page.
Now we join server3 and server4 as control-plane nodes.

[root@server3 ~]# kubeadm join 172.25.254.100:8443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:042a8381284cca227c16c844058b172882f51b1c00dfcfa4586647ed8629e5d5 \
>     --control-plane --certificate-key 4424178e60934c7a43be5e8480bf84bf7e8524b34521048492eb29a3e589e5f2

[root@server4 ~]# kubeadm join 172.25.254.100:8443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:042a8381284cca227c16c844058b172882f51b1c00dfcfa4586647ed8629e5d5 \
>     --control-plane --certificate-key 4424178e60934c7a43be5e8480bf84bf7e8524b34521048492eb29a3e589e5f2

[root@server2 ~]# kubectl -n kube-system describe cm kubeadm-config 


ClusterStatus:
----
apiEndpoints:			/ all three api endpoints have been registered
  server2:
    advertiseAddress: 172.25.254.2
    bindPort: 6443
  server3:
    advertiseAddress: 172.25.254.3
    bindPort: 6443
  server4:
    advertiseAddress: 172.25.254.4
    bindPort: 6443
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterStatus

Events:  <none>

(figure: haproxy stats page with all three backends green)
All three backends are green now.
When doing high availability, the number of failed master nodes that can be tolerated is (n-1)/2, so three nodes is the minimum requirement.
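A quick worked example of that (n-1)/2 figure (standard etcd quorum arithmetic, not specific to this setup): a write needs a majority of members, i.e. quorum = floor(n/2) + 1, and the cluster survives n - quorum failures.

masters (n)    quorum    failures tolerated
1              1         0
3              2         1
5              3         2

So with a single master any failure is fatal, and three masters is the smallest count that tolerates the loss of one.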

Fetch the flannel network add-on:

[root@server2 ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml 
[root@server2 ~]# vim kube-flannel.yml
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "host-gw"		change to host-gw (host gateway) mode, which is more convenient to use here

        image: flannel:v0.12.0-amd64		/ change the image location so it pulls from the private registry
[root@server2 ~]# kubectl apply -f kube-flannel.yml 

[root@server2 ~]# kubectl get -n kube-system pod
NAME                              READY   STATUS     RESTARTS   AGE
coredns-5fd54d7f56-j4bsl          0/1     Pending    0          5h43m
coredns-5fd54d7f56-nnqqp          0/1     Pending    0          5h43m
etcd-server2                      1/1     Running    0          5h43m
etcd-server3                      1/1     Running    0          5h25m
etcd-server4                      1/1     Running    0          5h24m
kube-apiserver-server2            1/1     Running    0          5h43m
kube-apiserver-server3            1/1     Running    0          5h25m
kube-apiserver-server4            1/1     Running    0          5h23m
kube-controller-manager-server2   1/1     Running    0          5h43m
kube-controller-manager-server3   1/1     Running    0          5h25m
kube-controller-manager-server4   1/1     Running    0          5h23m
kube-flannel-ds-amd64-9lhwk       0/1     Init:0/1   0          19s
kube-flannel-ds-amd64-m2qnj       1/1     Running    0          18s
kube-flannel-ds-amd64-nd47b       0/1     Init:0/1   0          18s		/ the flannel daemonset pods appear

[root@server2 ~]# kubectl get nodes
NAME      STATUS   ROLES    AGE     VERSION
server2   Ready    master   5h44m   v1.18.3
server3   Ready    master   5h27m   v1.18.3
server4   Ready    master   5h24m   v1.18.3		/ all nodes are in the Ready state
[root@server1 .kube]# kubectl get nodes
NAME      STATUS   ROLES    AGE     VERSION
server2   Ready    master   5h52m   v1.18.3
server3   Ready    master   5h34m   v1.18.3
server4   Ready    master   5h32m   v1.18.3
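Since kube-proxy was configured in ipvs mode, the proxy rules can also be inspected on any node (a sketch; the label selector is the one kubeadm applies to the kube-proxy daemonset):

[root@server2 ~]# ipvsadm -ln		ClusterIP services such as 10.96.0.1:443 show up as IPVS virtual servers
[root@server2 ~]# kubectl -n kube-system logs -l k8s-app=kube-proxy --tail=50 | grep -i ipvs		confirm the ipvs proxier is in use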

We can also reach the cluster through the VIP from a host that is not a cluster node; it only needs kubectl installed and the config file copied into the .kube directory under its home directory, as sketched below.
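For example, on an outside client (a hypothetical host named client, assuming the same yum repo and network reachability to the VIP):

[root@client ~]# yum install -y kubectl-1.18.3-0
[root@client ~]# mkdir -p ~/.kube
[root@server2 ~]# scp /etc/kubernetes/admin.conf client:~/.kube/config
[root@client ~]# kubectl get nodes		requests go through the VIP 172.25.254.100:8443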

Next, add a worker node: bring up a new virtual machine configured like the other nodes.
It only needs docker and the k8s packages deployed:

[root@server5 docker]# systemctl daemon-reload 
[root@server5 docker]# systemctl restart docker
[root@server5 docker]# docker info		/ check that docker is healthy
[root@server5 docker]# systemctl restart kubelet.service		/ restart kubelet so it picks up docker

Join the cluster as a worker:

[root@server5 docker]# kubeadm join 172.25.254.100:8443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:042a8381284cca227c16c844058b172882f51b1c00dfcfa4586647ed8629e5d5

Once name resolution is in place in /etc/hosts, the required images are pulled automatically. At this point:

[root@server1 .kube]# kubectl get nodes
NAME      STATUS   ROLES    AGE     VERSION
server2   Ready    master   6h11m   v1.18.3
server3   Ready    master   5h54m   v1.18.3
server4   Ready    master   5h51m   v1.18.3
server5   Ready    <none>   2m7s    v1.18.3
The worker node server5 has joined as well.

Testing cluster high availability

Deploy a pod

[root@server2 ~]# kubectl run demo --image=busybox -it
If you don't see a command prompt, try pressing enter.
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
3: eth0@if7: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue 
    link/ether 42:e9:d4:d4:cc:e0 brd ff:ff:ff:ff:ff:ff
    inet 10.244.3.2/24 brd 10.244.3.255 scope global eth0
       valid_lft forever preferred_lft forever
/ # ^C
/ # Session ended, resume using 'kubectl attach demo -c demo -i -t' command when the pod is running
[root@server2 ~]# kubectl get pod -owide
NAME   READY   STATUS    RESTARTS   AGE   IP           NODE      NOMINATED NODE   READINESS GATES
demo   1/1     Running   1          34s   10.244.3.2   server5   <none>           <none>
There is only one worker node, so the pod runs on server5.

High availability

[root@server2 ~]# poweroff
Connection to 172.25.254.2 closed by remote host.
Connection to 172.25.254.2 closed.

[root@server3 ~]# vim /var/log/messages

Jul 12 23:02:30 server3 systemd-logind: Removed session 8.
Jul 12 23:27:04 server3 Keepalived_vrrp[3162]: VRRP_Instance(VI_1) Transition to MASTER STATE
Jul 12 23:27:05 server3 Keepalived_vrrp[3162]: VRRP_Instance(VI_1) Entering MASTER STATE
Jul 12 23:27:05 server3 Keepalived_vrrp[3162]: VRRP_Instance(VI_1) setting protocol VIPs.
As the log shows, server3 takes over and enters the MASTER state, since its priority is higher than server4's.

[root@server3 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:fa:bc:3a brd ff:ff:ff:ff:ff:ff
    inet 172.25.254.3/24 brd 172.25.254.255 scope global ens3
       valid_lft forever preferred_lft forever
    inet 172.25.254.100/32 scope global ens3			/ the VIP appears
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fefa:bc3a/64 scope link
[root@server1 .kube]# kubectl get pod -owide
NAME   READY   STATUS    RESTARTS   AGE     IP           NODE      NOMINATED NODE   READINESS GATES
demo   1/1     Running   1          3m19s   10.244.3.2   server5   <none>           <none>

The pod is still running normally.
Now power server2 back on:

[kiosk@rhel7host ~]$ ssh root@172.25.254.2
root@172.25.254.2's password: 
Last login: Sun Jul 12 12:30:23 2020 from rhel7host
[root@server2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:df:b5:2f brd ff:ff:ff:ff:ff:ff
    inet 172.25.254.2/24 brd 172.25.254.255 scope global ens3
       valid_lft forever preferred_lft forever
    inet 172.25.254.100/32 scope global ens3
       valid_lft forever preferred_lft forever

[root@server3 ~]# vim /var/log/messages
Jul 12 23:30:08 server3 Keepalived_vrrp[3162]: VRRP_Instance(VI_1) Received advert with higher priority 100, ours 90
Jul 12 23:30:08 server3 Keepalived_vrrp[3162]: VRRP_Instance(VI_1) Entering BACKUP STATE
Jul 12 23:30:08 server3 Keepalived_vrrp[3162]: VRRP_Instance(VI_1) removing protocol VIPs.

The VIP moves back to server2, and server3 returns to the BACKUP state.

Keepalived on nodes separate from the masters

Delete the pod we just created, then run the following on all nodes:

[root@server2 ~]# kubeadm reset
[root@server3 ~]# kubeadm reset
[root@server4 ~]# kubeadm reset
[root@server5 ~]# kubeadm reset

[root@server2 ~]# systemctl disable --now  haproxy.service
Removed symlink /etc/systemd/system/multi-user.target.wants/haproxy.service.
[root@server2 ~]# systemctl disable --now  keepalived.service 
Removed symlink /etc/systemd/system/multi-user.target.wants/keepalived.service.
[root@server3 ~]# systemctl disable --now  haproxy.service
Removed symlink /etc/systemd/system/multi-user.target.wants/haproxy.service.
[root@server3 ~]# systemctl disable --now  keepalived.service 
Removed symlink /etc/systemd/system/multi-user.target.wants/keepalived.service.
[root@server4 ~]# systemctl disable --now  haproxy.service
Removed symlink /etc/systemd/system/multi-user.target.wants/haproxy.service.
[root@server4 ~]# systemctl disable --now  keepalived.service 
Removed symlink /etc/systemd/system/multi-user.target.wants/keepalived.service.

This time we use keepalived + LVS for the high availability, so we bring up two more nodes, server6 and server7:

[root@server6 ~]# yum install keepalived ipvsadm -y
[root@server7 ~]# yum install keepalived ipvsadm -y

[root@server6 ~]# vim /etc/keepalived/keepalived.conf 
[root@server6 ~]# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
	root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER
    interface ens3			/ eth0 on RHEL8
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
	172.25.254.100		/ virtual IP
    }
}

virtual_server 172.25.254.100 6443 {				/ virtual service on port 6443
    delay_loop 6
    lb_algo rr		/ scheduling algorithm: round robin
    lb_kind DR			/ load-balancing mode: DR (direct routing)
    #persistence_timeout 50
    protocol TCP

    real_server 172.25.254.2 6443 {			/ three real servers in total
        weight 1
        TCP_CHECK {		/ TCP health check
            connect_timeout 3
            retry 3
            delay_before_retry 3
        }
    }


    real_server 172.25.254.3 6443 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            retry 3
            delay_before_retry 3
        }
    }


    real_server 172.25.254.4 6443 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            retry 3
            delay_before_retry 3
        }
    }
}

[root@server6 ~]# systemctl enable --now keepalived.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@server6 ~]# scp /etc/keepalived/keepalived.conf server7:/etc/keepalived/keepalived.conf
[root@server7 ~]# vim /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
    state BACKUP		/ backup node
    interface eth0
    virtual_router_id 51
    priority 50		/ priority 50, lower than the master

[root@server6 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
No real servers are ready yet.
[root@server2 ~]# vim kubeadm-init.yaml
controlPlaneEndpoint: "172.25.254.100:6443"		/ the control-plane endpoint now uses port 6443

But at this point none of the k8s nodes carries the VIP 172.25.254.100.

[root@server2 ~]# ip addr add 172.25.254.100/24 dev ens3		IPVS DR mode requires the real servers to hold the VIP

We also have to keep the node from answering when its local VIP is accessed directly (ARP suppression); here arptables is used:

[root@server2 ~]# yum install -y arptables_jf
[root@server2 ~]# arptables -A INPUT -d 172.25.254.100 -j DROP
[root@server2 ~]# arptables -A OUTPUT -s 172.25.254.100 -j mangle --mangle-ip-s 172.25.254.2
[root@server2 ~]# arptables-save > /etc/sysconfig/arptables 		/ save the rules
[root@server2 ~]# cat /etc/sysconfig/arptables
*filter
:INPUT ACCEPT
:OUTPUT ACCEPT
:FORWARD ACCEPT
-A INPUT -j DROP -d 172.25.254.100 
-A OUTPUT -j mangle -s 172.25.254.100 --mangle-ip-s 172.25.254.2	/ packets leaving with the VIP as source are rewritten to show the host's own IP
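A commonly used alternative to arptables on LVS-DR real servers is to put the VIP on the loopback interface and set the kernel ARP parameters instead (a sketch of the classic approach, not what is done in this article; arp_ignore/arp_announce are standard kernel sysctls):

[root@server2 ~]# ip addr add 172.25.254.100/32 dev lo		VIP on lo instead of ens3
[root@server2 ~]# sysctl -w net.ipv4.conf.all.arp_ignore=1		only answer ARP for addresses on the incoming interface
[root@server2 ~]# sysctl -w net.ipv4.conf.all.arp_announce=2		always use the best local source address in ARP requests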
Master node initialization:
[root@server2 ~]# kubeadm init --config kubeadm-init.yaml --upload-certs

[root@server6 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.25.254.100:6443 rr
  -> 172.25.254.2:6443            Route   1      0          0     
The server2 real server has now been added.
[root@server2 .kube]#  rm -fr *			/ delete the old config files
[root@server2 .kube]# cp /etc/kubernetes/
admin.conf               controller-manager.conf  kubelet.conf             manifests/               pki/                     scheduler.conf           
[root@server2 .kube]# cp /etc/kubernetes/admin.conf ./config
[root@server2 .kube]# kubectl get nodes
NAME      STATUS   ROLES    AGE   VERSION
server2   Ready    master   19m   v1.18.3		the node is healthy


[root@server2 .kube]# scp config server1:~/.kube/		
root@server1's password: 
config  
Access the API from server1:
[root@server1 .kube]# kubectl get nodes
NAME      STATUS   ROLES    AGE   VERSION
server2   Ready    master   22m   v1.18.3
[root@server1 .kube]# kubectl get nodes
NAME      STATUS   ROLES    AGE   VERSION
server2   Ready    master   23m   v1.18.3
[root@server1 .kube]# 


[root@server6 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.25.254.100:6443 rr
  -> 172.25.254.2:6443            Route   1      0          6   	

Join server3 and server4

[root@server3 ~]#  kubeadm join 172.25.254.100:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:862dfdf31feb03d19a3c6faa3f02900c07e8752ce9a439eab35316cbc30516bf \
>     --control-plane --certificate-key f489122a163646c1001ef6d5a541e8aec3081a707b0cb0a8bd0a0157c6b069c3

[root@server2 .kube]# kubectl get nodes
NAME      STATUS   ROLES    AGE   VERSION
server2   Ready    master   26m   v1.18.3
server3   Ready    master   32s   v1.18.3
[root@server6 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.25.254.100:6443 rr
  -> 172.25.254.2:6443            Route   1      5          1         
  -> 172.25.254.3:6443            Route   1      0          2   
  
[root@server3 ~]# ip addr add 172.25.254.100/24 dev ens3
[root@server3 ~]# yum install arptables.x86_64 -y
[root@server3 ~]# arptables -A INPUT -d 172.25.254.100 -j DROP
[root@server3 ~]# arptables -A OUTPUT -s 172.25.254.100 -j mangle --mangle-ip-s 172.25.254.3
[root@server3 ~]#  arptables-save > /etc/sysconfig/arptables

[root@server4 ~]# ip addr add 172.25.254.100/24 dev ens3
[root@server4 ~]# yum install arptables_jf -y
[root@server4 ~]# arptables -A INPUT -d 172.25.254.100 -j DROP
[root@server4 ~]# arptables -A OUTPUT -s 172.25.254.100 -j mangle --mangle-ip-s 172.25.254.4
[root@server4 ~]# arptables-save > /etc/sysconfig/arptables
[root@server2 .kube]# kubectl get nodes
NAME      STATUS   ROLES    AGE     VERSION
server2   Ready    master   34m     v1.18.3
server3   Ready    master   8m15s   v1.18.3
server4   Ready    master   106s    v1.18.3
[root@server6 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.25.254.100:6443 rr
  -> 172.25.254.2:6443            Route   1      9          0         
  -> 172.25.254.3:6443            Route   1      2          0         
  -> 172.25.254.4:6443            Route   1      0          0         
You have new mail in /var/spool/mail/root

Finally, deploy the network add-on:

[root@server2 ~]# kubectl apply -f kube-flannel.yml

Add a worker

[root@server5 docker]# kubeadm join 172.25.254.100:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:862dfdf31feb03d19a3c6faa3f02900c07e8752ce9a439eab35316cbc30516bf

[root@server2 ~]# kubectl get nodes
NAME      STATUS   ROLES    AGE     VERSION
server2   Ready    master   38m     v1.18.3
server3   Ready    master   11m     v1.18.3
server4   Ready    master   5m30s   v1.18.3
server5   Ready    <none>   23s     v1.18.3		/ ready once the images have been pulled automatically

[root@server1 ~]# kubectl run demo --image=busybox -it
If you don't see a command prompt, try pressing enter.
/ # Session ended, resume using 'kubectl attach demo -c demo -i -t' command when the pod is running
[root@server1 ~]# kubectl get pod -owide
NAME   READY   STATUS    RESTARTS   AGE   IP           NODE      NOMINATED NODE   READINESS GATES
demo   1/1     Running   1          13s   10.244.3.2   server5   <none>           <none>		

Verification

[root@server2 ~]# poweroff
Connection to 172.25.254.2 closed by remote host.
Connection to 172.25.254.2 closed.

[root@server6 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.25.254.100:6443 rr
  -> 172.25.254.3:6443            Route   1      3          0         
  -> 172.25.254.4:6443            Route   1      2          0 

[root@server7 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.25.254.100:6443 rr
  -> 172.25.254.3:6443            Route   1      2          4         
  -> 172.25.254.4:6443            Route   1      0          6         
You have new mail in /var/spool/mail/root
[root@server7 ~]# ip a

2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:95:37:20 brd ff:ff:ff:ff:ff:ff
    inet 172.25.254.7/24 brd 172.25.254.255 scope global ens3
       valid_lft forever preferred_lft forever
    inet 172.25.254.100/32 scope global ens3				/ server7 has taken over the VIP
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fe95:3720/64 scope link 
[root@server1 ~]# kubectl get nodes
NAME      STATUS     ROLES    AGE     VERSION
server2   NotReady   master   43m     v1.18.3
server3   Ready      master   17m     v1.18.3
server4   Ready      master   10m     v1.18.3
server5   Ready      <none>   5m29s   v1.18.3		and the cluster keeps working
[root@rhel7host images]# virsh start k8s1		/ power server2 back on
Domain k8s1 started
[root@server6 ~]# systemctl restart keepalived.service			/ restart the service

[root@server6 ~]# ip a

2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:73:c4:64 brd ff:ff:ff:ff:ff:ff
    inet 172.25.254.6/24 brd 172.25.254.255 scope global ens3
       valid_lft forever preferred_lft forever
    inet 172.25.254.100/32 scope global ens3			/ the VIP has switched back
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fe73:c464/64 scope link 

[root@server2 ~]# ip addr add 172.25.254.100/24 dev ens3		/ without this the node cannot be reached through the VIP, since IPVS DR mode requires the VIP on the real server
[root@server2 ~]# systemctl start arptables.service 		/ restore the ARP suppression rules
[root@server2 ~]# kubectl get pod
NAME   READY   STATUS    RESTARTS   AGE
demo   1/1     Running   1          8m19s
