Load Balancing


1. Implementing high-availability forwarding with an haproxy + keepalived cluster

1. Lab environment

Node     Application           IP              Notes
Node12   haproxy+keepalived    172.16.62.12
Node13   haproxy+keepalived    172.16.62.13
web      nginx                 172.16.62.15    nginx:80, httpd:81
web      nginx                 172.16.62.16    nginx:80, httpd:81
DNS      DNS                   172.16.62.24
DNS      www.haostack.com      172.16.62.248   DNS record (resolves to the VIP)
DNS      mobile.haostack.com   172.16.62.24    DNS record

2. Software installation

2.1 Install keepalived

# Install keepalived on both node12 and node13
yum install keepalived -y

# Check the keepalived service on node12
[root@node12 keepalived]# systemctl status  keepalived
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: active (running) since Sat 2020-06-27 17:49:44 CST; 39s ago
  Process: 2195 ExecReload=/bin/kill -HUP $MAINPID (code=exited, status=0/SUCCESS)
  Process: 2219 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 2220 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─2220 /usr/sbin/keepalived -D
           ├─2221 /usr/sbin/keepalived -D
           └─2222 /usr/sbin/keepalived -D

Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.252
[root@node12 keepalived]#


# Check the keepalived service on node13
[root@node13 keepalived]# systemctl status  keepalived
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; disabled; vendor preset: disabled)
   Active: active (running) since Sat 2020-06-27 17:49:55 CST; 22s ago
  Process: 2457 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 2458 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─2458 /usr/sbin/keepalived -D
           ├─2459 /usr/sbin/keepalived -D
           └─2460 /usr/sbin/keepalived -D

Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: Registering Kernel netlink reflector
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: Registering Kernel netlink command channel
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: Registering gratuitous ARP shared channel
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: Opening file '/etc/keepalived/keepalived.conf'.
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: VRRP_Instance(VI_200) removing protocol VIPs.
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: Using LinkWatch kernel netlink reflector...
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: VRRP_Instance(VI_200) Entering BACKUP STATE
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: VRRP sockpool: [ifindex(2), proto(112), unicast(1), fd(10,11)]
Jun 27 17:49:55 node13 Keepalived_healthcheckers[2459]: Activating healthchecker for service [172.16.62.248]:80
Jun 27 17:49:55 node13 Keepalived_healthcheckers[2459]: Activating healthchecker for service [172.16.62.248]:80
[root@node13 keepalived]# 

2.2 Install haproxy

# Install haproxy on both node12 and node13
yum install haproxy -y


# Start, enable, and check the haproxy service on node12
[root@node12 etc]# systemctl start haproxy
[root@node12 etc]# systemctl enable haproxy
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@node12 etc]# systemctl status haproxy
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2020-06-28 10:12:48 CST; 32s ago
 Main PID: 3484 (haproxy-systemd)
   CGroup: /system.slice/haproxy.service
           ├─3484 /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid
           ├─3485 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
           └─3486 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds

Jun 28 10:12:48 node12 systemd[1]: Started HAProxy Load Balancer.
Jun 28 10:12:48 node12 haproxy-systemd-wrapper[3484]: haproxy-systemd-wrapper: executing /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
[root@node12 etc]#


# Check the haproxy service on node13
[root@node13 ~]# systemctl status haproxy
● haproxy.service - HAProxy Load Balancer
   Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2020-06-28 10:12:32 CST; 1min 29s ago
 Main PID: 3456 (haproxy-systemd)
   CGroup: /system.slice/haproxy.service
           ├─3456 /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid
           ├─3457 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
           └─3458 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds

Jun 28 10:12:32 node13 systemd[1]: Started HAProxy Load Balancer.
Jun 28 10:12:32 node13 haproxy-systemd-wrapper[3456]: haproxy-systemd-wrapper: executing /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
[root@node13 ~]# 

3. Configuration

3.1 Install psmisc

  • killall -0 is used to probe whether the HAProxy process exists. If the killall command is missing, install the psmisc package.
yum install psmisc -y

The health-check script chk_haproxy.sh:
[root@node12 keepalived]# more chk_haproxy.sh 
#!/bin/bash
# Probe the haproxy process; killall -0 sends no signal and simply
# exits 0 if at least one haproxy process exists, non-zero otherwise

/usr/bin/killall -0 haproxy
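
keepalived only evaluates the script's exit status, and the script must be executable for the vrrp_script tracker to run it. A quick manual sanity check (hypothetical session, assuming the path above):

chmod +x /etc/keepalived/chk_haproxy.sh
/etc/keepalived/chk_haproxy.sh; echo $?   # prints 0 while haproxy runs, 1 after it stops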

3.2 haproxy configuration

- Add the same configuration on both nodes:

listen apachecluster
bind 0.0.0.0:81 # listening address and port
mode http  # proxy protocol
log global   # logging
balance roundrobin # load-balancing algorithm
# check inter 3000 fall 2 rise 5: probe every 3 s, mark down after 2 failures, back up after 5 successes
server apache1 172.16.62.12:81  weight 1 check inter 3000 fall 2 rise 5
server apache2 172.16.62.15:81  weight 1 check inter 3000 fall 2 rise 5

listen nginxcluster
bind 0.0.0.0:80
mode http
log global
balance roundrobin
server nginx1 172.16.62.15:80 weight 1 check inter 3000 fall 2 rise 5
server nginx2 172.16.62.16:80 weight 1 check inter 3000 fall 2 rise 5
[root@node12 keepalived]# 
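
Before reloading, the configuration can be syntax-checked; a minimal sketch, assuming the default config path shown in the service output above:

haproxy -c -f /etc/haproxy/haproxy.cfg   # parse the config and report errors
systemctl reload haproxy                 # apply without a full restart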

3.3 keepalived configuration

# node12 configuration
[root@node12 keepalived]# more keepalived.conf
! Configuration File for keepalived

global_defs {       # global definitions block
   notification_email {    # email alerting on failures, optional
     test@qq.com     # mail recipient
   }
   notification_email_from root@qq.com # mail sender
   smtp_server 127.0.0.1    # SMTP server
   smtp_connect_timeout 30  # SMTP connect timeout
   router_id node12.haostack.com    # string identifying this node, usually the hostname (though it need not be); used in failure notification mails
   vrrp_skip_check_adv_addr  # checking every advert is costly; skip the source-address check when an advert comes from the same router as the previous one
   #vrrp_strict      # strict VRRP compliance; disallows: 1) no VIP address, 2) unicast peers, 3) IPv6 addresses with VRRP version 2
   vrrp_garp_interval 0  # delay between gratuitous ARP messages
   vrrp_gna_interval 0  # delay between gratuitous NA messages
   vrrp_iptables # suppress the iptables rules keepalived would otherwise add, or the VIPs cannot be pinged
}
# HAProxy health check
vrrp_script chk_haproxy {
   script "/etc/keepalived/chk_haproxy.sh"  # path to the check script
   interval 1   # run the script every second
   weight -80   # subtract 80 from the priority while the check is failed
   fall 3     # consecutive failures before the check counts as failed
   rise 5     # consecutive successes before it counts as healthy again
   timeout 2  # script timeout
   }

vrrp_instance VI_200 {  # VRRP instance block
    state MASTER    # initial role of this node, MASTER or BACKUP; once the peers are up, the node with the higher priority is elected MASTER
    interface eth0  # NIC carrying the node's own (non-VIP) address, used to send VRRP packets
    virtual_router_id 200 # value in 1-255; distinguishes the VRRP groups of multiple instances and must be unique within a network segment
    priority 100    # used for master election; to become master reliably this should exceed the other nodes' value by a clear margin (e.g. 50); valid range is 1-254, anything outside is treated as the default 100
    advert_int 2  # interval between VRRP advertisements, effectively the election/health-check interval
    #nopreempt  # non-preemptive mode: a recovered higher-priority node does not take the VIPs back
    #preempt_delay 60s  # delay before preempting in preemptive mode
    unicast_src_ip 172.16.62.12 # local address for VRRP unicast; requires vrrp_strict to be disabled
    unicast_peer {             # peer unicast address
       172.16.62.13
     }
    authentication {   # VRRP authentication shared by both peers
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        172.16.62.248
        172.16.62.249
        172.16.62.250
        172.16.62.251
        172.16.62.252
    }
    # The following three (commented-out) lines hook notification scripts:
    #notify_master "/etc/keepalived/notify.sh master"
    # script triggered when this node becomes MASTER
    ##notify_backup "/etc/keepalived/notify.sh backup"
    # script triggered when this node turns BACKUP
    ##notify_fault "/etc/keepalived/notify.sh fault"
    # script triggered when this node enters the FAULT state
track_script {
  chk_haproxy   # reference the chk_haproxy script defined above
  }
}
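
A quick sanity check of the failover arithmetic implied by these values: after 3 failed checks, node12's effective priority drops by the script weight, 100 - 80 = 20, which is below node13's 80, so node13 wins the next election and takes over the VIPs; after 5 consecutive successful checks the priority returns to 100 and node12 preempts the VIPs back (preemption is the default). The logs in section 5 show exactly this sequence.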

# node13 configuration
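The node13 side presumably mirrors node12 except for the role-specific values, inferred from the logs above (BACKUP state, advertised priority 80) and the analogous LVS section later in this post:

router_id node13.haostack.com
state BACKUP
priority 80
unicast_src_ip 172.16.62.13
unicast_peer {
   172.16.62.12
 }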


4. Web server configuration

4.1 Page access test
[root@node24 ~]# curl -l 172.16.62.16:81
<h1>Hello, mobile.haostack.com </h1>
<h2>apache,172.16.62.15</h2>
[root@node24 ~]# curl -l 172.16.62.16
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.16</h2>
[root@node24 ~]# curl -l 172.16.62.15:81
<h1>Hello, mobile.haostack.com </h1>
<h2>apache,172.16.62.15</h2>
[root@node24 ~]# curl -l 172.16.62.15
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.15</h2>
[root@node24 ~]#



5. Testing

5.1 Stop the haproxy service on node12

# node12 log
Jun 28 12:36:39 node12 systemd: Stopped HAProxy Load Balancer.
# the haproxy check script now exits with status 1
Jun 28 12:36:39 node12 Keepalived_vrrp[9408]: /etc/keepalived/chk_haproxy.sh exited with status 1
Jun 28 12:36:40 node12 Keepalived_vrrp[9408]: /etc/keepalived/chk_haproxy.sh exited with status 1
Jun 28 12:36:41 node12 Keepalived_vrrp[9408]: /etc/keepalived/chk_haproxy.sh exited with status 1
Jun 28 12:36:41 node12 Keepalived_vrrp[9408]: VRRP_Script(chk_haproxy) failed
Jun 28 12:36:41 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) Changing effective priority from 100 to 20
Jun 28 12:36:42 node12 Keepalived_vrrp[9408]: /etc/keepalived/chk_haproxy.sh exited with status 1
Jun 28 12:36:43 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) Received advert with higher priority 80, ours 20
Jun 28 12:36:43 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) Entering BACKUP STATE
Jun 28 12:36:43 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) removing protocol VIPs.






# The VIPs fail over to node13
[root@node13 haproxy]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:a0:98:7e brd ff:ff:ff:ff:ff:ff
    inet 172.16.62.13/24 brd 172.16.62.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.248/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.249/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.250/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.251/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.252/32 scope global eth0
       valid_lft forever preferred_lft forever
[root@node13 haproxy]#
# node13 log
Jun 28 12:36:43 node13 Keepalived_vrrp[3692]: VRRP_Instance(VI_200) forcing a new MASTER election
Jun 28 12:36:45 node13 Keepalived_vrrp[3692]: VRRP_Instance(VI_200) Transition to MASTER STATE
Jun 28 12:36:47 node13 Keepalived_vrrp[3692]: VRRP_Instance(VI_200) Entering MASTER STATE
Jun 28 12:36:47 node13 Keepalived_vrrp[3692]: VRRP_Instance(VI_200) setting protocol VIPs.
Jun 28 12:36:47 node13 Keepalived_vrrp[3692]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 28 12:36:47 node13 Keepalived_vrrp[3692]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.248
Jun 28 12:36:47 node13 Keepalived_vrrp[3692]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 28 12:36:47 node13 Keepalived_vrrp[3692]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.249
Jun 28 12:36:47 node13 Keepalived_vrrp[3692]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 28 12:36:47 node13 Keepalived_vrrp[3692]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.250
Jun 28 12:36:47 node13 Keepalived_vrrp[3692]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 28 12:36:47 node13 Keepalived_vrrp[3692]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.251
Jun 28 12:36:47 node13 Keepalived_vrrp[3692]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 28 12:36:47 node13 Keepalived_vrrp[3692]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.252


5.2 Page access test

# Access www.haostack.com; the first two requests are refused while the VIPs are still moving
[root@node24 named]# while true;do curl http://www.haostack.com/index.html;sleep 0.7;done
curl: (7) Failed connect to www.haostack.com:80; Connection refused
curl: (7) Failed connect to www.haostack.com:80; Connection refused
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.15</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.16</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.15</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.16</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.15</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.16</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.15</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.16</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.15</h2>
^C
[root@node24 named]# 



# Access mobile.haostack.com
[root@node24 named]# while true;do curl http://mobile.haostack.com/index.html;sleep 0.7;done
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.16</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.15</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.16</h2>
<h1>Hello, www.haostack.com </h1>

5.3 Start the haproxy service again

# Start haproxy on node12
[root@node12 keepalived]# systemctl start haproxy

# The VIPs return to node12
[root@node12 keepalived]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:a0:a4:f5 brd ff:ff:ff:ff:ff:ff
    inet 172.16.62.12/24 brd 172.16.62.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.248/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.249/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.250/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.251/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.252/32 scope global eth0
       valid_lft forever preferred_lft forever
[root@node12 keepalived]# 


# node12 log
Jun 28 12:44:31 node12 systemd: Started HAProxy Load Balancer.
Jun 28 12:44:36 node12 Keepalived_vrrp[9408]: VRRP_Script(chk_haproxy) succeeded
Jun 28 12:44:37 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) Changing effective priority from 20 to 100
Jun 28 12:44:37 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) forcing a new MASTER election
Jun 28 12:44:39 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) Transition to MASTER STATE
Jun 28 12:44:41 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) Entering MASTER STATE
Jun 28 12:44:41 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) setting protocol VIPs.
Jun 28 12:44:41 node12 Keepalived_vrrp[9408]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 28 12:44:41 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.248
Jun 28 12:44:41 node12 Keepalived_vrrp[9408]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 28 12:44:41 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.249
Jun 28 12:44:41 node12 Keepalived_vrrp[9408]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 28 12:44:41 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.250
Jun 28 12:44:41 node12 Keepalived_vrrp[9408]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 28 12:44:41 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.251
Jun 28 12:44:41 node12 Keepalived_vrrp[9408]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 28 12:44:41 node12 Keepalived_vrrp[9408]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.252





# node13 log
Jun 28 12:44:37 node13 Keepalived_vrrp[3692]: VRRP_Instance(VI_200) Received advert with higher priority 100, ours 80
Jun 28 12:44:37 node13 Keepalived_vrrp[3692]: VRRP_Instance(VI_200) Entering BACKUP STATE
Jun 28 12:44:37 node13 Keepalived_vrrp[3692]: VRRP_Instance(VI_200) removing protocol VIPs.
Jun 28 12:44:39 node13 ntpd[718]: Deleting interface #34 eth0, 172.16.62.252#123, interface stats: received=0, sent=0, dropped=0, active_time=470 secs
Jun 28 12:44:39 node13 ntpd[718]: Deleting interface #33 eth0, 172.16.62.251#123, interface stats: received=0, sent=0, dropped=0, active_time=470 secs
Jun 28 12:44:39 node13 ntpd[718]: Deleting interface #32 eth0, 172.16.62.250#123, interface stats: received=0, sent=0, dropped=0, active_time=470 secs
Jun 28 12:44:39 node13 ntpd[718]: Deleting interface #31 eth0, 172.16.62.249#123, interface stats: received=0, sent=0, dropped=0, active_time=470 secs
Jun 28 12:44:39 node13 ntpd[718]: Deleting interface #30 eth0, 172.16.62.248#123, interface stats: received=0, sent=0, dropped=0, active_time=470 secs



5.4 Page access test

# Access www.haostack.com
[root@node24 named]# while true;do curl http://www.haostack.com/index.html;sleep 0.7;done
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.16</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.15</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.16</h2>
<h1>Hello, www.haostack.com </h1>


# Access mobile.haostack.com
[root@node24 named]# while true;do curl http://mobile.haostack.com/index.html;sleep 0.7;done
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.16</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.15</h2>
<h1>Hello, www.haostack.com </h1>
<h2>nginx,172.16.62.16</h2>
<h1>Hello, www.haostack.com </h1>

2. Implementing a high-availability cluster with LVS + keepalived

1. Lab environment

Node     Application       IP
Node12   lvs+keepalived    172.16.62.12
Node13   lvs+keepalived    172.16.62.13
web15    nginx             172.16.62.15
web16    nginx             172.16.62.16
DNS24    DNS               172.16.62.24
Node14   sorry server      172.16.62.14

2. Software installation

2.1 Install keepalived

# Install keepalived on both node12 and node13
yum install keepalived -y


# Check keepalived on node12
[root@node12 keepalived]# systemctl status  keepalived
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
   Active: active (running) since Sat 2020-06-27 17:49:44 CST; 39s ago
  Process: 2195 ExecReload=/bin/kill -HUP $MAINPID (code=exited, status=0/SUCCESS)
  Process: 2219 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 2220 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─2220 /usr/sbin/keepalived -D
           ├─2221 /usr/sbin/keepalived -D
           └─2222 /usr/sbin/keepalived -D

Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 17:49:53 node12 Keepalived_vrrp[2222]: Sending gratuitous ARP on eth0 for 172.16.62.252
[root@node12 keepalived]#


# Check keepalived on node13
[root@node13 keepalived]# systemctl status  keepalived
● keepalived.service - LVS and VRRP High Availability Monitor
   Loaded: loaded (/usr/lib/systemd/system/keepalived.service; disabled; vendor preset: disabled)
   Active: active (running) since Sat 2020-06-27 17:49:55 CST; 22s ago
  Process: 2457 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
 Main PID: 2458 (keepalived)
   CGroup: /system.slice/keepalived.service
           ├─2458 /usr/sbin/keepalived -D
           ├─2459 /usr/sbin/keepalived -D
           └─2460 /usr/sbin/keepalived -D

Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: Registering Kernel netlink reflector
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: Registering Kernel netlink command channel
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: Registering gratuitous ARP shared channel
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: Opening file '/etc/keepalived/keepalived.conf'.
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: VRRP_Instance(VI_200) removing protocol VIPs.
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: Using LinkWatch kernel netlink reflector...
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: VRRP_Instance(VI_200) Entering BACKUP STATE
Jun 27 17:49:55 node13 Keepalived_vrrp[2460]: VRRP sockpool: [ifindex(2), proto(112), unicast(1), fd(10,11)]
Jun 27 17:49:55 node13 Keepalived_healthcheckers[2459]: Activating healthchecker for service [172.16.62.248]:80
Jun 27 17:49:55 node13 Keepalived_healthcheckers[2459]: Activating healthchecker for service [172.16.62.248]:80
[root@node13 keepalived]# 

2.2 Install ipvsadm (LVS)

# Install ipvsadm on both node12 and node13
yum -y install ipvsadm
# If the ipvsadm service fails to start, create its rules file by saving the current rule set:
ipvsadm --save > /etc/sysconfig/ipvsadm
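
To have the saved rules restored at every boot (the unit's ExecStart in the status output below simply replays that file), the service can be enabled as well; a minimal sketch assuming the stock CentOS 7 unit:

systemctl enable ipvsadm
systemctl start ipvsadm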

[root@node12 keepalived]# systemctl status ipvsadm
● ipvsadm.service - Initialise the Linux Virtual Server
   Loaded: loaded (/usr/lib/systemd/system/ipvsadm.service; enabled; vendor preset: disabled)
   Active: active (exited) since Sat 2020-06-27 17:54:06 CST; 43s ago
  Process: 2251 ExecStart=/bin/bash -c exec /sbin/ipvsadm-restore < /etc/sysconfig/ipvsadm (code=exited, status=0/SUCCESS)
 Main PID: 2251 (code=exited, status=0/SUCCESS)

Jun 27 17:54:06 node12 systemd[1]: Starting Initialise the Linux Virtual Server...
Jun 27 17:54:06 node12 systemd[1]: Started Initialise the Linux Virtual Server


3. Configuration

3.1 keepalived + LVS configuration

# keepalived.conf on node12
[root@node12 keepalived]# more keepalived.conf
! Configuration File for keepalived

global_defs {       # global definitions block
   notification_email {    # email alerting on failures, optional
     test@qq.com     # mail recipient
   }
   notification_email_from root@qq.com # mail sender
   smtp_server 127.0.0.1    # SMTP server
   smtp_connect_timeout 30  # SMTP connect timeout
   router_id node12.haostack.com    # string identifying this node, usually the hostname (though it need not be); used in failure notification mails
   vrrp_skip_check_adv_addr  # checking every advert is costly; skip the source-address check when an advert comes from the same router as the previous one
   #vrrp_strict      # strict VRRP compliance; disallows: 1) no VIP address, 2) unicast peers, 3) IPv6 addresses with VRRP version 2
   vrrp_garp_interval 0  # delay between gratuitous ARP messages
   vrrp_gna_interval 0  # delay between gratuitous NA messages
   vrrp_iptables # suppress the iptables rules keepalived would otherwise add, or the VIPs cannot be pinged
}

vrrp_instance VI_200 {  # VRRP instance block
    state MASTER    # initial role of this node, MASTER or BACKUP; once the peers are up, the node with the higher priority is elected MASTER
    interface eth0  # NIC carrying the node's own (non-VIP) address, used to send VRRP packets
    virtual_router_id 200 # value in 1-255; distinguishes the VRRP groups of multiple instances and must be unique within a network segment
    priority 100    # used for master election; to become master reliably this should exceed the other nodes' value by a clear margin (e.g. 50); valid range is 1-254, anything outside is treated as the default 100
    advert_int 2  # interval between VRRP advertisements, effectively the election/health-check interval
    #nopreempt  # non-preemptive mode: a recovered higher-priority node does not take the VIPs back
    #preempt_delay 60s  # delay before preempting in preemptive mode
    unicast_src_ip 172.16.62.12 # local address for VRRP unicast; requires vrrp_strict to be disabled
    unicast_peer {             # peer unicast address
       172.16.62.13
     }
    authentication {   # VRRP authentication shared by both peers
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        172.16.62.248
        172.16.62.249
        172.16.62.250
        172.16.62.251
        172.16.62.252
    }
}

virtual_server 172.16.62.248 80 { # VIP and port of the virtual server
    delay_loop 3  # health-check polling interval in seconds
    lb_algo wrr    # LVS scheduling algorithm (weighted round robin)
    lb_kind DR   # LVS forwarding mode (direct routing)
    #persistence_timeout 50 # session persistence in seconds: requests from the same client within this window go to the same real server
    protocol TCP    # protocol used for the service and its health checks (TCP or UDP)
    sorry_server 172.16.62.14 80 # fallback server: when every real server is down, all requests are temporarily sent here

    real_server 172.16.62.15 80 {  # backend real server
        weight 1   # weight
         TCP_CHECK {   # health-check method
            connect_timeout 3   # connection timeout
            nb_get_retry 3   # retry count
            delay_before_retry 3 # delay between retries
            connect_port 80     # port to check
        }
    }
     real_server 172.16.62.16 80 {  # backend real server
        weight 1   # weight
         TCP_CHECK {   # health-check method
            connect_timeout 3   # connection timeout
            nb_get_retry 3   # retry count
            delay_before_retry 3 # delay between retries
            connect_port 80     # port to check
        }
    }
}
[root@node12 keepalived]#


# keepalived.conf on node13

[root@node13 keepalived]# more keepalived.conf 
! Configuration File for keepalived

global_defs {       # global definitions block
   notification_email {    # email alerting on failures, optional
     test@qq.com     # mail recipient
   }
   notification_email_from root@qq.com # mail sender
   smtp_server 127.0.0.1    # SMTP server
   smtp_connect_timeout 30  # SMTP connect timeout
   router_id node13.haostack.com    # string identifying this node, usually the hostname (though it need not be); used in failure notification mails
   vrrp_skip_check_adv_addr  # checking every advert is costly; skip the source-address check when an advert comes from the same router as the previous one
   #vrrp_strict      # strict VRRP compliance; disallows: 1) no VIP address, 2) unicast peers, 3) IPv6 addresses with VRRP version 2
   vrrp_garp_interval 0  # delay between gratuitous ARP messages
   vrrp_gna_interval 0  # delay between gratuitous NA messages
   vrrp_iptables # suppress the iptables rules keepalived would otherwise add, or the VIPs cannot be pinged
}

vrrp_instance VI_200 {  # VRRP instance block
    state BACKUP   # initial role of this node, MASTER or BACKUP; once the peers are up, the node with the higher priority is elected MASTER
    interface eth0  # NIC carrying the node's own (non-VIP) address, used to send VRRP packets
    virtual_router_id 200 # value in 1-255; distinguishes the VRRP groups of multiple instances and must be unique within a network segment
    priority 80    # used for master election; kept below node12's 100 so node12 wins; valid range is 1-254, anything outside is treated as the default 100
    advert_int 2  # interval between VRRP advertisements, effectively the election/health-check interval
    #nopreempt  # non-preemptive mode: a recovered higher-priority node does not take the VIPs back
    #preempt_delay 60s  # delay before preempting in preemptive mode
    unicast_src_ip 172.16.62.13 # local address for VRRP unicast; requires vrrp_strict to be disabled
    unicast_peer {             # peer unicast address
       172.16.62.12
     }
    authentication {   # VRRP authentication shared by both peers
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        172.16.62.248
        172.16.62.249
        172.16.62.250
        172.16.62.251
        172.16.62.252
    }
}

virtual_server 172.16.62.248 80 { # VIP and port of the virtual server
    delay_loop 3  # health-check polling interval in seconds
    lb_algo wrr    # LVS scheduling algorithm (weighted round robin)
    lb_kind DR   # LVS forwarding mode (direct routing)
    #persistence_timeout 50 # session persistence in seconds: requests from the same client within this window go to the same real server
    protocol TCP    # protocol used for the service and its health checks (TCP or UDP)
    sorry_server 172.16.62.14 80 # fallback server: when every real server is down, all requests are temporarily sent here

    real_server 172.16.62.15 80 {  # backend real server
        weight 1   # weight
         TCP_CHECK {   # health-check method
            connect_timeout 3   # connection timeout
            nb_get_retry 3   # retry count
            delay_before_retry 3 # delay between retries
            connect_port 80     # port to check
        }
    }
     real_server 172.16.62.16 80 {  # backend real server
        weight 1   # weight
         TCP_CHECK {   # health-check method
            connect_timeout 3   # connection timeout
            nb_get_retry 3   # retry count
            delay_before_retry 3 # delay between retries
            connect_port 80     # port to check
        }
    }
}
[root@node13 keepalived]# 
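
After editing keepalived.conf on either node, the change can be applied without a restart; the unit's ExecReload (visible in the status output in section 2.1) sends SIGHUP to the daemon:

systemctl reload keepalived
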
3.2 Check ipvsadm
[root@node12 keepalived]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.16.62.248:80 wrr
  -> 172.16.62.15:80              Route   1      0          0         
  -> 172.16.62.16:80              Route   1      0          0         
[root@node12 keepalived]# 
[root@node12 keepalived]#

3.3 Configuration on the web servers

# Install nginx on web15 and web16 and verify the service is reachable; nginx listens on port 80
[root@node24 named]# curl -l 172.16.62.16:80
mobile mobile.haostack.com 172.16.62.16
[root@node24 named]# curl -l 172.16.62.15:80
mobile mobile.haostack.com 172.16.62.15
[root@node24 named]# 


# VIP configuration on the real servers, generated by the script below.
# The script binds 172.16.62.248 on lo:0; 172.16.62.249 was presumably bound the same way on lo:1 (see the ifconfig output further down).
[root@web15 tmp]# more lvs_dr.sh 
#!/bin/bash
# LVS DR-mode real-server init script
LVS_VIP=172.16.62.248
source /etc/rc.d/init.d/functions
#rpm -q httpd &> /dev/null || yum -y install httpd &>/dev/null
#service httpd start &> /dev/null && echo "The httpd Server is Ready!"
#echo "<h1>`hostname`</h1>" > /var/www/html/index.html

case "$1" in
start)
    /sbin/ifconfig lo:0 $LVS_VIP netmask 255.255.255.255 broadcast $LVS_VIP
    /sbin/route add -host $LVS_VIP dev lo:0
    # arp_ignore=1: only answer ARP requests for addresses configured on the incoming interface, so the VIP on lo is never answered for
    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    # arp_announce=2: always use the best local source address in ARP announcements, hiding the VIP
    echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    sysctl -p >/dev/null 2>&1
    echo "RealServer Start OK"
    ;;
stop)
    /sbin/ifconfig lo:0 down
    /sbin/route del -host $LVS_VIP >/dev/null 2>&1
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
    echo "RealServer Stopped"
    ;;
*) 
    echo "Usage:  $0 start|stop"
    exit 1
esac
exit 0
[root@web15 tmp]#
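
A minimal sketch of how the second VIP seen on lo:1 below could have been bound, mirroring the script above (an assumption; only the 172.16.62.248 run is shown):

/sbin/ifconfig lo:1 172.16.62.249 netmask 255.255.255.255 broadcast 172.16.62.249
/sbin/route add -host 172.16.62.249 dev lo:1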

# Check the VIPs on web15
[root@web15 tmp]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.16.62.15  netmask 255.255.255.0  broadcast 172.16.62.255
        ether 00:50:56:a0:cf:97  txqueuelen 1000  (Ethernet)
        RX packets 68959917  bytes 4492041925 (4.1 GiB)
        RX errors 0  dropped 1369227  overruns 0  frame 0
        TX packets 1263689  bytes 105074845 (100.2 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        loop  txqueuelen 0  (Local Loopback)
        RX packets 506  bytes 42884 (41.8 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 506  bytes 42884 (41.8 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo:0: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 172.16.62.248  netmask 255.255.255.255
        loop  txqueuelen 0  (Local Loopback)

lo:1: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 172.16.62.249  netmask 255.255.255.255
        loop  txqueuelen 0  (Local Loopback)

[root@web15 tmp]#

# Check the VIPs on web16
[root@web16 tmp]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.16.62.16  netmask 255.255.255.0  broadcast 172.16.62.255
        ether 00:50:56:a0:18:39  txqueuelen 1000  (Ethernet)
        RX packets 26982444  bytes 1805618202 (1.6 GiB)
        RX errors 0  dropped 479593  overruns 0  frame 0
        TX packets 3918642  bytes 2183741064 (2.0 GiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        loop  txqueuelen 0  (Local Loopback)
        RX packets 97862  bytes 12810900 (12.2 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 97862  bytes 12810900 (12.2 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo:0: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 172.16.62.248  netmask 255.255.255.255
        loop  txqueuelen 0  (Local Loopback)

lo:1: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 172.16.62.249  netmask 255.255.255.255
        loop  txqueuelen 0  (Local Loopback)

[root@web16 tmp]#

4. Testing

4.1 Test keepalived + LVS failover

  • The VIPs currently sit on node12; the keepalived configuration defaults to preemptive mode
[root@node12 keepalived]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:a0:a4:f5 brd ff:ff:ff:ff:ff:ff
    inet 172.16.62.12/24 brd 172.16.62.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.248/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.249/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.250/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.251/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.252/32 scope global eth0
       valid_lft forever preferred_lft forever
[root@node12 keepalived]#

# Stop keepalived on node12
[root@node12 keepalived]# systemctl stop keepalived

# On node13: the VIPs have failed over to the BACKUP node
[root@node13 keepalived]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:a0:98:7e brd ff:ff:ff:ff:ff:ff
    inet 172.16.62.13/24 brd 172.16.62.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.248/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.249/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.250/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.251/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.252/32 scope global eth0
       valid_lft forever preferred_lft forever
[root@node13 keepalived]#


# node13 log
Jun 27 19:02:20 node13 Keepalived_vrrp[2670]: VRRP_Instance(VI_200) Transition to MASTER STATE
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: VRRP_Instance(VI_200) Entering MASTER STATE
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: VRRP_Instance(VI_200) setting protocol VIPs.
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.248
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.249
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.250
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.251
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.252
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:02:22 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:02:24 node13 ntpd[718]: Listen normally on 15 eth0 172.16.62.248 UDP 123
Jun 27 19:02:24 node13 ntpd[718]: Listen normally on 16 eth0 172.16.62.249 UDP 123
Jun 27 19:02:24 node13 ntpd[718]: Listen normally on 17 eth0 172.16.62.250 UDP 123
Jun 27 19:02:24 node13 ntpd[718]: Listen normally on 18 eth0 172.16.62.251 UDP 123
Jun 27 19:02:24 node13 ntpd[718]: Listen normally on 19 eth0 172.16.62.252 UDP 123
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.248
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.249
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.250
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.251
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.252
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:02:27 node13 Keepalived_vrrp[2670]: Sending gratuitous ARP on eth0 for 172.16.62.252


# Test the site
[root@node24 named]# curl 172.16.62.248
mobile mobile.haostack.com 172.16.62.15
[root@node24 named]# curl 172.16.62.248
mobile mobile.haostack.com 172.16.62.16
[root@node24 named]# curl 172.16.62.248
mobile mobile.haostack.com 172.16.62.15
[root@node24 named]# curl 172.16.62.248
mobile mobile.haostack.com 172.16.62.16

# Check ipvsadm stats
[root@node13 keepalived]# ipvsadm -Ln --stats
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port               Conns   InPkts  OutPkts  InBytes OutBytes
  -> RemoteAddress:Port
TCP  172.16.62.248:80                   17      111        0     7217        0
  -> 172.16.62.15:80                     8       48        0     3176        0
  -> 172.16.62.16:80                     9       63        0     4041        0


# Start keepalived on node12 again; the VIPs return to the MASTER
[root@node12 keepalived]# systemctl start keepalived
[root@node12 keepalived]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:a0:a4:f5 brd ff:ff:ff:ff:ff:ff
    inet 172.16.62.12/24 brd 172.16.62.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.248/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.249/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.250/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.251/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.16.62.252/32 scope global eth0
       valid_lft forever preferred_lft forever
[root@node12 keepalived]# 





# node12 log
Jun 27 19:05:39 node12 systemd: Starting LVS and VRRP High Availability Monitor...
Jun 27 19:05:39 node12 Keepalived[2691]: Starting Keepalived v1.3.5 (03/19,2017), git commit v1.3.5-6-g6fa32f2
Jun 27 19:05:39 node12 Keepalived[2691]: Opening file '/etc/keepalived/keepalived.conf'.
Jun 27 19:05:39 node12 Keepalived[2692]: Starting Healthcheck child process, pid=2693
Jun 27 19:05:39 node12 Keepalived[2692]: Starting VRRP child process, pid=2694
Jun 27 19:05:39 node12 systemd: Started LVS and VRRP High Availability Monitor.
Jun 27 19:05:39 node12 Keepalived_healthcheckers[2693]: Opening file '/etc/keepalived/keepalived.conf'.
Jun 27 19:05:39 node12 Keepalived_vrrp[2694]: Registering Kernel netlink reflector
Jun 27 19:05:39 node12 Keepalived_vrrp[2694]: Registering Kernel netlink command channel
Jun 27 19:05:39 node12 Keepalived_vrrp[2694]: Registering gratuitous ARP shared channel
Jun 27 19:05:39 node12 Keepalived_vrrp[2694]: Opening file '/etc/keepalived/keepalived.conf'.
Jun 27 19:05:39 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) removing protocol VIPs.
Jun 27 19:05:39 node12 Keepalived_vrrp[2694]: Using LinkWatch kernel netlink reflector...
Jun 27 19:05:39 node12 Keepalived_vrrp[2694]: VRRP sockpool: [ifindex(2), proto(112), unicast(1), fd(10,11)]
Jun 27 19:05:39 node12 Keepalived_healthcheckers[2693]: Activating healthchecker for service [172.16.62.248]:80
Jun 27 19:05:39 node12 Keepalived_healthcheckers[2693]: Activating healthchecker for service [172.16.62.248]:80
Jun 27 19:05:40 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) Transition to MASTER STATE
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) Entering MASTER STATE
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) setting protocol VIPs.
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.248
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.249
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.250
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.251
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.252
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:05:42 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:05:44 node12 ntpd[715]: Listen normally on 30 eth0 172.16.62.248 UDP 123
Jun 27 19:05:44 node12 ntpd[715]: Listen normally on 31 eth0 172.16.62.249 UDP 123
Jun 27 19:05:44 node12 ntpd[715]: Listen normally on 32 eth0 172.16.62.250 UDP 123
Jun 27 19:05:44 node12 ntpd[715]: Listen normally on 33 eth0 172.16.62.251 UDP 123
Jun 27 19:05:44 node12 ntpd[715]: Listen normally on 34 eth0 172.16.62.252 UDP 123
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.248
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.249
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.250
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.251
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) Sending/queueing gratuitous ARPs on eth0 for 172.16.62.252
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.252
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.248
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.249
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.250
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.251
Jun 27 19:05:47 node12 Keepalived_vrrp[2694]: Sending gratuitous ARP on eth0 for 172.16.62.252

# Check ipvsadm stats
[root@node12 keepalived]# ipvsadm -Ln --stats
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port               Conns   InPkts  OutPkts  InBytes OutBytes
  -> RemoteAddress:Port
TCP  172.16.62.248:80                    4       26        0     1704        0
  -> 172.16.62.15:80                     2       12        0      800        0
  -> 172.16.62.16:80                     2       14        0      904        0

# Site test
# www.haostack.com has a DNS record pointing at 172.16.62.248
[root@node24 named]# curl www.haostack.com
mobile mobile.haostack.com 172.16.62.16
[root@node24 named]# curl www.haostack.com
mobile mobile.haostack.com 172.16.62.15
[root@node24 named]# curl www.haostack.com
mobile mobile.haostack.com 172.16.62.16
[root@node24 named]# curl www.haostack.com
mobile mobile.haostack.com 172.16.62.15
[root@node24 named]# 

4.2 Test HTTP-based health checks

  • Create test pages on web15 and web16
[root@web15 monitor-page]# curl 172.16.62.15/monitor-page/index.html
web15 172.16.62.15
[root@web15 monitor-page]# curl 172.16.62.16/monitor-page/index.html
web16 172.16.62.16
[root@web15 monitor-page]#

  • Configure the HTTP check (replace TCP_CHECK with HTTP_GET in both real_server blocks)
real_server 172.16.62.15 80 {  # backend real server
        weight 1   # weight
         HTTP_GET {   # health-check method
            url {
                path /monitor-page/index.html
                status_code 200
            }
            connect_timeout 3   # connection timeout
            nb_get_retry 3   # retry count
            delay_before_retry 3 # delay between retries
            #connect_port 80     # port to check
        }
    }
   real_server 172.16.62.16 80 {  # backend real server
        weight 1   # weight
         HTTP_GET {   # health-check method
            url {
                path /monitor-page/index.html
                status_code 200
            }
            connect_timeout 3   # connection timeout
            nb_get_retry 3   # retry count
            delay_before_retry 3 # delay between retries
            #connect_port 80     # port to check
        }
    }



# Test
[root@node24 named]# while true;do curl http://www.haostack.com/monitor-page/index.html  && sleep 1;done
web16 172.16.62.16
web15 172.16.62.15
web16 172.16.62.16
web15 172.16.62.15
web16 172.16.62.16
web15 172.16.62.15
# Check ipvsadm stats
[root@node12 keepalived]# ipvsadm -Ln --stats
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port               Conns   InPkts  OutPkts  InBytes OutBytes
  -> RemoteAddress:Port
TCP  172.16.62.248:80                   78      482        0    32106        0
  -> 172.16.62.15:80                    64      384        0    25689        0
  -> 172.16.62.16:80                    11       77        0     5041        0

4.3 Test the sorry server at 172.16.62.14

  • Stop the web service on web15 and web16. After the health checks fail repeatedly, both real servers are removed from the pool and the sorry_server at 172.16.62.14 comes online to serve requests.
    Sorry-server host configuration:
# Local page on the sorry server
[root@node14 tmp]# curl 172.16.62.14
Website maintenance, please contact 1380000001
[root@node14 tmp]# 

# The VIP is bound on lo:0
[root@node14 tmp]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet 172.16.62.248/32 brd 172.16.62.248 scope global lo:0
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:a0:e7:75 brd ff:ff:ff:ff:ff:ff
    inet 172.16.62.14/24 brd 172.16.62.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
    link/ether 02:42:c9:59:c6:0a brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
[root@node14 tmp]# 


# Stop nginx on the backend web servers
[root@web15 tmp]# systemctl stop nginx
[root@web16 conf]# systemctl stop nginx

# Master node log: can no longer connect to [172.16.62.15]:80
Jun 27 19:58:47 node12 Keepalived_vrrp[2694]: VRRP sockpool: [ifindex(2), proto(112), unicast(1), fd(10,11)]
Jun 27 19:58:49 node12 Keepalived_vrrp[2694]: VRRP_Instance(VI_200) Transition to MASTER STATE
Jun 27 20:01:01 node12 systemd: Started Session 7 of user root.
Jun 27 20:01:04 node12 Keepalived_healthcheckers[2693]: Error connecting server [172.16.62.15]:80.
Jun 27 20:01:07 node12 Keepalived_healthcheckers[2693]: Error connecting server [172.16.62.15]:80.
Jun 27 20:01:09 node12 Keepalived_healthcheckers[2693]: Error connecting server [172.16.62.16]:80.
Jun 27 20:01:10 node12 Keepalived_healthcheckers[2693]: Error connecting server [172.16.62.15]:80.
Jun 27 20:01:12 node12 Keepalived_healthcheckers[2693]: Error connecting server [172.16.62.16]:80.
Jun 27 20:01:13 node12 Keepalived_healthcheckers[2693]: Error connecting server [172.16.62.15]:80.
Jun 27 20:01:13 node12 Keepalived_healthcheckers[2693]: Check on service [172.16.62.15]:80 failed after 3 retry.
Jun 27 20:01:13 node12 Keepalived_healthcheckers[2693]: Removing service [172.16.62.15]:80 from VS [172.16.62.248]:80
Jun 27 20:01:13 node12 Keepalived_healthcheckers[2693]: SMTP connection ERROR to [127.0.0.1]:25.
Jun 27 20:01:15 node12 Keepalived_healthcheckers[2693]: Error connecting server [172.16.62.16]:80.
Jun 27 20:01:18 node12 Keepalived_healthcheckers[2693]: Error connecting server [172.16.62.16]:80.
Jun 27 20:01:18 node12 Keepalived_healthcheckers[2693]: Check on service [172.16.62.16]:80 failed after 3 retry.
Jun 27 20:01:18 node12 Keepalived_healthcheckers[2693]: Removing service [172.16.62.16]:80 from VS [172.16.62.248]:80
Jun 27 20:01:18 node12 Keepalived_healthcheckers[2693]: Lost quorum 1-0=1 > 0 for VS [172.16.62.248]:80
Jun 27 20:01:18 node12 Keepalived_healthcheckers[2693]: Adding sorry server [172.16.62.14]:80 to VS [172.16.62.248]:80
Jun 27 20:01:18 node12 Keepalived_healthcheckers[2693]: Removing alive servers from the pool for VS [172.16.62.248]:80
Jun 27 20:01:18 node12 Keepalived_healthcheckers[2693]: SMTP connection ERROR to [127.0.0.1]:25.

# Backup node log: can no longer connect to [172.16.62.15]:80
Jun 27 20:01:03 node13 Keepalived_healthcheckers[2669]: Error connecting server [172.16.62.15]:80.
Jun 27 20:01:06 node13 Keepalived_healthcheckers[2669]: Error connecting server [172.16.62.15]:80.
Jun 27 20:01:09 node13 Keepalived_healthcheckers[2669]: Error connecting server [172.16.62.15]:80.
Jun 27 20:01:09 node13 Keepalived_healthcheckers[2669]: Error connecting server [172.16.62.16]:80.
Jun 27 20:01:12 node13 Keepalived_healthcheckers[2669]: Error connecting server [172.16.62.15]:80.
Jun 27 20:01:12 node13 Keepalived_healthcheckers[2669]: Check on service [172.16.62.15]:80 failed after 3 retry.
Jun 27 20:01:12 node13 Keepalived_healthcheckers[2669]: Removing service [172.16.62.15]:80 from VS [172.16.62.248]:80
Jun 27 20:01:12 node13 Keepalived_healthcheckers[2669]: SMTP connection ERROR to [127.0.0.1]:25.
Jun 27 20:01:12 node13 Keepalived_healthcheckers[2669]: Error connecting server [172.16.62.16]:80.
Jun 27 20:01:15 node13 Keepalived_healthcheckers[2669]: Error connecting server [172.16.62.16]:80.
Jun 27 20:01:18 node13 Keepalived_healthcheckers[2669]: Error connecting server [172.16.62.16]:80.
Jun 27 20:01:18 node13 Keepalived_healthcheckers[2669]: Check on service [172.16.62.16]:80 failed after 3 retry.
Jun 27 20:01:18 node13 Keepalived_healthcheckers[2669]: Removing service [172.16.62.16]:80 from VS [172.16.62.248]:80
Jun 27 20:01:18 node13 Keepalived_healthcheckers[2669]: Lost quorum 1-0=1 > 0 for VS [172.16.62.248]:80
Jun 27 20:01:18 node13 Keepalived_healthcheckers[2669]: Adding sorry server [172.16.62.14]:80 to VS [172.16.62.248]:80
Jun 27 20:01:18 node13 Keepalived_healthcheckers[2669]: Removing alive servers from the pool for VS [172.16.62.248]:80
Jun 27 20:01:18 node13 Keepalived_healthcheckers[2669]: SMTP connection ERROR to [127.0.0.1]:25.
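
The "Lost quorum 1-0=1 > 0" lines can be read as keepalived's quorum arithmetic: quorum (default 1) minus hysteresis (0) is 1, which is now greater than the summed weight of the surviving real servers (0), so the virtual server loses quorum and the sorry server is added to the pool.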

# Check ipvsadm
[root@node12 keepalived]# ipvsadm -Ln --stats
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port               Conns   InPkts  OutPkts  InBytes OutBytes
  -> RemoteAddress:Port
TCP  172.16.62.248:80                   78      482        0    32106        0
  -> 172.16.62.14:80                     0        0        0        0        0

# Page test
[root@node24 named]# while true;do curl http://www.haostack.com  && sleep 1;done
Website maintenance, please contact 1380000001
Website maintenance, please contact 1380000001
Website maintenance, please contact 1380000001
Website maintenance, please contact 1380000001

