keepalived双主配置
既然说双主,那么也就是要有两个对外提供服务的VIP了,所以双主模式也可以理解为两个单主共用同一批后端RS。因此,我们要新增对另一个VIP的定义,包括vrrp实例和后端实例。
环境准备
机器 | 作用 |
---|---|
192.168.0.100 | LVS调度器对外服务IP,VIP1 |
192.168.0.110 | LVS调度器对外服务IP,VIP2 |
192.168.0.200 | RIP,后端web服务器之一 |
192.168.0.201 | RIP,后端web服务器之二 |
相比于上篇主备模式,我们新增一个VIP,192.168.0.110。
配置
配置两台LVS
VIP1上keepalived配置如下:
! Configuration File for keepalived
! LVS node 1: MASTER for VIP1 (192.168.0.100), BACKUP for VIP2 (192.168.0.110)

global_defs {
    router_id LVS_1                  # identifier of this keepalived node
}

! VRRP instance for VIP1 — this node is the MASTER
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51             # must be identical on both nodes serving VIP1
    priority 100                     # MASTER carries the higher priority
    advert_int 1                     # VRRP advertisement interval, seconds
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.0.100                # VIP1
    }
}

! Load-balancing definition for VIP1
virtual_server 192.168.0.100 80 {
    delay_loop 6                     # health-check interval, seconds
    lb_algo rr                       # round-robin scheduling
    lb_kind DR                       # direct-routing forwarding mode
    persistence_timeout 0            # no session persistence
    protocol TCP
    real_server 192.168.0.200 80 {
        weight 1
        TCP_CHECK {                  # identical check parameters for every backend
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.0.201 80 {   # was 192.168.0.300 — invalid IPv4 (octet > 255)
        weight 1
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}

! VRRP instance for VIP2 — nearly identical to VI_1, but this node is the BACKUP
vrrp_instance VI_2 {
    state BACKUP                     # the MASTER of VIP1 is the BACKUP of VIP2
    interface eth0
    virtual_router_id 52             # must differ from VI_1 to keep the two VIPs apart
    priority 90                      # BACKUP carries the lower priority
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.0.110                # VIP2 (the newly added service address)
    }
}

! Load-balancing definition for VIP2 — same backends as VIP1, copied verbatim
virtual_server 192.168.0.110 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 0
    protocol TCP
    real_server 192.168.0.200 80 {   # same backend pool as VIP1
        weight 1
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.0.201 80 {   # was 192.168.0.300 — invalid IPv4
        weight 1
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
VIP2所在机器上的keepalived配置如下。它和上面的配置几乎相同,区别主要在于两个vrrp实例的主备角色(state)和优先级(priority)正好互换,见下面的行内注释。
! Configuration File for keepalived
! LVS node 2: BACKUP for VIP1 (192.168.0.100), MASTER for VIP2 (192.168.0.110)

global_defs {
    router_id LVS_2                  # was LVS_1 — router_id identifies the machine and must be unique per node
}

! VRRP instance for VIP1 — on this node it is the BACKUP
vrrp_instance VI_1 {
    state BACKUP                     # difference 1: this node is the BACKUP for VIP1
    interface eth0
    virtual_router_id 51             # must match node 1's VI_1
    priority 90                      # difference 2: BACKUP priority is lower than the MASTER's
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.0.100                # VIP1
    }
}

! Load-balancing definition for VIP1 — identical on both nodes
virtual_server 192.168.0.100 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 0
    protocol TCP
    real_server 192.168.0.200 80 {
        weight 1
        TCP_CHECK {                  # identical check parameters for every backend
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.0.201 80 {   # was 192.168.0.300 — invalid IPv4 (octet > 255)
        weight 1
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}

! VRRP instance for VIP2 — on this node it is the MASTER
vrrp_instance VI_2 {
    state MASTER                     # difference 3: this node is the MASTER for VIP2
    interface eth0
    virtual_router_id 52             # must match node 1's VI_2
    priority 100                     # difference 4: MASTER priority is higher than the BACKUP's
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.0.110                # VIP2
    }
}

! Load-balancing definition for VIP2 — identical on both nodes
virtual_server 192.168.0.110 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 0
    protocol TCP
    real_server 192.168.0.200 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.0.201 80 {   # was 192.168.0.300 — invalid IPv4
        weight 1
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
启动两台LVS上的keepalived,可以查到到如下两个VIP的负载均衡配置,
[root@CentOS-7-2 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.0.110:80 rr
-> 192.168.0.200:80 Route 1 0 0
-> 192.168.0.201:80 Route 1 0 0
TCP 192.168.0.100:80 rr
-> 192.168.0.200:80 Route 1 0 0
-> 192.168.0.201:80 Route 1 0 0
此时eth0上绑定的VIP只有VIP1的地址192.168.0.100(192.168.0.140是这台机器自身的IP):
[root@CentOS-7-2 ~]# ip a show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:2e:74:89 brd ff:ff:ff:ff:ff:ff
inet 192.168.0.140/24 brd 192.168.0.255 scope global eth0
valid_lft forever preferred_lft forever
inet 192.168.0.100/32 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe2e:7489/64 scope link
valid_lft forever preferred_lft forever
配置两台RS
两台后端RS上,自然也就要绑定两个VIP了。我们分别绑定在lo:0和lo:1上,
# Suppress ARP for the VIPs FIRST, then bind them — setting arp_ignore/arp_announce
# before the addresses exist avoids a brief window in which the RS could answer
# ARP requests for the VIPs and steal traffic from the director.
echo 1 > /proc/sys/net/ipv4/conf/eth0/arp_ignore    # reply only for addresses configured on the receiving interface
echo 2 > /proc/sys/net/ipv4/conf/eth0/arp_announce  # always use the best local source address in ARP announcements
echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce

# Bind both VIPs on loopback aliases with a /32 mask so the RS accepts
# DR-forwarded packets addressed to the VIPs without ARPing for them.
# (Classic net-tools ifconfig needs an explicit netmask; the addr/32 CIDR
# form is not portable across ifconfig versions.)
ifconfig lo:0 192.168.0.100 netmask 255.255.255.255 up   # VIP1
ifconfig lo:1 192.168.0.110 netmask 255.255.255.255 up   # VIP2
route add -host 192.168.0.100 dev lo
route add -host 192.168.0.110 dev lo
nginx的相关配置和之前一样,就不再陈述,确保服务开启即可。
测试
首先确保两个VIP都能正常工作,
[root@CentOS-7-4 ~]# curl http://192.168.0.100/
This is 192.168.0.201
[root@CentOS-7-4 ~]# curl http://192.168.0.100/
This is 192.168.0.200
[root@CentOS-7-4 ~]# curl http://192.168.0.100/
This is 192.168.0.201
[root@CentOS-7-4 ~]# curl http://192.168.0.100/
This is 192.168.0.200
[root@CentOS-7-4 ~]# curl http://192.168.0.110/
This is 192.168.0.200
[root@CentOS-7-4 ~]# curl http://192.168.0.110/
This is 192.168.0.201
[root@CentOS-7-4 ~]# curl http://192.168.0.110/
This is 192.168.0.200
[root@CentOS-7-4 ~]# curl http://192.168.0.110/
This is 192.168.0.201
然后停掉持有VIP2的那台LVS上的keepalived服务,再次测试,两个VIP的服务依然正常,只不过此时两个VIP的流量都由原来持有VIP1的那台机器承担。
此时,VIP1机器上能查看到两个VIP,
[root@CentOS-7-2 ~]# ip a show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:2e:74:89 brd ff:ff:ff:ff:ff:ff
inet 192.168.0.140/24 brd 192.168.0.255 scope global eth0
valid_lft forever preferred_lft forever
inet 192.168.0.100/32 scope global eth0
valid_lft forever preferred_lft forever
inet 192.168.0.110/32 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe2e:7489/64 scope link
valid_lft forever preferred_lft forever
即,另一台LVS上的VIP(192.168.0.110)已经漂移到了这台机器上,双主互备生效。