[LVS+DR+Nginx+keepalived] High-Availability Solution

LVS+DR+Nginx+keepalived

Reference: https://blog.csdn.net/qq_35403875/article/details/125259440

Installing and deploying LVS

The system must have the ip_vs kernel module. It normally ships with the OS; if it is missing, follow the installation steps below.

# Check for the ip_vs module
[root@node1 ~]# lsmod |grep -i ip_vs
ip_vs_rr               12600  1 
ip_vs                 145458  3 ip_vs_rr
nf_conntrack          139264  6 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
# If it is not loaded, load ip_vs
[root@node1 ~]# modprobe ip_vs
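
The modprobe above only takes effect until the next reboot. If you want the module loaded automatically at boot, a minimal sketch using systemd's modules-load mechanism on CentOS 7 (the file name ip_vs.conf is an arbitrary choice):

# Load ip_vs and the rr scheduler automatically at boot
cat > /etc/modules-load.d/ip_vs.conf <<'EOF'
ip_vs
ip_vs_rr
EOF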

1. Install via yum

# Install ipvsadm
yum install -y ipvsadm

2. Install from the source package

# From https://mirrors.edge.kernel.org/pub/linux/utils/kernel/ipvsadm/ choose the version that matches your kernel
mkdir -p /root/LVS
cd /root/LVS
wget -c --no-check-certificate https://mirrors.edge.kernel.org/pub/linux/utils/kernel/ipvsadm/ipvsadm-1.30.tar.xz
ln -s /usr/src/kernels/3.10.0-1060.18.1.el7.x86_64/ /usr/src/linux 
tar -xvf ipvsadm-1.30.tar.xz
cd ipvsadm-1.30
make && make install
#If the build fails, install the dependencies first
#yum install -y popt-static kernel-devel make gcc openssl-devel lftp libnl* popt* libpopt*

#yum install -y libnl* popt*
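
Whichever install method you used, a quick sanity check (a sketch; the header of the ipvsadm listing shows the kernel IPVS version):

# Confirm the tool works and the module is loaded
ipvsadm -L -n
lsmod | grep -i ip_vs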

The following is based on: https://blog.csdn.net/weixin_46108954/article/details/106955841

Prepare three virtual machines

One serves as the LVS director and the other two as backend servers; four machines would be even better, with the extra one acting as a client.

Environment:

HOST            OS       hostname   GATEWAY
192.168.238.5   centos7  LVS        192.168.238.2
192.168.238.3   centos7  r1         192.168.238.5
192.168.238.4   centos7  r2         192.168.238.5

This test uses NAT mode.

3. LVS director (192.168.238.5)

Add a second NIC in bridged mode. You can also skip this and start directly from the LVS+DR+Nginx+keepalived section below; this part is only a quick test.

[root@localhost ipvsadm-1.30]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.238.5  netmask 255.255.255.0  broadcast 192.168.238.255
        inet6 fe80::4b6a:aefd:11c9:9419  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:f7:96:27  txqueuelen 1000  (Ethernet)
        RX packets 104203  bytes 151621115 (144.5 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 26706  bytes 1983598 (1.8 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

ens36: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.18.110.213  netmask 255.255.254.0  broadcast 10.18.111.255
        inet6 fe80::b348:d12d:2562:c418  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:f7:96:31  txqueuelen 1000  (Ethernet)
        RX packets 18161  bytes 1388242 (1.3 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 51  bytes 6283 (6.1 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 117  bytes 11322 (11.0 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 117  bytes 11322 (11.0 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

virbr0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 192.168.122.1  netmask 255.255.255.0  broadcast 192.168.122.255
        ether 52:54:00:2b:87:d9  txqueuelen 1000  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

cp /etc/sysconfig/network-scripts/ifcfg-ens33 /etc/sysconfig/network-scripts/ifcfg-ens36
vim /etc/sysconfig/network-scripts/ifcfg-ens36
# Change NAME and DEVICE to ens36 and delete the UUID line copied from ens33
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="dhcp"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens33"
UUID="6e76f3fb-08ff-4f95-b625-2b6b5410fd07"
DEVICE="ens33"
ONBOOT="yes"
systemctl restart network

Install ipvsadm (see the LVS installation section above).

# Enable IP forwarding, disable ICMP redirects, and set the NAT firewall rule
echo 1 > /proc/sys/net/ipv4/ip_forward
echo 0 > /proc/sys/net/ipv4/conf/all/send_redirects
echo 0 > /proc/sys/net/ipv4/conf/default/send_redirects
echo 0 > /proc/sys/net/ipv4/conf/ens33/send_redirects 
echo 0 > /proc/sys/net/ipv4/conf/ens36/send_redirects 
iptables -t nat -F
iptables -t nat -X
iptables -t nat -A POSTROUTING -s 192.168.238.0/24 -j MASQUERADE
# Configure ipvsadm
ipvsadm -C
ipvsadm -A -t 10.18.110.213:80 -s rr
ipvsadm -a -t 10.18.110.213:80 -r 192.168.238.3:80 -m -w 1
ipvsadm -a -t 10.18.110.213:80 -r 192.168.238.4:80 -m -w 1
# View the rules
ipvsadm -L -n
[root@localhost ipvsadm-1.30]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.18.110.213:80 rr
  -> 192.168.238.3:80             Masq    1      0          0         
  -> 192.168.238.4:80             Masq    1      0          0         
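
Note that writes to /proc do not survive a reboot. If you want the forwarding and redirect settings to persist, a minimal sketch using sysctl.conf (the per-interface send_redirects entries depend on your NIC names and are omitted here):

# Persistent equivalents of the /proc writes above
cat >> /etc/sysctl.conf <<'EOF'
net.ipv4.ip_forward = 1
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.send_redirects = 0
EOF
sysctl -p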

Parameter reference (a few related maintenance commands are sketched after this list):

  • -A: add a virtual service

  • -a: add a real server

  • -t: the service uses TCP

  • -r: specify the real server's IP address

  • -s: specify the scheduling algorithm

  • -m: forward via NAT (-i for TUN mode, -g for DR mode)

  • -w: specify the weight
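
For reference, a few related maintenance operations (a sketch using the same VIP and real servers as above; on CentOS 7 the ipvsadm service restores /etc/sysconfig/ipvsadm at boot):

ipvsadm -e -t 10.18.110.213:80 -r 192.168.238.3:80 -m -w 2   # -e: edit a real server (here, change its weight)
ipvsadm -d -t 10.18.110.213:80 -r 192.168.238.4:80           # -d: delete a real server
ipvsadm -D -t 10.18.110.213:80                               # -D: delete the whole virtual service
ipvsadm-save -n > /etc/sysconfig/ipvsadm                     # save the current rules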

4. The r1 and r2 nodes (192.168.238.3-4)

192.168.238.3

[root@localhost ipvsadm-1.30]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 172.17.0.1  netmask 255.255.0.0  broadcast 172.17.255.255
        inet6 fe80::42:29ff:fee4:3a41  prefixlen 64  scopeid 0x20<link>
        ether 02:42:29:e4:3a:41  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 19  bytes 2857 (2.7 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.238.3  netmask 255.255.255.0  broadcast 192.168.238.255
        inet6 fe80::71c0:759a:3abd:bfaa  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:e3:91:b1  txqueuelen 1000  (Ethernet)
        RX packets 30048  bytes 36637305 (34.9 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 10632  bytes 1277035 (1.2 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 20  bytes 1552 (1.5 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 20  bytes 1552 (1.5 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

virbr0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 192.168.122.1  netmask 255.255.255.0  broadcast 192.168.122.255
        ether 52:54:00:00:f0:5c  txqueuelen 1000  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

192.168.238.4

[root@localhost ipvsadm-1.30]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 172.17.0.1  netmask 255.255.0.0  broadcast 172.17.255.255
        ether 02:42:f4:4e:3f:df  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.238.4  netmask 255.255.255.0  broadcast 192.168.238.255
        inet6 fe80::1eb:4a80:e397:9c0a  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:9b:8e:77  txqueuelen 1000  (Ethernet)
        RX packets 771412  bytes 1119931016 (1.0 GiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 352187  bytes 21774847 (20.7 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 38  bytes 3392 (3.3 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 38  bytes 3392 (3.3 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

virbr0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 192.168.122.1  netmask 255.255.255.0  broadcast 192.168.122.255
        ether 52:54:00:88:a6:c4  txqueuelen 1000  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

Run the following on both r1 and r2; the content inside <h>hello 192.168.238.x<h> must differ between the two nodes (a small helper for this is sketched after the commands below).

yum install -y httpd
systemctl start httpd
vim /var/www/html/index.html

cat /var/www/html/index.html
<h>hello 192.168.238.3<h>
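
If you prefer not to edit the page by hand on each node, a hedged helper (hypothetical, not from the original post) that builds the page from the node's own ens33 address, so the content automatically differs on r1 and r2:

# Generate a per-host test page from this node's ens33 IP
IP=$(ip -4 addr show ens33 | awk '/inet /{print $2}' | cut -d/ -f1)
echo "<h>hello ${IP}<h>" > /var/www/html/index.html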

vim /etc/sysconfig/network-scripts/ifcfg-ens33
# Change GATEWAY to the ens33 inet address of the LVS director
GATEWAY="192.168.238.5"

Test from the LVS director or from the r1/r2 nodes; you can also open the page in a browser:

[root@localhost ipvsadm-1.30]# curl 10.18.110.213
<h>hello 192.168.238.3~<h> 
[root@localhost ipvsadm-1.30]# curl 10.18.110.213
<h>hello 192.168.238.4~<h>
[root@localhost ipvsadm-1.30]# curl 10.18.110.213
<h>hello 192.168.238.3~<h> 
[root@localhost ipvsadm-1.30]# curl 10.18.110.213
<h>hello 192.168.238.4~<h>
# Watch the forwarding records update in real time
[root@localhost ipvsadm-1.30]# watch -n 1 ipvsadm -L -n
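
Besides the plain listing, ipvsadm can also show counters and the connection table, which helps confirm that requests really alternate between the two real servers (a sketch):

ipvsadm -L -n --stats   # cumulative connection/packet/byte counters per service and real server
ipvsadm -L -n --rate    # current rates
ipvsadm -L -n -c        # connection table: which client went to which real server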

DR mode

LVS has three forwarding modes:

  1. NAT: put simply, traffic both in and out passes through the LVS director, so performance is limited.
  2. TUN: put simply, tunneling.
  3. DR: the most efficient forwarding mode.

In DR mode, the client sends a request to the VIP; LVS receives it, picks an active node according to the load-balancing algorithm, rewrites the destination MAC to the MAC address of the NIC holding that node's IP, and puts the frame back onto the LAN.

DR mode summary:

  1. Forwarding is done by rewriting the destination MAC address of the packet on the director. Note that the source address is still the CIP (client IP) and the destination address is still the VIP.
  2. Request packets pass through the director, but the RS responses do not, so throughput is very high under heavy concurrency (compared with NAT mode).
  3. Because DR mode forwards by rewriting MAC addresses, all RS nodes and the director must be on the same LAN.
  4. Each RS must bind the VIP on its lo interface and configure ARP suppression.
  5. The default gateway of the RS nodes does not need to point to the director; it should point to the upstream router so the RS can reach the outside directly.
  6. Since the director in DR mode only rewrites the MAC address, it cannot rewrite the destination port, so the RS must serve on the same port as the VIP.

Pros and cons
Virtual serving via direct routing. DR mode forwards a request to a real server by rewriting the destination MAC address of the request packet, and the real server sends its response directly back to the client. This gives the cluster excellent scalability, avoids the overhead of IP tunneling, and does not require the real servers to support an IP tunneling protocol. However, the director and the real servers must each have a NIC on the same physical segment, i.e. they must sit on the same LAN.

How it works
LVS balances load by manipulating IP traffic; ipvsadm is the userspace tool that configures it. It is installed on the director, where a virtual IP (VIP) is exposed for external access. A user accesses the VIP, the request reaches the director, the director picks a real server according to the configured rules, and after processing the response goes back to the client.

1. LVS director node

(192.168.238.5)

# Clear the existing ipvs rules
ipvsadm -C
# Bring down the bridged NIC; skip if you did not add one
ifdown ens36

ifconfig ens33:1 192.168.238.5/24 up

ifconfig ens33:1
# Output:
ens33:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.238.5  netmask 255.255.255.0  broadcast 192.168.238.255
        ether 00:0c:29:f7:96:27  txqueuelen 1000  (Ethernet)

ipvsadm -A -t 192.168.238.5:80 -s rr
ipvsadm -a -t 192.168.238.5:80 -r 192.168.238.3 -g -w 1
ipvsadm -a -t 192.168.238.5:80 -r 192.168.238.4 -g -w 1
ipvsadm -ln

# Output:
[root@localhost ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.238.5:80 rr
  -> 192.168.238.3:80             Route   1      0          0         
  -> 192.168.238.4:80             Route   1      0          0         

2. The r1 and r2 nodes

(192.168.238.3-4)

vim /etc/sysconfig/network-scripts/ifcfg-ens33
# Change the gateway back to GATEWAY=192.168.238.2; not needed if you never added the bridged NIC
systemctl restart network
ifconfig lo:1 192.168.238.5/32 up
ifconfig lo:1
echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce 
echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
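
arp_ignore=1 makes the RS answer ARP requests only for addresses configured on the interface the request arrived on, so it will not answer ARP for the VIP bound on lo; arp_announce=2 makes it pick the best local source address when sending ARP and thus avoid advertising the VIP. Like the /proc writes earlier, these do not survive a reboot; a minimal sketch of the persistent form:

# Persistent equivalents of the ARP-suppression settings above
cat >> /etc/sysctl.conf <<'EOF'
net.ipv4.conf.lo.arp_ignore = 1
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2
EOF
sysctl -p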

LVS+DR+Nginx+keepalived

LVS in DR mode provides load balancing but has no health checking, i.e. it cannot tell whether the backend service behind a rule is actually up, so we install keepalived. keepalived implements high availability with VRRP (Virtual Router Redundancy Protocol), which lets us run multiple directors in front of multiple backend servers.

1. Install nginx on r1 and r2

yum install -y gcc-c++ pcre pcre-devel zlib zlib-devel openssl openssl-devel
wget http://nginx.org/download/nginx-1.21.0.tar.gz
tar -zxvf nginx-1.21.0.tar.gz
cd nginx-1.21.0
./configure --prefix=/usr/local/nginx
make && make install
# 启动
/usr/local/nginx/sbin/nginx
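
Optionally, a few quick checks that the build and startup worked (a sketch):

/usr/local/nginx/sbin/nginx -t   # validate the configuration
ss -lntp | grep ':80'            # confirm nginx is listening on port 80
curl -I http://127.0.0.1         # expect an HTTP 200 from the local nginx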

# Run on both r1 and r2; the content inside the <h1> tag must differ per node
echo "<h1>welcome to use RS1 192.168.238.3</h1>" > /usr/local/nginx/html/index.html
# Reload nginx
/usr/local/nginx/sbin/nginx -s reload

# Test from r1 and r2; if you did not configure lo:1 earlier you can skip this and go straight to editing the script below
[root@rs1 ~]# curl 192.168.238.5
<h1>welcome to use RS1 192.168.238.3</h1>
[root@rs2 ~]# curl 192.168.238.5
<h1>welcome to use RS2 192.168.238.4</h1>

# After testing, remember to remove lo:1 and reset the ARP parameters
ifconfig lo:1 down
echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
sysctl -p

2. Create and run the realserver script on both r1 and r2

The r1 script is shown here:

[root@rs1 ~]# vim /etc/init.d/realserver
#!/bin/bash
SNS_VIP=192.168.238.88
. /etc/init.d/functions
case "$1" in
start)
       ifconfig lo:1 $SNS_VIP netmask 255.255.255.255 broadcast $SNS_VIP up
       /sbin/route add -host $SNS_VIP dev lo:1
       echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
       echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
       echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
       echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
       sysctl -p >/dev/null 2>&1
       echo "RealServer Start OK"
       ;;
stop)
       ifconfig lo:1 down
       route del $SNS_VIP >/dev/null 2>&1
       echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
       echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
       echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
       echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
       echo "RealServer Stoped"
       ;;
*)
       echo "Usage: $0 {start|stop}"
       exit 1
esac
exit 0

[root@rs1 ~]# chmod +x /etc/init.d/realserver 
[root@rs1 ~]# chmod +x /etc/init.d/functions 
[root@rs1 ~]# /etc/init.d/realserver start
RealServer Start OK
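
The script is not registered as a service, so it will not run again after a reboot. A sketch (not from the original post) of wrapping it in a small systemd unit so the VIP binding and ARP settings come back automatically:

cat > /etc/systemd/system/realserver.service <<'EOF'
[Unit]
Description=Bind LVS VIP on lo and set ARP suppression
After=network.target

[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=/etc/init.d/realserver start
ExecStop=/etc/init.d/realserver stop

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable realserver
systemctl start realserver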

3. On the LVS node

yum install -y keepalived

[root@lvs ~]# cp /etc/keepalived/keepalived.conf{,.bak}  # back up the original file
[root@lvs ~]# > /etc/keepalived/keepalived.conf       # empty the original file
[root@lvs ~]# vim /etc/keepalived/keepalived.conf      # edit keepalived.conf

# Annotated configuration file
! Configuration File for keepalived

global_defs {
   notification_email {
    852221040@qq.com
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id lvs
}
vrrp_instance VI_1 {
    state MASTER  # Keepalived role: MASTER for the primary, BACKUP for the standby
    interface ens33
    virtual_router_id 55  # virtual router ID; must be identical on master and backup
    priority 150  # priority; the higher the value, the higher the priority
    advert_int 1  # advertisement interval, default 1s
    authentication {
        auth_type PASS  # authentication type
        auth_pass 1111  # authentication password
    }
    virtual_ipaddress {
        192.168.238.5/24   # VIP of the director; several may be listed, one per line
    }
}

virtual_server 192.168.238.5 80 {  # VIP 192.168.238.5 and port 80 that LVS serves externally
    delay_loop 6  # health-check interval, in seconds
    lb_algo wrr   # load-balancing scheduling algorithm: wrr
    lb_kind DR    # LVS forwarding mode: DR
    nat_mask 255.255.255.0
    persistence_timeout 20  # session persistence timeout
    protocol TCP  # use TCP to check the real servers' health

    real_server 192.168.238.3 80 {  # real server address and port
        weight 100  # weight
        TCP_CHECK {
            connect_timeout 10  # connection timeout, in seconds
            nb_get_retry 3      # number of connection retries
            connect_port 80     # port to connect to
        }
    }
    
    real_server 192.168.238.4 80 {
		weight 100
		TCP_CHECK {
			connect_timeout 10
			nb_get_retry 3
			connect_port 80
    	}
    }
}

Try to avoid comments and extra whitespace in the actual configuration file, as they can cause errors! If you deploy a backup LVS, the backup load balancer is configured almost the same as the master; only the following three items in keepalived.conf change:

  1. Change state from MASTER to BACKUP
  2. Change priority from 150 to 120
  3. Change router_id from lvs to lvsbackup

Master LVS keepalived.conf

! Configuration File for keepalived

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.238.88
    }
}

virtual_server 192.168.238.88 80 {
    delay_loop 15
    lb_algo rr 
    lb_kind DR
    persistence_timeout 50
    protocol TCP
    real_server 192.168.238.3 80 {
	weight 1
      TCP_CHECK {
		connect_timeout 10
		retry 3
		delay_before_retry 4
        }
    }
    real_server 192.168.238.4 80 {
	weight 1
	TCP_CHECK {
		connect_timeout 10
		retry 3
		delay_before_retry 4
    	}
    }
}

Backup LVS keepalived.conf

! Configuration File for keepalived

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 120
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.238.88
    }
}

virtual_server 192.168.238.88 80 {
    delay_loop 15
    lb_algo rr 
    lb_kind DR
    persistence_timeout 50
    protocol TCP
    real_server 192.168.238.3 80 {
	weight 1
      TCP_CHECK {
		connect_timeout 10
		retry 3
		delay_before_retry 4
        }
    }
    real_server 192.168.238.4 80 {
	weight 1
	TCP_CHECK {
		connect_timeout 10
		retry 3
		delay_before_retry 4
    	}
    }
}

After the configuration is complete, start keepalived and access the VIP:

systemctl start keepalived
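
Optionally enable it at boot and confirm the VRRP state from the logs (a sketch):

systemctl enable keepalived
systemctl status keepalived
journalctl -u keepalived --no-pager | tail -n 20   # look for "Entering MASTER STATE" / "Entering BACKUP STATE"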

# Running this on both the master and backup LVS should show the same virtual server
[root@lvs ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.238.88:80 rr
  -> 192.168.238.3:80             Route   1      0          0         
  -> 192.168.238.4:80             Route   1      0          0 
  
# Check whether the VIP exists on the master LVS
[root@lvs1 ~]# ip addr |grep 192.168.238.88	
    inet 192.168.238.88/24 brd 192.168.238.255 scope global secondary ens33:1
# On the backup LVS the same command returns nothing
[root@lvs2 ~]# ip addr |grep 192.168.238.88	
[root@lvs2 ~]#

# Test access to the VIP
[root@r1 ~]# curl 192.168.238.88
<h1>welcome to use RS1 192.168.238.3</h1>
[root@r2 ~]# curl 192.168.238.88
<h1>welcome to use RS2 192.168.238.4</h1>

Test the active/standby failover:

# Stop keepalived on the master LVS and see whether the backup takes over
[root@lvs1 ~]# systemctl stop keepalived
[root@lvs1 ~]# ip addr |grep 192.168.238.88
[root@lvs1 ~]#

[root@lvs2 ~]# ip addr |grep 192.168.238.88	
    inet 192.168.238.88/24 brd 192.168.238.255 scope global secondary ens33:1

# Test access to the VIP again
[root@r1 ~]# curl 192.168.238.88
<h1>welcome to use RS1 192.168.238.3</h1>
[root@r2 ~]# curl 192.168.238.88
<h1>welcome to use RS2 192.168.238.4</h1>

# You can also stop nginx on r1 or r2 and check whether 192.168.238.88 still serves the other node's page; a sketch of that test follows
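
A sketch of that health-check test, using the nginx paths from earlier:

# On r1: stop nginx so the TCP check fails
/usr/local/nginx/sbin/nginx -s stop
# On the active LVS node: r1 should drop out of the real-server list after delay_loop seconds
watch -n 1 ipvsadm -L -n
# From any client: every request should now return the r2 page
curl 192.168.238.88
# On r1: start nginx again and it is added back automatically
/usr/local/nginx/sbin/nginx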