Topology of this experiment: a client, two directors running keepalived (lvs1/proxy1 at 192.168.137.160 and lvs2/proxy2 at 192.168.137.161, sharing the VIP 192.168.137.66), and two real servers (rs1 at 192.168.137.162 and rs2 at 192.168.137.163), all on the 192.168.137.0/24 network. (Topology diagram omitted.)
Before starting the experiment, synchronize the time on all machines, stop the firewall, and disable SELinux.
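A minimal sketch of that preparation, to be run on every machine (the NTP server name is only an example; substitute your own time source):

# stop the firewall and put SELinux in permissive mode for this experiment
systemctl stop firewalld && systemctl disable firewalld
setenforce 0        # temporary; edit /etc/selinux/config to make it permanent
# synchronize the clock (assumes ntpdate is installed)
ntpdate ntp.aliyun.com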
Install keepalived on both lvs1 and lvs2:
yum install keepalived
Configuration on lvs1:
[root@proxy1 ~]# vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        root@localhost                  # mailbox(es) to receive keepalived notifications; more than one may be listed
    }
    notification_email_from keepalived@localhost    # sender address used for notifications
    smtp_server 127.0.0.1               # SMTP server
    smtp_connect_timeout 30             # SMTP connection timeout
    router_id proxy1                    # ID of this keepalived node; note it must be unique per server
    vrrp_mcast_group4 224.1.1.1         # IPv4 multicast group for VRRP advertisements
}
vrrp_instance VI_1 {                    # define a VRRP instance; VI_1 is the first group
    state MASTER                        # role: MASTER | BACKUP
    interface ens33                     # network interface to bind to
    virtual_router_id 66                # VRID (1-255); must match on both nodes
    priority 100                        # priority; the higher value wins the election
    advert_int 1                        # advertisement interval, default 1s
    authentication {
        auth_type PASS                  # authentication type; PASS means a plain password string
        auth_pass 123456                # the password string
    }
    virtual_ipaddress {                 # floating address(es), i.e. the VIP; more than one may be listed
        192.168.137.66/24
    }
}
virtual_server 192.168.137.66 80 {      # VIP and port
    delay_loop 6                        # health-check interval for the real servers, in seconds
    lb_algo rr                          # scheduling algorithm
    lb_kind DR                          # LVS forwarding mode
    #persistence_timeout 50             # session persistence time
    protocol TCP                        # protocol type
    sorry_server 127.0.0.1 80           # fallback server used when all real servers are down
    real_server 192.168.137.162 80 {    # define a real server
        weight 1                        # weight
        HTTP_GET {                      # HTTP health check
            url {
                path /
                status_code 200
            }
            connect_timeout 3           # connection timeout
            nb_get_retry 3              # number of retries
            delay_before_retry 3        # delay between retries
        }
    }
    real_server 192.168.137.163 80 {
        weight 1
        HTTP_GET {
            url {
                path /
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
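One note on the sorry_server line above: when every real server fails its health check, keepalived directs requests to 127.0.0.1:80 on the director itself, so the director needs something listening there. A minimal sketch, assuming httpd is used for the fallback page (the page text is just an example):

yum install httpd
echo "sorry, under maintenance" > /var/www/html/index.html
systemctl start httpd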
Configuration on lvs2 (identical except for router_id, state, and priority):
[root@proxy2 ~]# vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        root@localhost
    }
    notification_email_from keepalived@localhost
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id proxy2
    vrrp_mcast_group4 224.1.1.1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 66
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        192.168.137.66/24
    }
}
virtual_server 192.168.137.66 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    #persistence_timeout 50
    protocol TCP
    sorry_server 127.0.0.1 80
    real_server 192.168.137.162 80 {
        weight 1
        HTTP_GET {
            url {
                path /
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.137.163 80 {
        weight 1
        HTTP_GET {
            url {
                path /
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
Now restart the keepalived service on both lvs1 and lvs2:
[root@proxy1 ~]# systemctl restart keepalived
[root@proxy2 ~]# systemctl restart keepalived
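Optionally, enable the service so it also starts at boot:

[root@proxy1 ~]# systemctl enable keepalived
[root@proxy2 ~]# systemctl enable keepalived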
[root@proxy1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:11:c7:a7 brd ff:ff:ff:ff:ff:ff
inet 192.168.137.160/24 brd 192.168.137.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.137.66/24 scope global secondary ens33
valid_lft forever preferred_lft forever
inet6 fe80::1b2:ee8f:644e:fb16/64 scope link
valid_lft forever preferred_lft forever
You can see that the VIP now appears as a second address on ens33.
Now check lvs2:
[root@proxy2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:79:9e:98 brd ff:ff:ff:ff:ff:ff
inet 192.168.137.161/24 brd 192.168.137.255 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::8b47:ff57:3278:b565/64 scope link
valid_lft forever preferred_lft forever
inet6 fe80::1b2:ee8f:644e:fb16/64 scope link tentative dadfailed
valid_lft forever preferred_lft forever
There is no extra address here, because lvs1 is configured with the higher priority; only when lvs1 fails does the VIP move to lvs2 automatically.
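You can watch this election happen: the MASTER multicasts a VRRP advertisement to the group configured above (224.1.1.1) every advert_int second. A quick check, assuming tcpdump is installed:

[root@proxy2 ~]# tcpdump -nn -i ens33 host 224.1.1.1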
Now stop the keepalived service on lvs1 and watch the address change:
[root@proxy1 ~]# systemctl stop keepalived
[root@proxy1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:11:c7:a7 brd ff:ff:ff:ff:ff:ff
inet 192.168.137.160/24 brd 192.168.137.255 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::1b2:ee8f:644e:fb16/64 scope link
valid_lft forever preferred_lft forever
[root@proxy1 ~]#
[root@proxy2 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:79:9e:98 brd ff:ff:ff:ff:ff:ff
inet 192.168.137.161/24 brd 192.168.137.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.137.66/24 scope global secondary ens33
valid_lft forever preferred_lft forever
inet6 fe80::8b47:ff57:3278:b565/64 scope link
valid_lft forever preferred_lft forever
inet6 fe80::1b2:ee8f:644e:fb16/64 scope link tentative dadfailed
valid_lft forever preferred_lft forever
At this point you can see the VIP has moved over to lvs2.
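Since lvs1 is the MASTER with the higher priority and keepalived preempts by default, starting the service on lvs1 again should pull the VIP back (we will want lvs1 active for the tests below):

[root@proxy1 ~]# systemctl start keepalived
[root@proxy1 ~]# ip a | grep 192.168.137.66    # the VIP should reappear on ens33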
Next, install apache on rs1 and rs2:
yum install httpd
[root@rs1 ~]# echo "rs1" >> /var/www/html/index.html
[root@rs2 ~]# echo "rs2" >> /var/www/html/index.html
Then start the service on both:
systemctl start httpd
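Before moving on, it is worth confirming each page is served locally:

[root@rs1 ~]# curl http://127.0.0.1/    # should print: rs1
[root@rs2 ~]# curl http://127.0.0.1/    # should print: rs2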
Note: because this is based on the LVS DR model, each real server must hold the VIP locally but must not answer ARP requests for it from other hosts.
The following script configures the VIP (on a loopback alias) and the ARP kernel parameters:
#!/bin/bash
# Bind the VIP to a loopback alias and tune ARP behaviour for LVS-DR.
vip=192.168.137.66
mask='255.255.255.255'
dev=lo:1

case $1 in
start)
    # Do not answer ARP requests for addresses not on the receiving interface,
    # and do not announce the VIP as a source address in ARP.
    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    ifconfig $dev $vip netmask $mask    #broadcast $vip up
    #route add -host $vip dev $dev
    echo "The RS Server is Ready!"
    ;;
stop)
    ifconfig $dev down
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
    echo "The RS Server is Canceled!"
    ;;
*)
    echo "Usage: $(basename $0) start|stop"
    exit 1
    ;;
esac
iptables -F && setenforce 0 && systemctl stop firewalld
Run this script on both rs1 and rs2.
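After the script runs, you can verify that the VIP is bound to the loopback alias and that the ARP kernel parameters took effect:

ip addr show lo                          # should show 192.168.137.66/32 labelled lo:1
sysctl net.ipv4.conf.all.arp_ignore      # should print 1
sysctl net.ipv4.conf.all.arp_announce    # should print 2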
Next, use ipvsadm on the directors to inspect the LVS rules (the command must be installed first, e.g. yum install ipvsadm):
[root@proxy1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.137.66:80 rr
  -> 192.168.137.162:80           Route   1      0          0
  -> 192.168.137.163:80           Route   1      0          0
[root@proxy2 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.137.66:80 rr
  -> 192.168.137.162:80           Route   1      0          0
  -> 192.168.137.163:80           Route   1      0          0
With that, everything is configured. Now test from the client machine:
[root@client ~]# curl 192.168.137.66
rs2
[root@client ~]# curl 192.168.137.66
rs1
[root@client ~]# curl 192.168.137.66
rs2
[root@client ~]# curl 192.168.137.66
rs1
[root@client ~]# curl 192.168.137.66
rs2
[root@client ~]# curl 192.168.137.66
rs1
[root@client ~]# curl 192.168.137.66
rs2
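The replies alternating between rs1 and rs2 confirm round-robin scheduling; on the active director the per-server connection and packet counters should also grow evenly:

[root@proxy1 ~]# ipvsadm -ln --stats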
OK, the test passes. Next, stop the apache service on rs1 to see whether keepalived detects the real server going down:
[root@rs1 ~]# systemctl stop httpd
Check the scheduling rules on lvs1:
[root@proxy1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.137.66:80 rr
  -> 192.168.137.163:80           Route   1      0          8
As you can see, 192.168.137.162 is no longer in the scheduling table: the health-check function works.
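As one further check of the sorry_server directive, stopping httpd on rs2 as well should empty the real-server table, and curl against the VIP should then return the director's local fallback page (assuming a page was set up on the director as sketched earlier):

[root@rs2 ~]# systemctl stop httpd
[root@client ~]# curl 192.168.137.66    # expect the director's maintenance page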
Takeaways:
LVS is load-balancing software that works at layer 4 of the network stack, which gives it a performance advantage that layer-7 load balancers such as Nginx cannot match. LVS offers four load-balancing modes; this experiment covered only one of them, the DR (direct routing) mode.