测试环境
主机名 | 系统 | IP地址 | 网关 | 作用 |
---|---|---|---|---|
vmhost | rhel7.5 | 192.168.12.7/24 | 真机,提供虚拟机node1-4进行实践练习环境,同时进行测试 添加192.168.12.1(临时地址,作为通信网关使用) | |
node1 | rhel7.5 | 192.168.27.11/24(eth0) | 192.168.12.1 | 虚拟机node1,安装ipvsadm软件,查看keepalived配置情况 VIP(192.168.12.11/24)地址由keepalived控制添加,不需要配置 |
node2 | rhel7.5 | 192.168.27.12/24(eth0) | 192.168.12.1 | 虚拟机node2,安装httpd服务,模拟后端服务器 添加IP隧道模块,将VIP(192.168.12.11/32)地址填在tunl0中 |
node3 | rhel7.5 | 192.168.27.13/24(eth0) | 192.168.12.1 | 虚拟机node3,安装httpd服务,模拟后端服务器 添加IP隧道模块,将VIP(192.168.12.11/32)地址填在tunl0中 |
node4 | rhel7.5 | 192.168.27.14/24(eth0) | 192.168.12.1 | 虚拟机node4,作为LVS调度服务器的备用服务器,与node1实现高可用 VIP(192.168.12.11/24)地址由keepalived控制添加,不需要配置 |
软件
源码包:keepalived-2.0.19.tar.gz
编译依赖软件:gcc、openssl-devel
操作步骤
- 准备环节:node1、node4安装ipvsadm、gcc、openssl-devel
[root@node1 ~]# yum install -y ipvsadm gcc openssl-devel
[root@node4 ~]# yum install -y ipvsadm gcc openssl-devel
- 准备环节:node2、node3安装httpd,配置index.html文件,添加ipip模块,配置VIP地址,关闭内核rp_filter相关功能
[root@node2 ~]# yum install -y httpd
[root@node2 ~]# echo node2 > /var/www/html/index.html
[root@node2 ~]# modprobe ipip
[root@node2 ~]# ip addr add 192.168.12.11/32 dev tunl0
[root@node2 ~]# ip link set tunl0 up
[root@node2 ~]# sysctl -a | grep rp_filter
[root@node2 ~]# sysctl -w net.ipv4.conf.all.rp_filter=0
[root@node2 ~]# sysctl -w net.ipv4.conf.default.rp_filter=0
[root@node2 ~]# sysctl -w net.ipv4.conf.eth0.rp_filter=0
[root@node2 ~]# sysctl -w net.ipv4.conf.tunl0.rp_filter=0
[root@node2 ~]# sysctl -p
[root@node3 ~]# yum install -y httpd
[root@node3 ~]# echo node3 > /var/www/html/index.html
[root@node3 ~]# modprobe ipip
[root@node3 ~]# ip addr add 192.168.12.11/32 dev tunl0
[root@node3 ~]# ip link set tunl0 up
[root@node3 ~]# sysctl -a | grep rp_filter
[root@node3 ~]# sysctl -w net.ipv4.conf.all.rp_filter=0
[root@node3 ~]# sysctl -w net.ipv4.conf.default.rp_filter=0
[root@node3 ~]# sysctl -w net.ipv4.conf.eth0.rp_filter=0
[root@node3 ~]# sysctl -w net.ipv4.conf.tunl0.rp_filter=0
[root@node3 ~]# sysctl -p
- node1和node4开始安装keepalived,keepalived需要编译
[root@node1 opt]# ls
keepalived-2.0.19.tar.gz
[root@node1 opt]# tar zxf keepalived-2.0.19.tar.gz
#解压缩
[root@node1 opt]# ls
keepalived-2.0.19 keepalived-2.0.19.tar.gz
[root@node1 opt]# cd keepalived-2.0.19
[root@node1 keepalived-2.0.19]# ./configure --prefix=/usr/local/keepalived --with-init=systemd
#进行配置,指定安装路径,并生成systemd服务单元
[root@node1 keepalived-2.0.19]# make && make install
#编译并安装
[root@node1 ~]# ln -s /usr/local/keepalived/etc/keepalived/ /etc/
#创建软链接到/etc/目录下,开启服务时会查找/etc/keepalived/keepalived.conf文件
[root@node4 opt]# ls
keepalived-2.0.19.tar.gz
[root@node4 opt]# tar zxf keepalived-2.0.19.tar.gz
#解压缩
[root@node4 opt]# ls
keepalived-2.0.19 keepalived-2.0.19.tar.gz
[root@node4 opt]# cd keepalived-2.0.19
[root@node4 keepalived-2.0.19]# ./configure --prefix=/usr/local/keepalived --with-init=systemd
#进行配置,指定安装路径,并生成systemd服务单元
[root@node4 keepalived-2.0.19]# make && make install
#编译并安装
[root@node4 ~]# ln -s /usr/local/keepalived/etc/keepalived/ /etc/
#创建软链接到/etc/目录下,开启服务时会查找/etc/keepalived/keepalived.conf文件
- 配置node1的keepalived.conf文件,设置node1为master
[root@node1 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs { #全局定义模块
notification_email { #报警邮件配置
root@localhost #邮件通知地址,此处填写本机root账户
}
notification_email_from Alexandre.Cassen@firewall.loc #邮件发送信息
smtp_server 127.0.0.1 #邮件服务地址
smtp_connect_timeout 30 #邮件服务连接超时时间
router_id LVS_DEVEL #表示运行keepalived服务器的唯一标识。发邮件时显示在邮件主题的信息
vrrp_skip_check_adv_addr #如果接收到的报文和上一个报文来自同一个路由器,则不执行检查。默认是跳过检查,因为检查报文比较耗时
#vrrp_strict ##严格执行VRRP协议规范,此模式不支持节点单播,引起vip问题的就是这个参数,所以注释掉
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 { #虚拟服务器配置
state MASTER #状态有两个 MASTER 主 | BACKUP 从 ,node1设置为主
interface eth0 #对外的网卡接口,ifconfig 或者ip addr show可查看
virtual_router_id 51 #虚拟路由id,每个节点设置必须一样,相同的ID为一组
priority 100 #优先级
advert_int 1 #主往从发送多播消息的间隔时长
authentication { #设置验证信息,两个节点必须一致
auth_type PASS #类型
auth_pass 1111 #信息
}
virtual_ipaddress { #VIP设置,可以是多个
192.168.12.11
}
}
virtual_server 192.168.12.11 80 { #虚拟服务器对外提供服务的IP地址与端口
delay_loop 6 #健康检查间隔时间(秒)
lb_algo wrr #LB集群调度策略
lb_kind TUN #集群IPVS规则,DR NAT TUN
#persistence_timeout 50 #http服务会话时长,为了方便测试本次注释掉了
protocol TCP #使用协议TCP
real_server 192.168.27.12 80 { #真实服务器node2对外提供服务的IP地址与端口
weight 2 #调度策略权重 2
TCP_CHECK { #TCP服务检查设置信息
connect_timeout 3 #连接超时时间
retry 3 #尝试连接次数
delay_before_retry 3 #重试连接间隔时间
}
}
real_server 192.168.27.13 80 { #真实服务器node3对外提供服务的IP地址与端口
weight 1 #调度策略权重 1
TCP_CHECK {
connect_timeout 3
retry 3
delay_before_retry 3
}
}
}
- 配置node4的keepalived.conf文件,设置node4为backup
#由于node1配置好了,直接复制为node4
[root@node1 ~]# scp /etc/keepalived/keepalived.conf root@node4:/etc/keepalived/keepalived.conf
#配置node4配置文件
[root@node4 ~]# vim /etc/keepalived/keepalived.conf
.......仅显示了不同内容部分
vrrp_instance VI_1 { #虚拟服务器配置
state BACKUP #node4为BACKUP
interface eth0 #对外的网卡接口,ifconfig 或者ip addr show可查看
virtual_router_id 51 #虚拟路由id,每个节点设置必须一样,相同的ID为一组
priority 50 #优先级改为50,只要比MASTER节点配置的低即可
.......仅显示了不同内容部分
- 开启node1和node4的keepalived服务
[root@node1 ~]# systemctl start keepalived
[root@node4 ~]# systemctl start keepalived
测试效果
- node1上查看是否配置ipvs规则,vip是否添加在网卡中
[root@node1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.12.11:80 wrr
-> 192.168.27.12:80 Tunnel 2 0 5
-> 192.168.27.13:80 Tunnel 1 0 4
# 显示配置信息正确,TUN,VIP:192.168.12.11,WRR,RS:node2与node3
[root@node1 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 52:54:00:a4:30:62 brd ff:ff:ff:ff:ff:ff
inet 192.168.27.11/24 brd 192.168.27.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 192.168.12.11/32 scope global eth0
valid_lft forever preferred_lft forever
#VIP地址添加在eth0中
- 在测试客户端中测试效果
[root@vmhost images]# curl 192.168.12.11
node3
[root@vmhost images]# curl 192.168.12.11
node2
[root@vmhost images]# curl 192.168.12.11
node2
[root@vmhost images]# curl 192.168.12.11
node3
[root@vmhost images]# curl 192.168.12.11
node2
[root@vmhost images]# curl 192.168.12.11
node2
#可以实现调度轮询,且权重正确
- 关闭node2的httpd服务,查看是否可以进行健康检查
[root@vmhost images]# curl 192.168.12.11
node3
[root@vmhost images]# curl 192.168.12.11
curl: (7) Failed connect to 192.168.12.11:80; Connection refused
[root@vmhost images]# curl 192.168.12.11
curl: (7) Failed connect to 192.168.12.11:80; Connection refused
[root@vmhost images]# curl 192.168.12.11
node3
[root@vmhost images]# curl 192.168.12.11
node3
[root@vmhost images]# curl 192.168.12.11
node3
[root@vmhost images]# curl 192.168.12.11
node3
# 开始会有健康检查延时,后期显示正确,踢出了未开启服务的节点
- 测试master服务关闭后是否会迁移至backup:关闭node1的keepalived服务
[root@vmhost images]# curl 192.168.12.11
node3
[root@vmhost images]# curl 192.168.12.11
node3
[root@vmhost images]# curl 192.168.12.11
node3
#服务没有停止,backup接替了master节点的服务,起到了高可用效果