本文作者:五行哥
QQ:1226032602
E-mail:1226032602@qq.com
keepalived简介
keepalived官方网站
http://www.keepalived.org/doc/
https://www.keepalived.org/manpage.html
http://www.linux-ha.org/wiki/Main_Page
Keepalived为负载平衡和高可用性提供框架。负载平衡框架依赖于众所周知且广泛使用的Linux虚拟服务器(IPVS)内核模块,该模块提供第4层负载平衡。Keepalived实现了一组运行状况检查程序,以根据其运行状况动态地和自适应地维护和管理负载平衡的服务器池。通过虚拟冗余路由协议(VRRP)实现高可用性。VRRP是路由器故障转移的基础。此外,keepalived为VRRP有限状态机实现了一组挂钩,提供低级和高速协议交互。每个Keepalived框架可以单独使用,也可以一起使用,以提供灵活的基础架构
高可用功能通过VRRP协议(Virtual Router Redundancy Protocol,虚拟路由器冗余协议)来实现。
主节点通过多播地址向备节点发送多播包
备节点不能收到主节点组播包时占用VIP地址
keepalived三大功能
1、管理lvs,启动停止配置lvs
2、实现对lvs节点的健康检查(节点实现健康检查)
3、高可用功能(任意服务高可用)
keepalived安装
安装
yum install -y keepalived
配置文件
主
[root@lb02 keepalived]# cat keepalived.conf
global_defs {
router_id LVS_DEVEL
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.3/24 dev eth0 label eth0:3
}
}
备
[root@lb02 keepalived]# cat keepalived.conf
global_defs {
router_id LVS_DEVEL1
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.3/24 dev eth0 label eth0:3
}
}
多实例双主
机器1
[root@lb01 keepalived]# cat keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.3/24 dev eth0 label eth0:3
}
}
vrrp_instance VI_2 {
state BACKUP
interface eth0
virtual_router_id 52
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1112
}
virtual_ipaddress {
10.0.0.4/24 dev eth0 label eth0:4
}
}
机器2
[root@lb02 conf]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL1
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.3/24 dev eth0 label eth0:3
}
}
vrrp_instance VI_2 {
state MASTER
interface eth0
virtual_router_id 52
priority 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1112
}
virtual_ipaddress {
10.0.0.4/24 dev eth0 label eth0:4
}
}
主恢复后不接管的参数
MASTER
/etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_instance VI_1 {
state BACKUP
nopreempt
interface eth0
virtual_router_id 51
priority 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.3/24 dev eth0 label eth0:3
}
}
BACKUP
/etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL1
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.3/24 dev eth0 label eth0:3
}
}
keepalived检测
keepalived备节点监控脚本
[root@lb02 scripts]# cat check_split_brain.sh
#!/bin/sh
# Split-brain detector — run on the BACKUP node.
# Split brain = the MASTER is still reachable over the network AND this
# (backup) node is also holding the VIP, i.e. both nodes own the VIP.
lb01_vip=10.0.0.12   # VIP that should normally live on the master
lb01_ip=10.0.0.7     # master's real IP (the "heartbeat" target)
while true
do
    # -c 2: two probes; -W 3: per-reply timeout (seconds).
    # grep -w anchors the VIP on word boundaries so 10.0.0.12 does not
    # also match 10.0.0.120 (the original unanchored grep did).
    if ping -c 2 -W 3 "$lb01_ip" >/dev/null 2>&1 \
       && ip addr | grep -qw "$lb01_vip"
    then
        echo "ha is split brain.warning."
    else
        echo "ha is ok"
    fi
    sleep 5
done
两边web服务监听VIP,无法启动web服务,解决方法
[root@lb01 keepalived]# echo 'net.ipv4.ip_nonlocal_bind = 1' >> /etc/sysctl.conf
[root@lb01 keepalived]# sysctl -p
worker_processes 1;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
upstream pool {
server 10.0.0.7:80 weight=1 max_fails=3 fail_timeout=20s;
server 10.0.0.8:80 weight=1 max_fails=3 fail_timeout=20s;
}
server {
listen 10.0.0.3:80;
server_name www.etiantian.org;
location / {
proxy_pass http://pool;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
}
}
server {
listen 10.0.0.4:80;
server_name bbs.etiantian.org;
location / {
proxy_pass http://pool;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
}
}
}
keepalived实例
主
cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id vcloudproxy1
}
vrrp_script chk_all {
script "/etc/keepalived/check_all.sh"
interval 2
}
vrrp_instance vcloudproxy {
state MASTER
interface eth0
virtual_router_id 16
priority 100
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.20.8.19/24 dev eth0
}
track_script {
chk_all
}
}
备
cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id vcloudproxy2
}
vrrp_script chk_all {
script "/etc/keepalived/check_all.sh"
interval 2
}
vrrp_instance vcloudproxy {
state BACKUP
interface eth0
virtual_router_id 16
priority 90
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.20.8.19/24 dev eth0
}
track_script {
chk_all
}
}
检测脚本
cat /etc/keepalived/check_all.sh
#!/bin/bash
# keepalived vrrp_script helper: if nginx has no processes, wait 2s and
# re-check (grace period for a restart); if still gone, stop keepalived
# so the VIP fails over to the peer node.
nginx_count=$(ps -C nginx --no-header | wc -l)
# Quote the expansion so the test never degenerates to "[ -eq 0 ]".
if [ "$nginx_count" -eq 0 ]; then
    sleep 2
    if [ "$(ps -C nginx --no-header | wc -l)" -eq 0 ]; then
        killall keepalived
    fi
fi
监控nginx停止后调整keepalived实例权重来切换主备
cat /server/scripts/chk_nginx_proxy.sh
#!/bin/bash
# keepalived vrrp_script check: exit 0 when an nginx listening socket
# exists, exit 1 otherwise (keepalived then applies the negative weight).
# grep -q replaces the redundant "grep ... &>/dev/null" redirect, and the
# positive test removes the double-negative "if ! ..." flow.
if ss -lntup | grep -q nginx; then
    exit 0
else
    exit 1
fi
cat /server/scripts/chk_nginx_proxy.sh
#!/bin/bash
# keepalived vrrp_script check: exit 0 when a process named exactly
# $service_name is running, exit 1 otherwise.
# pgrep -x replaces the original eval'd "ps -ef|grep -v grep|grep nginx"
# pipeline, which matched this script's OWN command line (the path
# contains "nginx") and therefore always reported success.
service_name=nginx
if pgrep -x "$service_name" >/dev/null 2>&1; then
    exit 0
else
    exit 1
fi
[root@lb01 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
router_id lb01
}
vrrp_script chk_nginx_proxy {
script "/server/scripts/chk_nginx_proxy.sh"
interval 1
weight -60
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 55
priority 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.3/24 dev eth0 label eth0:1
}
track_script {
chk_nginx_proxy
}
}
[root@lb02 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
router_id lb02
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 55
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.3/24 dev eth0 label eth0:1
}
}
多播地址的配置
global_defs {
router_id LVS_19
vrrp_mcast_group4 224.0.0.19 #<==这个就是指定多播地址的配置
}
keepalived 日志
sed -i '14 s#KEEPALIVED_OPTIONS="-D"#KEEPALIVED_OPTIONS="-D -d -S 0"#g' /etc/sysconfig/keepalived
/etc/rsyslog.conf
#keepalived
local0.* /var/log/keepalived.log
[root@lb01 ~]# egrep "local0" /etc/rsyslog.conf
*.info;mail.none;authpriv.none;cron.none;local0.none /var/log/messages
local0.* /var/log/keepalived.log
keepalived 脑裂问题
在指定时间内,无法检测到对方的心跳消息,各自取得服务所有权,两台机器占用同一个VIP地址
原因
心跳线链路故障
防火墙
网卡配置问题
多台keepalived
mcast_src_ip #发送多播数据包时的源IP地址,这里注意了,这里实际上就是在那个地址上发送VRRP通告,这个非常重要,一定要选择稳定的网卡端口来发送,这里相当于heartbeat的心跳端口,如果没有设置那么就用默认的绑定的网卡的IP
unicast_peer #表示对端接收VRRP单播报文的IP地址
nopreempt #设置不抢占
keepalived1
cat /etc/keepalived/keepalived.conf
global_defs {
router_id node1
}
vrrp_instance http {
state BACKUP
interface eth0
virtual_router_id 61
priority 100
advert_int 1
mcast_src_ip 10.0.0.7
nopreempt
authentication {
auth_type PASS
auth_pass sqP05dQgMSlzrxHj
}
unicast_peer {
10.0.0.31
10.0.0.51
}
virtual_ipaddress {
10.0.0.16/24
}
}
keepalived2
cat /etc/keepalived/keepalived.conf
global_defs {
router_id node2
}
vrrp_instance http {
state BACKUP
interface eth0
virtual_router_id 61
priority 90
advert_int 1
mcast_src_ip 10.0.0.31
nopreempt
authentication {
auth_type PASS
auth_pass sqP05dQgMSlzrxHj
}
unicast_peer {
10.0.0.7
10.0.0.51
}
virtual_ipaddress {
10.0.0.16/24
}
}
keepalived3
cat /etc/keepalived/keepalived.conf
global_defs {
router_id node3
}
vrrp_instance http {
state BACKUP
interface eth0
virtual_router_id 61
priority 80
advert_int 1
mcast_src_ip 10.0.0.51
nopreempt
authentication {
auth_type PASS
auth_pass sqP05dQgMSlzrxHj
}
unicast_peer {
10.0.0.7
10.0.0.31
}
virtual_ipaddress {
10.0.0.16/24
}
}
附
VRRP同步组(synchroization group)配置范例
vrrp_sync_group VG_1 {
group {
http //http和mysql是实例名
mysql
}
notify_master /path/to/to_master.sh //表示当切换到master状态时,要执行的脚本
notify_backup /path_to/to_backup.sh //表示当切换到backup状态时,要执行的脚本
notify_fault "/path/fault.sh VG_1" //
notify /path/to/notify.sh
smtp_alert
}
lvs
http://www.linux-vs.org/Documents.html
http://www.linuxvirtualserver.org/zh/index.html
ipvsadm
http://zh.linuxvirtualserver.org/node/5
查看
ipvsadm -L -c
ipvsadm -Ln
添加虚拟服务器
ipvsadm -A|E -t|u|f virutal-service-address:port [-s scheduler] [-p
[timeout]] [-M netmask]
ipvsadm -A -t 10.0.0.3:80 -s wrr -p 20
删除虚拟服务器
ipvsadm -D -t|u|f virtual-service-address
添加真实服务器
ipvsadm -a|e -t|u|f service-address:port -r real-server-address:port
[-g|i|m] [-w weight]
ipvsadm -a -t 10.0.0.3:80 -r 10.0.0.7:80 -g -w 1
ipvsadm -a -t 10.0.0.3:80 -r 10.0.0.8:80 -g -w 1
删除真实服务器
ipvsadm -d -t|u|f service-address -r server-address
清除
-C --clear 清除内核虚拟服务器表中的所有记录。
调度算法
-s --scheduler scheduler 使用的调度算法,有这样几个选项
rr|wrr|lc|wlc|lblc|lblcr|dh|sh|sed|nq,
默认的调度算法是: wlc.
模式
-g --gatewaying 指定LVS 的工作模式为直接路由模式(也是LVS 默认的模式)
-i --ipip 指定LVS 的工作模式为隧道模式
-m --masquerading 指定LVS 的工作模式为NAT 模式
ipvsadm --set 30 5 60 #设置tcp 超时时间
http://www.linux-vs.org/zh/lvs3.html
DR模式
http://www.linux-vs.org/VS-DRouting.html
load balancer
echo 1 > /proc/sys/net/ipv4/ip_forward
ipvsadm -A -t 172.26.20.110:23 -s wlc
ipvsadm -a -t 172.26.20.110:23 -r 172.26.20.112 -g
ip addr add 10.0.0.3/24 dev eth0 label eth0:0
ipvsadm -A -t 10.0.0.3:80 -s wrr -p 20
ipvsadm -a -t 10.0.0.3:80 -r 10.0.0.7:80 -g -w 1
ipvsadm -a -t 10.0.0.3:80 -r 10.0.0.8:80 -g -w 1
real server
echo 1 > /proc/sys/net/ipv4/ip_forward
echo 1 > /proc/sys/net/ipv4/conf/all/hidden
echo 1 > /proc/sys/net/ipv4/conf/lo/hidden
ifconfig lo:0 172.26.20.110 netmask 255.255.255.255 broadcast 172.26.20.110 up
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2
net.ipv4.conf.lo.arp_ignore = 1
net.ipv4.conf.lo.arp_announce = 2
/etc/sysconfig/network-scripts/ifcfg-lo:1
DEVICE=lo:1
IPADDR=10.0.0.3
NETMASK=255.255.255.255
ONBOOT=yes
NAME=loopback
NAT模式
http://www.linux-vs.org/VS-NAT.html
IPTUN模式
http://www.linux-vs.org/VS-IPTunneling.html
ipvs规则导出
ipvsadm-save -n > /root/ipvsadm.conf
ipvs规则导入
ipvsadm-restore < /root/ipvsadm.conf
HA
http://www.linux-vs.org/docs/ha/keepalived.html
vrrp_sync_group VG1 {
group {
VI_1
VI_2
}
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.23.8.80
}
}
vrrp_instance VI_2 {
state MASTER
interface eth1
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
172.18.1.254
}
}
virtual_server 10.23.8.80 80 {
delay_loop 6
lb_algo wrr
lb_kind NAT
persistence_timeout 600
protocol TCP
real_server 172.18.1.11 80 {
weight 100
TCP_CHECK {
connect_timeout 3
}
}
real_server 172.18.1.12 80 {
weight 100
TCP_CHECK {
connect_timeout 3
}
}
}
主
global_defs {
router_id lb01
}
vrrp_instance oldboy {
state MASTER
interface eth0
virtual_router_id 62
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1234
}
virtual_ipaddress {
10.0.0.3/24 dev eth0 label eth0:0
}
}
virtual_server 10.0.0.3 80 {
delay_loop 6
lb_algo wrr
lb_kind DR
nat_mask 255.255.255.0
persistence_timeout 50
protocol TCP
real_server 10.0.0.7 80 {
weight 1
TCP_CHECK {
connect_timeout 8
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
real_server 10.0.0.8 80 {
weight 1
TCP_CHECK {
connect_timeout 8
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
}
备
global_defs {
router_id lb02
}
vrrp_instance oldboy {
state BACKUP
interface eth0
virtual_router_id 62
priority 50
advert_int 1
authentication {
auth_type PASS
auth_pass 1234
}
virtual_ipaddress {
10.0.0.3/24 dev eth0 label eth0:0
}
}
virtual_server 10.0.0.3 80 {
delay_loop 6
lb_algo wrr
lb_kind DR
nat_mask 255.255.255.0
persistence_timeout 50
protocol TCP
real_server 10.0.0.7 80 {
weight 1
TCP_CHECK {
connect_timeout 8
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
real_server 10.0.0.8 80 {
weight 1
TCP_CHECK {
connect_timeout 8
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
}