实验环境
rhel6.5
server1 salt-master,salt-minion keepalived+haproxy ip:172.25.254.100
server4 salt-minion keepalived+haproxy ip:172.25.254.104
server2 salt-minion ip:172.25.254.102
server3 salt-minion ip:172.25.254.103
虚拟ip 172.25.254.200
salt的基础配置见https://blog.csdn.net/weixin_42711549/article/details/83478459
源码编译keepalived
[root@server1 salt]# mkdir keepalived
[root@server1 salt]# mkdir keepalived/files
[root@server1 salt]# cd keepalived/
[root@server1 keepalived]# vim install.sls
[root@server1 keepalived]# cat install.sls
# Salt state: install build deps, compile keepalived 2.0.6 from source,
# and wire it into the SysV service layout.
keepalived-install:
  pkg.installed:
    - pkgs:
      - pcre-devel
      - gcc
      - openssl-devel

kp-install:
  file.managed:
    - name: /mnt/keepalived-2.0.6.tar.gz
    - source: salt://keepalived/files/keepalived-2.0.6.tar.gz
  cmd.run:
    - name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV &> /dev/null && make &> /dev/null && make install &> /dev/null
    # Skip the whole build when the install prefix already exists.
    - creates: /usr/local/keepalived

/etc/keepalived:
  file.directory:
    - mode: 755

# Symlinks so the stock init script / tooling find the binary and sysconfig.
/sbin/keepalived:
  file.symlink:
    - target: /usr/local/keepalived/sbin/keepalived

/etc/sysconfig/keepalived:
  file.symlink:
    - target: /usr/local/keepalived/etc/sysconfig/keepalived

/etc/init.d/keepalived:
  file.managed:
    - source: salt://keepalived/files/keepalived
    - mode: 755
[root@server1 keepalived]# ls files/
keepalived-2.0.6.tar.gz
在server4上
[root@server4 etc]# cd /usr/local/keepalived/etc/rc.d/init.d/
[root@server4 init.d]# ls
keepalived
[root@server4 init.d]# scp keepalived root@172.25.254.100:/srv/salt/keepalived/files
[root@server4 init.d]# cd /usr/local/keepalived/etc/
[root@server4 etc]# cd keepalived/
[root@server4 keepalived]# ls
keepalived.conf samples
[root@server4 keepalived]# scp keepalived.conf root@172.25.254.100:/srv/salt/keepalived/files
在server1上推送编译keepalived
[root@server1 files]# salt server4 state.sls keepalived.install
修改keepalived的配置文件
[root@server1 keepalived]# vim files/keepalived.conf
[root@server1 srv]# mkdir pillar
[root@server1 srv]# cd pillar/
[root@server1 pillar]# mkdir keepalived
[root@server1 pillar]# cd keepalived/
[root@server1 keepalived]# vim install.sls
[root@server1 keepalived]# cat install.sls
# Pillar: per-node keepalived VRRP role, keyed off each minion's fqdn grain.
# server1 is MASTER (priority 100), server4 is BACKUP (priority 50); both
# share virtual_router_id 25.
# NOTE(review): the keepalived.conf template references {{ STATE }}/{{ VRID }}/
# {{ PRIORITY }} in uppercase — confirm the (unshown) service state maps these
# lowercase pillar keys into those template variables.
{% if grains['fqdn'] == 'server1' %}
state: MASTER
vrid: 25
priority: 100
{% elif grains['fqdn'] == 'server4' %}
state: BACKUP
vrid: 25
priority: 50
{% endif %}
[root@server1 pillar]# cat top.sls
# Pillar top file: expose the web and keepalived pillar data to all minions.
base:
  '*':
    - web.install
    - keepalived.install
多节点推送
[root@server1 salt]# vim top.sls
[root@server1 salt]# cat top.sls
# State top file: haproxy+keepalived on the two load balancers (server1/server4);
# web backends are matched by their 'roles' grain.
base:
  'server1':
    - haproxy.service
    - keepalived.service
  'server4':
    - haproxy.service
    - keepalived.service
  'roles:apache':
    - match: grain
    - apache.service
  'roles:nginx':
    - match: grain
    - nginx.service
[root@server1 salt]# cd /srv/pillar/web/
[root@server1 web]# vim install.sls
# Pillar: per-node web server selection (server2 → httpd on port 80,
# server3 → nginx), keyed off the fqdn grain.
{% if grains['fqdn'] == 'server2' %}
webserver: httpd
port: 80
{% elif grains['fqdn'] == 'server3' %}
# NOTE(review): no 'port' key for nginx — confirm the nginx state defaults it.
webserver: nginx
{% endif %}
[root@server1 salt]# salt '*' state.highstate
在浏览器测试:
高可用测试
高可用部署成功
重新推送则会恢复虚拟ip 回到之前的主机上
[root@server1 salt]# salt '*' state.highstate
Haproxy的健康检测
[root@server1 ~]# cd /opt
[root@server1 opt]# vim check_haproxy.sh
#!/bin/bash
# Health check invoked by keepalived's vrrp_script: if haproxy is down, try to
# restart it; if the restart also fails, stop keepalived so the VIP fails over
# to the peer.
# Fix: the original fused the shebang and the first command onto one line
# ("#!/bin/bash /etc/init.d/haproxy status ..."), so the check never ran.
/etc/init.d/haproxy status &> /dev/null || /etc/init.d/haproxy restart &> /dev/null
if [ $? -ne 0 ]; then
  /etc/init.d/keepalived stop &> /dev/null
fi
[root@server1 ~]# chmod +x check_haproxy.sh
[root@server1 ~]# scp check_haproxy.sh server4:/opt/
[root@server1 ~]# cd /srv/salt/keepalived/files/
[root@server1 files]# vim keepalived.conf
! Configuration File for keepalived

# Run the haproxy health check every 2 seconds; a passing check adds
# 'weight' (2) to this node's effective priority.
vrrp_script check_haproxy {
    script "/opt/check_haproxy.sh"
    interval 2
    weight 2
}

global_defs {
    notification_email {
        root@localhost
    }
    notification_email_from keepalived@localhost
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id LVS_DEVEL
    vrrp_skip_check_adv_addr
    # vrrp_strict is disabled: it would drop VIP traffic without iptables rules.
    #vrrp_strict
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    # STATE / VRID / PRIORITY are rendered from the per-node pillar data.
    state {{ STATE }}
    interface eth0
    virtual_router_id {{ VRID }}
    priority {{ PRIORITY }}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        # Fixed: was 172.25.25.200 (dropped octet); the VIP declared for this
        # setup is 172.25.254.200.
        172.25.254.200
    }
    track_script {
        check_haproxy
    }
}
[root@server1 files]# salt '*' state.highstate
关掉haproxy之后查看haproxy进程,如果可以重启,就依旧访问本机,虚拟ip会在本机,如果haproxy不能重启,就会自动把keepalived关掉,而去访问另外一台,虚拟ip也就在另外一台上面
测试:
- haproxy关掉后可以重启
[root@server1 files]# /etc/init.d/haproxy stop
- haproxy关掉后不可以重启
server1:
[root@server1 files]# cd /etc/init.d/
[root@server1 init.d]# /etc/init.d/haproxy stop //关掉haproxy
[root@server1 init.d]# chmod -x haproxy //在重启限定时间内,去掉haproxy的可执行权限保证不能重启
在server4上查看是否有虚拟ip
server1
[root@server1 init.d]# chmod +x haproxy //加上可执行权限之后,虚拟ip会自动加到本机
[root@server1 init.d]# /etc/init.d/haproxy start