######1. Lab environment
- 172.25.29.1 salt-master server1 keepalived-master haproxy
- 172.25.29.2 salt-minion server2 httpd
- 172.25.29.3 salt-minion server3 nginx
- 172.25.29.4 salt-minion server4 keepalived-backup haproxy
######2. Load balancing with haproxy
#########server1:
[root@server1 salt]# vim /etc/yum.repos.d/rhel-source.repo //add the LoadBalancer (HA add-on) repo
[LoadBalancer]
name=LoadBalancer
baseurl=http://172.25.29.250/rhel6.5/LoadBalancer
gpgcheck=0
[root@server1 salt]# /etc/init.d/salt-minion start
[root@server1 salt]# pwd
/srv/salt
[root@server1 salt]# mkdir haproxy/
[root@server1 salt]# cd haproxy/
[root@server1 haproxy]# vim install.sls
haproxy-install:
  pkg.installed:
    - pkgs:
      - haproxy
[root@server1 haproxy]# salt server1 state.sls haproxy.install //push the haproxy install state to server1
[root@server1 haproxy]# mkdir files
[root@server1 haproxy]# cd files/
[root@server1 files]# cp /etc/haproxy/haproxy.cfg .
[root@server1 files]# ls
haproxy.cfg
[root@server1 files]# cd ..
[root@server1 haproxy]# vim install.sls
haproxy-install:
  pkg.installed:
    - pkgs:
      - haproxy
  file.managed:
    - name: /etc/haproxy/haproxy.cfg
    - source: salt://haproxy/files/haproxy.cfg
  service.running:
    - name: haproxy
    - reload: True
    - watch:
      - file: haproxy-install
[root@server1 haproxy]# cd files/
[root@server1 files]# ls
[root@server1 files]# vim haproxy.cfg
 63 frontend  main *:80
 64     default_backend             app
 65
 66 backend app
 67     balance     roundrobin
 68     server  app1 172.25.29.2:80 check
 69     server  app2 172.25.29.3:80 check
[root@server1 haproxy]# salt server1 state.sls haproxy.install
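Whether the push actually worked can be checked from the master with standard Salt execution modules (a verification step not in the original transcript):
[root@server1 haproxy]# salt server1 cmd.run 'rpm -q haproxy'
[root@server1 haproxy]# salt server1 service.status haproxy
The first command should return the installed haproxy package, the second should return True.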
#########server2:
[root@server2 conf]# cd /var/www/html/
[root@server2 html]# vim index.html
<h1>server2</h1>
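At this point round-robin balancing can be tested with curl against haproxy on server1; the client host below is illustrative, and server3's page is assumed to come from the nginx deployment set up earlier:
[root@foundation29 ~]# for i in $(seq 4); do curl -s 172.25.29.1; done
The responses should alternate between server2's page and server3's nginx page.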
######3. High availability with keepalived
#########server1:
[root@server1 salt]# pwd
/srv/salt
[root@server1 salt]# ls
_grains haproxy httpd keepalived nginx pkgs top.sls users
[root@server1 salt]# cd pkgs/
[root@server1 pkgs]# vim make.sls
make:
  pkg.installed:
    - pkgs:
      - pcre-devel
      - openssl-devel
      - gcc
[root@server1 salt]# mkdir keepalived
[root@server1 salt]# cd keepalived
[root@server1 keepalived]# mkdir files
[root@server1 keepalived]# cd files/
[root@server1 files]# ls
keepalived-2.0.6.tar.gz
[root@server1 files]# cd ..
[root@server1 keepalived]# vim install.sls
include:
  - pkgs.make

keepalived-install:
  file.managed:
    - name: /mnt/keepalived-2.0.6.tar.gz
    - source: salt://keepalived/files/keepalived-2.0.6.tar.gz
  cmd.run:
    - name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV &> /dev/null && make &> /dev/null && make install &> /dev/null
    - creates: /usr/local/keepalived
[root@server1 keepalived]# salt server4 state.sls keepalived.install
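The cmd.run state compiles keepalived from source on the minion, so the job takes a while; whether the build landed can be checked from the master (not part of the original transcript):
[root@server1 keepalived]# salt server4 cmd.run 'ls /usr/local/keepalived'
The listing should contain at least etc/ and sbin/ under the configured prefix.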
#########server4:
[root@server4 mnt]# cd /usr/local/keepalived/etc/rc.d/init.d/
[root@server4 init.d]# scp keepalived server1:/srv/salt/keepalived/files/
[root@server4 init.d]# cd /usr/local/keepalived/etc/keepalived
[root@server4 keepalived]# scp keepalived.conf server1:/srv/salt/keepalived/files/
#########server1:
[root@server1 keepalived]# ls files/
keepalived keepalived-2.0.6.tar.gz keepalived.conf
[root@server1 keepalived]# vim install.sls
include:
  - pkgs.make

keepalived-install:
  file.managed:
    - name: /mnt/keepalived-2.0.6.tar.gz
    - source: salt://keepalived/files/keepalived-2.0.6.tar.gz
  cmd.run:
    - name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV &> /dev/null && make &> /dev/null && make install &> /dev/null
    - creates: /usr/local/keepalived

/etc/keepalived:
  file.directory:
    - mode: 755

/etc/sysconfig/keepalived:
  file.symlink:
    - target: /usr/local/keepalived/etc/sysconfig/keepalived

/sbin/keepalived:
  file.symlink:
    - target: /usr/local/keepalived/sbin/keepalived
[root@server1 keepalived]# cd /srv/pillar/
[root@server1 pillar]# mkdir keepalived/
[root@server1 pillar]# cd keepalived/
[root@server1 keepalived]# vim install.sls
{% if grains['fqdn'] == 'server1' %}
state: MASTER
vrid: 29
priority: 100
{% elif grains['fqdn'] == 'server4' %}
state: BACKUP
vrid: 29
priority: 50
{% endif %}
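Once this pillar file and the pillar top file below are in place, the per-minion values can be refreshed and inspected with standard Salt commands; this check is not in the original:
[root@server1 keepalived]# salt '*' saltutil.refresh_pillar
[root@server1 keepalived]# salt '*' pillar.item state vrid priority
server1 should report MASTER/29/100 and server4 BACKUP/29/50.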
[root@server1 keepalived]# cd ..
[root@server1 pillar]# vim top.sls
base:
  '*':
    - web.install
    - keepalived.install
[root@server1 keepalived]# cd /srv/salt/keepalived/
[root@server1 keepalived]# vim service.sls
include:
  - keepalived.install

/etc/keepalived/keepalived.conf:
  file.managed:
    - source: salt://keepalived/files/keepalived.conf
    - template: jinja
    - context:
        STATE: {{ pillar['state'] }}
        VRID: {{ pillar['vrid'] }}
        PRIORITY: {{ pillar['priority'] }}

kp-service:
  file.managed:
    - name: /etc/init.d/keepalived
    - source: salt://keepalived/files/keepalived
    - mode: 755
  service.running:
    - name: keepalived
    - reload: True
    - watch:
      - file: /etc/keepalived/keepalived.conf
[root@server1 keepalived]# cd ..
[root@server1 salt]# vim top.sls
base:
  'server1':
    - haproxy.install
    - keepalived.service
  'server4':
    - haproxy.install
    - keepalived.service
  'roles:apache':
    - match: grain
    - httpd.apache
  'roles:nginx':
    - match: grain
    - nginx.service
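To confirm which states each minion will receive from this top file before running the highstate, state.show_top can be used (a check not shown in the original):
[root@server1 salt]# salt '*' state.show_top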
[root@server1 salt]# cd keepalived/files/
[root@server1 files]# vim keepalived.conf
  1 ! Configuration File for keepalived
  2
  3 global_defs {
  4    notification_email {
  5         root@localhost
  6    }
  7    notification_email_from keepalived@localhost
  8    smtp_server 127.0.0.1
  9    smtp_connect_timeout 30
 10    router_id LVS_DEVEL
 11    vrrp_skip_check_adv_addr
 12    #vrrp_strict
 13    vrrp_garp_interval 0
 14    vrrp_gna_interval 0
 15 }
 16
 17 vrrp_instance VI_1 {
 18     state {{ STATE }}
 19     interface eth0
 20     virtual_router_id {{ VRID }}
 21     priority {{ PRIORITY }}
 22     advert_int 1
 23     authentication {
 24         auth_type PASS
 25         auth_pass 1111
 26     }
 27     virtual_ipaddress {
 28         172.25.29.100
 29     }
 30 }
[root@server1 files]# salt '*' state.highstate //apply the highstate to all minions
[root@server1 files]# ip addr
inet 172.25.29.100/32 scope global eth0
When keepalived is stopped on server1, the VIP moves to server4; during the failover, access from a browser is not interrupted.
[root@server4 salt]# ip addr
inet 172.25.29.100/32 scope global eth0
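A simple way to watch the failover from a client is to poll the VIP while keepalived is being stopped on server1 (the client host and loop below are illustrative, not from the original):
[root@foundation29 ~]# while true; do curl -s -o /dev/null 172.25.29.100 && echo ok || echo fail; sleep 1; done
[root@server1 ~]# /etc/init.d/keepalived stop
The loop should keep printing ok while the address shown by ip addr moves from server1 to server4.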
######4. Health check for haproxy
By default haproxy health-checks its backend servers, but keepalived does not check whether haproxy itself is still running. The following approach adds that check.
[root@server1 opt]# pwd
/opt
[root@server1 opt]# vim check_haproxy.sh //write the health-check script
#!/bin/bash
# restart haproxy if it is not running; if the restart fails, stop keepalived so the VIP fails over
/etc/init.d/haproxy status &> /dev/null || /etc/init.d/haproxy restart &> /dev/null
if [ $? -ne 0 ]; then
    /etc/init.d/keepalived stop &> /dev/null
fi
[root@server1 opt]# chmod +x check_haproxy.sh
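The keepalived.conf pushed below references /opt/check_haproxy.sh on both keepalived nodes, but the script above was only created by hand on server1. One way to get it onto server4 as well (an addition not shown in the original) is to copy it into the Salt tree and manage it from keepalived/service.sls:
[root@server1 opt]# cp check_haproxy.sh /srv/salt/keepalived/files/
# appended to /srv/salt/keepalived/service.sls (hypothetical state ID)
/opt/check_haproxy.sh:
  file.managed:
    - source: salt://keepalived/files/check_haproxy.sh
    - mode: 755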
[root@server1 opt]# cd /srv/salt/keepalived/files
[root@server1 files]# vim keepalived.conf
  1 ! Configuration File for keepalived
  2 vrrp_script check_haproxy {
  3     script "/opt/check_haproxy.sh"
  4     interval 2
  5     weight 2
  6 }
... ...
 34     track_script {
 35         check_haproxy
 36     }
 37 }
[root@server1 files]# salt server1 state.sls keepalived.service
[root@server1 files]# salt server4 state.sls keepalived.service //push the keepalived state to server1 and server4 again
[root@server1 files]# ps ax
14018 ? Ss 0:02 /usr/sbin/haproxy -D -f /etc/haproxy/haproxy.cfg -p /v
[root@server1 files]# /etc/init.d/haproxy stop
[root@server1 files]# ps ax //haproxy has been restarted automatically
24814 ? Ss 0:00 /usr/sbin/haproxy -D -f /etc/haproxy/haproxy.cfg -p /var/r