实验环境
主机 | IP | 服务 | 角色 |
---|---|---|---|
server1 | 172.25.254.1 | haproxy,keepalived | master |
server2 | 172.25.254.2 | httpd | minion |
server3 | 172.25.254.3 | nginx | minion |
server4 | 172.25.254.4 | haproxy,keepalived | minion |
配置
- server1、2、3、4都需要指定master
[root@server1 salt]# cd /etc/salt/
[root@server1 salt]# vim minion
master: 172.25.254.1
- 开启服务
[root@server1 salt]# systemctl start salt-master
[root@server1 salt]# systemctl start salt-minion
[root@server2 salt]# systemctl start salt-minion
[root@server3 salt]# systemctl start salt-minion
[root@server4 salt]# systemctl start salt-minion
- 交换公钥
[root@server1 salt]# salt-key -L
[root@server1 salt]# salt-key -A # 添加显示的所有主机
[root@server1 salt]# salt-key -L
用haproxy实现负载均衡
编辑install.sls文件
[root@server1 salt]# cd /srv/salt/
[root@server1 salt]# ls
apache _grains nginx top.sls
[root@server1 salt]# mkdir haproxy
[root@server1 salt]# vim haproxy/install.sls
haproxy-install:
pkg.installed:
- pkgs:
- haproxy
file.managed:
- name: /etc/haproxy/haproxy.cfg
- source: salt://haproxy/files/haproxy.cfg
service.running:
- name: haproxy
- enable: true
- reload: true
- watch:
- file: haproxy-install
将haproxy配置文件发送到server1的/srv/salt/haproxy/files目录下,编辑haproxy的配置文件
[root@server1 files]# vim haproxy.cfg
[root@server1 files]# vim haproxy.cfg
#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend main *:80
default_backend app
backend app
balance roundrobin
server app1 172.25.254.2:80 check
server app2 172.25.254.3:80 check
[root@server1 files]# pwd
/srv/salt/haproxy/files
[root@server1 files]# ll
total 4
-rw-r--r-- 1 root root 2595 Jun 17 10:56 haproxy.cfg
[root@server1 files]# pwd
/srv/salt/haproxy/files
[root@server1 files]# ll
total 4
-rw-r--r-- 1 root root 2595 Jun 17 10:56 haproxy.cfg
在base(/srv/salt)目录下,编辑top.sls,并推送
[root@server1 salt]# cat top.sls
base:
'server1':
- haproxy.install
'server4':
- haproxy.install
'server2':
- apache.service
'server3':
- nginx.service
#这里的apache,nginx的推送我在上一篇博客中已经写过了。
#如果有需要的宝宝可以去我的主页看
[root@server1 salt]# salt '*' state.highstate
用keepalived配置高可用
- 在server1的/srv/salt目录下,建立keepalived目录,进到目录里边编辑安装keepalived的sls推送文件
[root@server1 keepalived]# cat install.sls
kp-install:
pkg.installed:
- pkgs:
- keepalived
file.managed:
- name: /etc/keepalived/keepalived.conf
- source: salt://keepalived/files/keepalived.conf
- template: jinja
- context:
STATE: {{ pillar['state'] }}
VRID: {{ pillar['vrid'] }}
PRIORITY: {{ pillar['priority'] }}
service.running:
- name: keepalived
- enable: true
- reload: true
- watch:
- file: kp-install
[root@server1 keepalived]# cat /srv/pillar/web.sls
{% if grains['fqdn'] == 'server2' %}
webserver: httpd
state: MASTER
vrid: 9
priority: 100
ip: 172.25.254.2
port: 80
{% elif grains['fqdn'] == 'server3' %}
webserver: nginx
state: BACKUP
vrid: 9
priority: 50
ip: 172.25.254.3
port: 80
{% endif %}
- 建立files目录,将修改好的keepalived.conf文件放进去
[root@server1 keepalived]# cat /srv/pillar/web.sls
{% if grains['fqdn'] == 'server1' %}
webserver: httpd
state: MASTER
vrid: 9
priority: 100
ip: 172.25.254.2
port: 80
{% elif grains['fqdn'] == 'server4' %}
webserver: nginx
state: BACKUP
vrid: 9
priority: 50
ip: 172.25.254.3
port: 80
{% endif %}
[root@server1 keepalived]# cat files/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
root@localhost
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_instance VI_1 {
state {{ STATE }}
interface eth0
virtual_router_id {{ VRID }}
priority {{ PRIORITY }}
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
172.25.254.100
}
}
virtual_server 192.168.200.100 443 {
delay_loop 6
lb_algo rr
lb_kind NAT
nat_mask 255.255.255.0
persistence_timeout 50
protocol TCP
real_server 192.168.201.100 443 {
weight 1
SSL_GET {
url {
path /
digest ff20ad2481f97b1754ef3e12ecd3a9cc
}
url {
path /mrtg/
digest 9b3a0c85a887a256d6939da88aabd8cd
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
virtual_server 10.10.10.2 1358 {
delay_loop 6
lb_algo rr
lb_kind NAT
persistence_timeout 50
protocol TCP
sorry_server 192.168.200.200 1358
real_server 192.168.200.2 1358 {
weight 1
HTTP_GET {
url {
path /testurl/test.jsp
digest 640205b7b0fc66c1ea91c463fac6334d
}
url {
path /testurl2/test.jsp
digest 640205b7b0fc66c1ea91c463fac6334d
}
url {
path /testurl3/test.jsp
digest 640205b7b0fc66c1ea91c463fac6334d
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.200.3 1358 {
weight 1
HTTP_GET {
url {
path /testurl/test.jsp
digest 640205b7b0fc66c1ea91c463fac6334c
}
url {
path /testurl2/test.jsp
digest 640205b7b0fc66c1ea91c463fac6334c
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
virtual_server 10.10.10.3 1358 {
delay_loop 3
lb_algo rr
lb_kind NAT
nat_mask 255.255.255.0
persistence_timeout 50
protocol TCP
real_server 192.168.200.4 1358 {
weight 1
HTTP_GET {
url {
path /testurl/test.jsp
digest 640205b7b0fc66c1ea91c463fac6334d
}
url {
path /testurl2/test.jsp
digest 640205b7b0fc66c1ea91c463fac6334d
}
url {
path /testurl3/test.jsp
digest 640205b7b0fc66c1ea91c463fac6334d
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.200.5 1358 {
weight 1
HTTP_GET {
url {
path /testurl/test.jsp
digest 640205b7b0fc66c1ea91c463fac6334d
}
url {
path /testurl2/test.jsp
digest 640205b7b0fc66c1ea91c463fac6334d
}
url {
path /testurl3/test.jsp
digest 640205b7b0fc66c1ea91c463fac6334d
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
- 在/srv/salt下编辑全部节点的推送的top.sls文件
[root@server1 keepalived]# cat /srv/salt/top.sls
base:
'server1':
- haproxy.install
- keepalived.install
'server4':
- haproxy.install
- keepalived.install
'server2':
- apache.service
'server3':
- nginx.service