Pacemaker+haproxy
实现高可用与均衡负载
[root@server4 examples]# cd
[root@server4 ~]# ls
haproxy-1.6.11 nginx-1.10.1.tar.gz nginx-sticky-module-ng
haproxy-1.6.11.tar.gz nginx-1.14.0 nginx-sticky-module-ng.tar.gz
nginx-1.10.1 nginx-1.14.0.tar.gz rpmbuild
[root@server4 ~]# mv haproxy-1.6.11.tar.gz rpmbuild/SOURCES/
[root@server4 ~]# cd haproxy-1.6.11/examples
[root@server4 examples]# rpmbuild -bb haproxy.spec
3、安装 haproxy
[root@server4 examples]# cd /root/rpmbuild/RPMS/x86_64/
[root@server4 x86_64]# ls
haproxy-1.6.11-1.x86_64.rpm
[root@server4 x86_64]# rpm -ivh haproxy-1.6.11-1.x86_64.rpm
4、修改配置文件
[root@server4 x86_64]# cd -
/root/haproxy-1.6.11/examples
[root@server4 examples]# ls
acl-content-sw.cfg debug2ansi haproxy.spec ssl.cfg
auth.cfg debug2html haproxy.vim stats_haproxy.sh
check debugfind init.haproxy transparent_proxy.cfg
check.conf errorfiles option-http_proxy.cfg
content-sw-sample.cfg haproxy.init seamless_reload.txt
[root@server4 examples]# cp content-sw-sample.cfg /etc/haproxy/haproxy.cfg
[root@server4 examples]# cd /etc/haproxy/
[root@server4 haproxy]# vim haproxy.cfg
10 global
11 maxconn 65535 ##最大链接
12 stats socket /var/run/haproxy.stat mode 600 level admin
13 log 127.0.0.1 local0
14 uid 200 ##指定用户、组
15 gid 200
16 chroot /var/empty ##
17 daemon
18 ####注意:局部改变全局,即局部变量优先
19 defaults
20 mode http ##http服务
21 log global ##日志格式
22 option httplog
23 option dontlognull
24 monitor-uri /monitoruri ##前端健康检查
25 maxconn 8000 ##默认最大链接,覆盖global
26 timeout client 30s ##客户端中断时间 30s
27 stats uri /admin/stats ##后端健康检查
28 retries 2 ##再次链接时间,2s
29 option redispatch
30 timeout connect 5s ##链接断开,5s
31 timeout server 30s ##服务断开,30s
34 # The public 'www' address in the DMZ
35 frontend public
36 bind *:80
37 # use_backend static if { hdr_beg(host) -i img }
38 # use_backend static if { path_beg /img /css }
39 default_backend dynamic
40
41 # the application servers go here
42 backend dynamic
43 balance roundrobin
44 server web1 172.25.12.2:80 check inter 1000
45 server web2 172.25.12.3:80 check inter 1000
5、修改限制文件,建立用户、组
[root@server4 haproxy]# vim /etc/security/limits.conf
haproxy - nofile 65536
[root@server4 haproxy]# groupadd -g 200 haproxy
[root@server4 haproxy]# useradd -u 200 -g 200 -M -s /sbin/nologin haproxy
[root@server4 haproxy]# /etc/init.d/haproxy start
Starting haproxy: [ OK ]
Server3和server5开启httpd实现均衡负载
物理主机网页测试:
172.25.12.4/admin/stats ##后端健康检查页面(对应 stats uri 配置)
172.25.12.4/monitoruri ##检测haproxy服务是否ok(对应 monitor-uri 配置)
第一阶段
第二阶段 pacemaker
[root@server4 ~]# yum install -y corosync pacemaker
[root@server4 ~]# cd /etc/corosync/
[root@server4 corosync]# cp corosync.conf.example corosync.conf
[root@server4 corosync]# vim corosync.conf
4 totem {
5 version: 2
6 secauth: off
7 threads: 0
8 interface {
9 ringnumber: 0
10 bindnetaddr: 172.25.12.0
11 mcastaddr: 226.94.1.12
12 mcastport: 5405
13 ttl: 1
14 }
15 }
35 service {
36 name: pacemaker
37 ver: 1
38 }
把 ver 的值由默认的 0 修改为 1(ver: 1 表示 pacemaker 不随 corosync 自动启动,需要单独手动启动)
[root@server4 corosync]# /etc/init.d/pacemaker start
[root@server4 corosync]# /etc/init.d/corosync start
手动开启:先启动 pacemaker,再启动 corosync
[root@server4 corosync]# scp corosync.conf server6:/etc/corosync/
root@server6's password:
corosync.conf 100% 483 0.5KB/s 00:00
[root@server4 corosync]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync): [ OK ]
[root@server4 ~]# yum install crmsh-1.2.6-0.rc2.2.1.x86_64.rpm pssh-2.3.1-2.1.x86_64.rpm -y
##注意:当 redhat 自带的集群组件(cman、rgmanager 等)已开机自启、占用集群资源时,需先将其全部关闭:
[root@server4 ~]# chkconfig modclusterd off
[root@server4 ~]# chkconfig cman off
[root@server4 ~]# chkconfig rgmanager off
[root@server4 ~]# chkconfig iscsi off
[root@server4 ~]# lvmconf --disable-cluster
[root@server4 ~]# chkconfig clvmd off
[root@server4 ~]# chkconfig ricci off
Server4:
[root@server4 ~]# cd rpmbuild/RPMS/
[root@server4 RPMS]# cd x86_64/
[root@server4 x86_64]# scp haproxy-1.6.11-1.x86_64.rpm server6:/root/
root@server6's password:
haproxy-1.6.11-1.x86_64.rpm 100% 768KB 768.2KB/s 00:00
[root@server4 ~]# cd /etc/haproxy/
[root@server4 haproxy]# scp haproxy.cfg server6:/etc/haproxy/
Server6:
[root@server6 ~]# rpm -ivh haproxy-1.6.11-1.x86_64.rpm
[root@server4 corosync]# crm
crm(live)# configure
crm(live)configure# property stonith-enabled=false
crm(live)configure# commit
crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.12.100 cidr_netmask=32 op monitor interval=1min
crm(live)configure# commit
crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# commit
crm(live)configure# primitive haproxy lsb:haproxy op monitor interval=1min
crm(live)configure# commit
crm(live)configure# group hagroup vip haproxy
crm(live)configure# commit
crm(live)configure# property stonith-enabled=true
crm(live)configure# primitive vmfence stonith:fence_xvm params pcmk_host_map="server6:vm6;server4:vm4" op monitor interval=1min
crm(live)configure# commit
crm(live)configure# show
node server1
node server4
node server6
primitive haproxy lsb:haproxy \
op monitor interval="1min"
primitive nginx lsb:nginx \
op monitor interval="30s"
primitive vip ocf:heartbeat:IPaddr2 \
params ip="172.25.77.100" cidr_netmask="24" \
op monitor interval="1min"
primitive vmfence stonith:fence_
group hagroup vip haproxy
group nginxgroup nginx
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="cman" \
expected-quorum-votes="2" \
no-quorum-policy="ignore" \
stonith-enabled="true"