在Server2|Server3上安装Pacemaker和Corosync
[root@server2 ~]#yum install -y pacemaker corosync
[root@server2 ~]#ls
crmsh-1.2.6-0.rc2.2.1.x86_64.rpm pssh-2.3.1-2.1.x86_64.rpm
[root@server2 ~]#yum install * -y
[root@server2 ~]# cd /etc/corosync/
[root@server2 corosync]# ls
corosync.conf.example corosync.conf.example.udpu service.d uidgid.d
[root@server2 corosync]# cp corosync.conf.example corosync.conf
[root@server2 corosync]# vim corosync.conf
.....
# Please read the corosync.conf.5 manual page
compatibility: whitetank
totem {
version: 2
secauth: off
threads: 0
interface {
ringnumber: 0
bindnetaddr: 172.25.66.0 #修改为自己的网段
mcastaddr: 226.94.1.1
mcastport: 6005 #保证RS两端端口一致
ttl: 1
}
}
logging {
fileline: off
to_stderr: no
to_logfile: yes
to_syslog: yes
logfile: /var/log/cluster/corosync.log
debug: off
timestamp: on
logger_subsys {
subsys: AMF
debug: off
}
}
amf {
mode: disabled
}
service{ #设定pacemaker以插件的方式工作,corosync开启时,自动打开pacemaker
ver:0
name:pacemaker
}
....
[root@server2 corosync]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync): [ OK ]
**在Server3中同步认证
[root@server2 corosync]# scp corosync.conf root@172.25.66.3:/etc/corosync/
root@172.25.66.3's password:
corosync.conf 100% 479 0.5KB/s 00:00
[root@server2 corosync]# crm status #查看状态
Last updated: Sun Oct 1 10:07:11 2017
Last change: Sun Oct 1 10:03:28 2017 via crmd on server2
Stack: classic openais (with plugin)
Current DC: server2 - partition with quorum
Version: 1.1.10-14.el6-368c726
2 Nodes configured, 2 expected votes
0 Resources configured
Online: [ server2 server3 ]
[root@server2 corosync]# crm_mon #监控
[root@server2 corosync]#
添加fence机制
[root@foundation66 Desktop]# systemctl status fence_virtd.service
[root@server2 corosync]# crm
crm(live)# configure
crm(live)configure# show
node server2
node server3
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2"
crm(live)configure# bye
bye
[root@server2 corosync]# stonith_admin -I
fence_pcmk
fence_legacy
2 devices found
[root@server2 corosync]# yum install fence-virt -y #在server3上也同时安装
[root@server2 corosync]# stonith_admin -I
fence_xvm
fence_virt
fence_pcmk
fence_legacy
4 devices found
[root@server2 corosync]# crm configure show
node server2
node server3
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2"
[root@server2 ~]# crm
crm(live)# configure
crm(live)configure# show
node server2
node server3
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2"
crm(live)configure# primitive fence stonith:fence_xvm params pcmk_host_map="server2:server2;server3:server3" op monitor interval=1min
crm(live)configure# commit
crm(live)configure# show
node server2
node server3
primitive fence stonith:fence_xvm \
params pcmk_host_map="server2:server2;server3:server3" \
op monitor interval="1min"
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2"
crm(live)configure# bye
bye
[root@server2 ~]# mkdir /etc/cluster #在server3上也建立同样的目录
物理机发送key
[root@foundation66 Desktop]# systemctl start fence_virtd.service
[root@foundation66 Desktop]# systemctl status fence_virtd.service
[root@foundation66 Desktop]# scp /etc/cluster/fence_xvm.key root@172.25.66.2:/etc/cluster/
root@172.25.66.2's password:
fence_xvm.key 100% 128 0.1KB/s 00:00
[root@foundation66 Desktop]# scp /etc/cluster/fence_xvm.key root@172.25.66.3:/etc/cluster/
root@172.25.66.3's password:
fence_xvm.key 100% 128 0.1KB/s 00:00
[root@foundation66 Desktop]#
添加fence
[root@server2 cluster]# crm
crm(live)# resource
crm(live)resource# show
fence (stonith:fence_xvm): Stopped
crm(live)resource# refresh
Waiting for 1 replies from the CRMd. OK
crm(live)resource# start vmfence
ERROR: resource vmfence does not exist
crm(live)# configure
crm(live)configure# show
node server2
node server3
primitive fence stonith:fence_xvm \
params pcmk_host_map="server2:server2;server3:server3" \
op monitor interval="1min"
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2"
crm(live)configure# cd
crm(live)# resource
crm(live)resource# refresh
Waiting for 1 replies from the CRMd. OK
crm(live)resource# show
fence (stonith:fence_xvm): Stopped
crm(live)resource# start fence
crm(live)resource# show
fence (stonith:fence_xvm): Stopped
crm(live)resource# bye
bye
[root@server2 cluster]#
**在另外一台RS上crm_mon查看状态
添加vip资源
[root@server2 cluster]# crm
crm(live)# configure
crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.66.100 cidr_netmask=24 op monitor interval=1min
crm(live)configure# commit
crm(live)configure# bye
bye
crm(live)configure# property no-quorum-policy=ignore #节点数过少时不监测
crm(live)configure# commit
crm(live)configure# show
node server2
node server3
primitive fence stonith:fence_xvm \
params pcmk_host_map="server2:server2;server3:server3" \
op monitor interval="1min" \
meta target-role="Started"
primitive vip ocf:heartbeat:IPaddr2 \
params ip="172.25.66.100" cidr_netmask="24" \
op monitor interval="1min"
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2" \
no-quorum-policy="ignore"
crm(live)configure# cd
crm(live)# resource
crm(live)resource# cleanup vmfence
Error performing operation: No such device
crm(live)resource# cleanup fence #清除
Cleaning up fence on server2
Cleaning up fence on server3
Waiting for 1 replies from the CRMd. OK
crm(live)resource# bye
bye
[root@server2 cluster]#
监控状态crm_mon
Last updated: Sun Oct 1 11:09:35 2017
Last change: Sun Oct 1 11:09:26 2017 via crmd on server3
Stack: classic openais (with plugin)
Current DC: server2 - partition with quorum
Version: 1.1.10-14.el6-368c726
2 Nodes configured, 2 expected votes
2 Resources configured
Online: [ server2 server3 ]
fence (stonith:fence_xvm): Started server2
vip (ocf::heartbeat:IPaddr2): Started server3
节点迁移|server3状态为off
[root@server3 ~]# ip addr show
inet 172.25.66.100/24 brd 172.25.66.255 scope global secondary eth1
#测试:
[root@server3 ~]# /etc/init.d/corosync stop
Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
Waiting for corosync services to unload:. [ OK ]
[root@server3 ~]#
[root@server2 ~]# ip addr
.....
inet 172.25.66.100/24 brd 172.25.66.255 scope global secondary eth1
.....
[root@server2 ~]#
集群的资源不要手动开启
添加Apache服务
.....
921 <Location /server-status>
922 SetHandler server-status
923 Order deny,allow
924 Deny from all
925 Allow from 127.0.0.1
926 </Location>
.....