Required packages: crmsh-1.2.6-0.rc2.2.1.x86_64.rpm, pssh-2.3.1-2.1.x86_64.rpm
The virtual machines from the previous experiment are reused.
First make sure no other cluster software is still running.
[server1]
ps ax    ## check whether services such as cman are still running
/etc/init.d/ldirectord stop
chkconfig ldirectord off
/etc/init.d/keepalived status
/etc/init.d/heartbeat status
/etc/init.d/cman status    ## stop any of these that are still running
If you start from fresh virtual machines instead, install heartbeat, drbd, mysql-server and httpd first.
yum install -y pacemaker    ## on both server1 and server2
rpm -q corosync
cd /etc/corosync/
cp corosync.conf.example corosync.conf
vim corosync.conf
In the totem section, set bindnetaddr in the interface block to your own network segment, and change mcastaddr/mcastport so they do not conflict with other clusters on the same network.
Then append the following at the end of the file:
service {
        name: pacemaker
        ver: 0
}
With ver: 0, corosync starts the pacemaker daemons automatically as a corosync plugin, instead of pacemaker being started by its own init script.
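For reference, a minimal sketch of the totem fragment being edited, assuming the 172.25.45.0/24 network used in this article; the multicast address and port below are only example values, pick ones that do not collide with other clusters on the same network:
totem {
        version: 2
        secauth: off
        interface {
                ringnumber: 0
                bindnetaddr: 172.25.45.0
                mcastaddr: 226.94.1.45
                mcastport: 5405
        }
}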
scp corosync.conf root@172.25.45.2:/etc/corosync
/etc/init.d/corosync start
tail -f /var/log/messages    ## if the log shows no errors, start the corosync service on server2 as well
crm_verify -L
crm_verify -LV
The errors reported here are because stonith is enabled by default while no fence device has been configured yet.
cd
yum install -y crmsh-1.2.6-0.rc2.2.1.x86_64.rpm pssh-2.3.1-2.1.x86_64.rpm
## on server2, run crm_mon to monitor the cluster in real time
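crm_mon refreshes its display continuously; for a one-shot snapshot it can also be run as:
crm_mon -1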
[root@server1 ~]# crm
crm(live)# status
crm(live)# configure
crm(live)configure# show
crm(live)configure# property stonith-enabled=false
crm(live)configure# show
crm(live)configure# commit
crm(live)configure# quit
The configuration can also be viewed with a single command:
crm configure show
crm_verify -L
crm_verify -LV    ## no errors are reported any more
Another approach: instead of disabling stonith, the error can also be resolved by adding a fence resource (pcmk_host_map maps each cluster node name to its libvirt domain name, here new1 and new2); the fence_xvm setup this requires is shown later in this section:
crm(live)configure# primitive vmfence stonith:fence_xvm params pcmk_host_map="server1.example.com:new1;server2.example.com:new2" op monitor interval=1min
[root@server1 ~]# crm
crm(live)# configure
crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.45.100 cidr_netmask=32 op monitor interval=30s
crm(live)configure# show
crm(live)configure# commit
crm(live)configure# bye
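A quick way to confirm the address is active on whichever node currently holds the resource (eth0 as in the rest of this article):
ip addr show eth0 | grep 172.25.45.100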
Test:
/etc/init.d/corosync stop
The VIP does not move to server2: with one of the two nodes stopped the cluster has lost quorum, and the default no-quorum-policy stops all resources.
/etc/init.d/corosync start
[root@server1 ~]# crm
crm(live)# configure
crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# show
crm(live)configure# commit
crm(live)configure# bye
Test:
/etc/init.d/corosync stop
The VIP fails over to server2.
/etc/init.d/corosync start    ## the VIP does not fail back
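If this stay-put behaviour should be made explicit rather than relied on, a default resource stickiness can be set (a sketch, not one of the original steps):
crm configure rsc_defaults resource-stickiness=100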
vim /etc/httpd/conf/httpd.conf
Uncomment lines 921 to 926 (the server-status block) and change the Allow from value to 127.0.0.1, as shown in the reference block below.
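For reference, after uncommenting, the block typically looks like the following (exact line numbers depend on the httpd version); the ocf:heartbeat:apache resource agent used below monitors Apache through this server-status URL:
<Location /server-status>
    SetHandler server-status
    Order deny,allow
    Deny from all
    Allow from 127.0.0.1
</Location>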
scp /etc/httpd/conf/httpd.conf 172.25.45.2:/etc/httpd/conf/
[root@server1 ~]# crm
crm(live)# configure
crm(live)configure# primitive website ocf:heartbeat:apache params configfile=/etc/httpd/conf/httpd.conf op monitor interval=60s
crm(live)configure# show
crm(live)configure# collocation website-with-vip inf: website vip
crm(live)configure# commit
website and vip are now bound to the same node (a colocation constraint controls placement only, not start order).
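If the colocation approach were kept, a start-order constraint would normally be added as well, for example (a sketch, not one of the original steps):
order website-after-vip inf: vip website
The group used next provides both placement and ordering in one object, so the colocation constraint is deleted instead.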
crm(live)configure# delete website-with-vip
crm(live)configure# commit    ## website moves back to server1
crm(live)configure# group apache vip website
crm(live)configure# commit
crm(live)configure# show
crm(live)configure# cd
crm(live)# node
crm(live)node# standby server2.example.com
crm(live)node# online server2.example.com
Resources do not fail back to server2 after it comes back online.
cat /proc/drbd
drbdadm primary sqldata
mount /dev/drbd1 /var/lib/mysql/
df -h
/etc/init.d/mysqld start    ## mysql starts successfully on the DRBD-backed data directory
/etc/init.d/mysqld stop
umount /var/lib/mysql/
drbdadm secondary sqldata
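These commands assume the DRBD resource sqldata from the earlier DRBD setup. For reference, a minimal sketch of such a resource definition in /etc/drbd.d/sqldata.res; the backing disk /dev/vdb1 and port 7789 are assumptions, adjust to your environment:
resource sqldata {
        on server1.example.com {
                device /dev/drbd1;
                disk /dev/vdb1;
                address 172.25.45.1:7789;
                meta-disk internal;
        }
        on server2.example.com {
                device /dev/drbd1;
                disk /dev/vdb1;
                address 172.25.45.2:7789;
                meta-disk internal;
        }
}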
[root@server1 ~]# crm
crm(live)# resource
crm(live)resource# show
crm(live)resource# stop apache
crm(live)resource# show
crm(live)resource# cd
crm(live)# configure
crm(live)configure# show
crm(live)configure# delete apache
crm(live)configure# delete website
crm(live)configure# show
crm(live)configure# commit
crm(live)configure# Ctrl-C, leaving
The following uses fresh virtual machines as an example; if your environment is already set up, some of these steps can be skipped.
[server1/server2]
stonith_admin -I    ## list the fence agents that are available
yum provides */fence_xvm
yum install -y fence-virt-0.2.3-15.el6.x86_64
stonith_admin -I
stonith_admin -M -a fence_xvm
mkdir /etc/cluster/
[physical host]
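If fence_virtd has not been configured on this host before, a minimal sketch of the preparation (package names are the usual RHEL/CentOS ones; the multicast listener and libvirt backend are the typical interactive choices, pick your own bridge when prompted):
yum install -y fence-virtd fence-virtd-libvirt fence-virtd-multicast
fence_virtd -c    ## interactive: multicast listener, libvirt backend, your bridge
mkdir -p /etc/cluster
dd if=/dev/urandom of=/etc/cluster/fence_xvm.key bs=128 count=1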
systemctl start fence_virtd
systemctl status fence_virtd
netstat -anulp | grep :1229
virsh list
scp fence_xvm.key root@172.25.45.1:/etc/cluster/
scp fence_xvm.key root@172.25.45.2:/etc/cluster/
[server1]
[root@server1 ~]# crm
crm(live)# configure
crm(live)configure# property stonith-enabled=true
crm(live)configure# commit
crm(live)configure# primitive vmfence stonith:fence_xvm params pcmk_host_map="server1.example.com:new1;server2.example.com:new2" op monitor interval=1min
crm(live)configure# commit
crm(live)configure# bye
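Before relying on it, the fence path can be tested by hand from a cluster node; new2 is the libvirt domain name of server2 from the pcmk_host_map above, and this really power-cycles that VM:
fence_xvm -o reboot -H new2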
Test:
ip addr show
ip addr del 172.25.45.100/32 dev eth0    ## the 30s monitor notices the missing VIP and adds it back
/etc/init.d/network stop
server1 is power-cycled by the fence device and reboots; the VIP fails over to server2.
/etc/init.d/corosync start
The vmfence resource moves to server1.
Note: once crm_mon shows a failure for a resource, the error is not cleared automatically even after the underlying problem has been fixed; clear it manually with a resource cleanup.
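For example, assuming the failed resource is vmfence (substitute the name shown as failed in crm_mon):
crm resource cleanup vmfence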
[root@server1 ~]# crm
crm(live)# configure
crm(live)configure# primitive sqldata ocf:linbit:drbd params drbd_resource=sqldata op monitor interval=60s
crm(live)configure# ms sqldataclone sqldata meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
crm(live)configure# commit
crm(live)configure# bye
cat /proc/drbd    ## one node should now show Primary, the other Secondary
[root@server1 ~]# crm
crm(live)# configure
crm(live)configure# primitive sqlfs ocf:heartbeat:Filesystem params device=/dev/drbd1 directory=/var/lib/mysql fstype=ext4
crm(live)configure# colocation sqlfs_on_drbd inf: sqlfs sqldataclone:Master
crm(live)configure# order sqlfs-after-sqldata inf: sqldataclone:promote sqlfs:start
crm(live)configure# commit
crm(live)configure# primitive mysql lsb:mysqld op monitor interval=30s
crm(live)configure# group mygroup vip sqlfs mysql
crm(live)configure# show
crm(live)configure# commit
crm(live)configure# cd
crm(live)# node
crm(live)node# standby
crm(live)node# online
[server2]
/etc/init.d/mysqld stop
The health check (the mysql resource's monitor) detects the stopped service and automatically restarts it.
/etc/init.d/network stop
server2 is fenced and reboots.
/etc/init.d/corosync start
Reposted from: https://blog.51cto.com/wjl19940429/1831431