-
配置高级的yum源(server1、server2)
[root@server1 yum.repos.d]# vim rhel-source.repo
[rhel-source]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.61.250/rhel6.5
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release

[LoadBalancer]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.61.250/rhel6.5/LoadBalancer
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release

[HighAvailability]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.61.250/rhel6.5/HighAvailability
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release

[ResilientStorage]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.61.250/rhel6.5/ResilientStorage
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release

[ScalableFileSystem]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.61.250/rhel6.5/ScalableFileSystem
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
[root@server1 yum.repos.d]# yum clean all
[root@server1 yum.repos.d]# yum repolist
[root@server1 yum.repos.d]# scp rhel-source.repo server2:/etc/yum.repos.d/
-
下载软件，提供 crm 命令行接口，配置心跳节点，同时启动
[root@server1 yum.repos.d]# yum clean all
[root@server1 yum.repos.d]# yum repolist
[root@server1 yum.repos.d]# scp rhel-source.repo server2:/etc/yum.repos.d/
-
安装软件
server1:
yum install pssh-2.3.1-2.1.x86_64.rpm crmsh-1.2.6-0.rc2.2.1.x86_64.rpm -y
yum install pacemaker -y
cd /etc/corosync/
[root@server1 corosync]# cp corosync.conf.example corosync.conf
[root@server1 corosync]# vim corosync.conf
bindnetaddr: 172.25.61.0      # 与节点同网段
mcastaddr: 226.94.1.1
mcastport: 5413
ttl: 1
service {
        name: pacemaker
        ver: 0
}
[root@server1 corosync]# scp /etc/corosync/corosync.conf server2:/etc/corosync/   ## 给server2发一份
4.server1和server2开启服务
[root@server2 corosync]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync): [ OK ]
[root@server1 corosync]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync): [ OK ]
-
设置集群资源
[root@server2 corosync]# crm
crm(live)# configure
crm(live)configure# show
node server1
node server2
property $id="cib-bootstrap-options" \
        dc-version="1.1.10-14.el6-368c726" \
        cluster-infrastructure="classic openais (with plugin)" \
        expected-quorum-votes="2"
crm(live)configure#     #### 初始化
crm(live)configure# verify
   error: unpack_resources:  Resource start-up disabled since no STONITH resources have been defined
   error: unpack_resources:  Either configure some or disable STONITH with the stonith-enabled option
   error: unpack_resources:  NOTE: Clusters with shared data need STONITH to ensure data integrity
Errors found during check: config not valid
01. 将fence设备关闭
crm(live)configure# property stonith-enabled=false
crm(live)configure# verify
crm(live)configure# commit
02. 先设置集群资源VIP，再设置apache服务
crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.61.100 nic=eth0 cidr_netmask=24
crm(live)configure# verify
crm(live)configure# commit
-
server1和server2安装http,撰写默认发布页
[root@server1 corosync]# cd /var/www/html/
[root@server1 html]# vim index.html
[root@server1 html]# cat index.html
server1
-
启动脚本
crm(live)configure# primitive apache lsb:httpd op monitor interval=10s
crm(live)configure# verify
crm(live)configure# commit
注意:动态监控下看到此时的VIP在server1上,httpd在server2上
-
设置资源组
crm(live)configure# group website vip apache
crm(live)configure# verify
crm(live)configure# commit
-
测试
[root@foundation61 Desktop]# curl 172.25.61.100
server1
[root@foundation61 Desktop]# curl 172.25.61.100
server1
[root@foundation61 Desktop]# curl 172.25.61.100
server1
[root@foundation61 Desktop]# curl 172.25.61.100
server2
[root@foundation61 Desktop]# curl 172.25.61.100
server2
[root@foundation61 Desktop]# curl 172.25.61.100
server2
用客户主机访问，server1 的 corosync 服务停掉之后，又访问到了 server2 节点