Pacemaker+Haproxy实现高可用和负载均衡

  1. 配置高级的yum源(server1、server2)
    [root@server1 yum.repos.d]# vim rhel-source.repo

     [rhel-source]
     name=Red Hat Enterprise Linux $releasever - $basearch - Source
     baseurl=http://172.25.61.250/rhel6.5
     enabled=1
     gpgcheck=1
     gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
     
     [LoadBalancer]
     name=Red Hat Enterprise Linux $releasever - $basearch - Source
     baseurl=http://172.25.61.250/rhel6.5/LoadBalancer
     enabled=1
     gpgcheck=0
     gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
     
     [HighAvailability]
     name=Red Hat Enterprise Linux $releasever - $basearch - Source
     baseurl=http://172.25.61.250/rhel6.5/HighAvailability
     enabled=1
     gpgcheck=0
     gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
     
     [ResilientStorage]
     name=Red Hat Enterprise Linux $releasever - $basearch - Source
     baseurl=http://172.25.61.250/rhel6.5/ResilientStorage
     enabled=1
     gpgcheck=0
     gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
     
     [ScalableFileSystem]
     name=Red Hat Enterprise Linux $releasever - $basearch - Source
     baseurl=http://172.25.61.250/rhel6.5/ScalableFileSystem
     enabled=1
     gpgcheck=0
     gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
    

[root@server1 yum.repos.d]# yum clean all
[root@server1 yum.repos.d]# yum repolist
[root@server1 yum.repos.d]# scp rhel-source.repo server2:/etc/yum.repos.d/

  1. 下软件,提供crm命令行接口,配置心跳节点,同时启动
    [root@server1 yum.repos.d]# yum clean all
    [root@server1 yum.repos.d]# yum repolist
    [root@server1 yum.repos.d]# scp rhel-source.repo server2:/etc/yum.repos.d/

  2. 安装软件
    server1:

     yum install pssh-2.3.1-2.1.x86_64.rpm  crmsh-1.2.6-0.rc2.2.1.x86_64.rpm -y
     yum install pacemaker -y
      cd /etc/corosync/
      [root@server1 corosync]# cp corosync.conf.example corosync.conf
     [root@server1 corosync]# vim corosync.conf
                       bindnetaddr: 172.25.61.0	#同网段
                       mcastaddr: 226.94.1.1	
                       mcastport: 5413		
                       ttl: 1
       service {
               name:pacemaker
               ver:0
       }
     
     [root@server1 corosync]# scp /etc/corosync/corosync.conf server2:/etc/corosync/
     
     [root@server1 corosync]# scp /etc/corosync/corosync.conf server2:/etc/corosync/
     ##给server2发一份
    

4.server1和server2开启服务

	[root@server2 corosync]# /etc/init.d/corosync start
	Starting Corosync Cluster Engine (corosync): [ OK ]
	[root@server1 corosync]# /etc/init.d/corosync start
	Starting Corosync Cluster Engine (corosync): [ OK ]
  1. 设置集群资源

     [root@server2 corosync]# crm
     crm(live)# configure
     crm(live)configure# show
     node server1
     node server2
     property $id="cib-bootstrap-options" \
     	dc-version="1.1.10-14.el6-368c726" \
     	cluster-infrastructure="classic openais (with plugin)" \
     	expected-quorum-votes="2"
     crm(live)configure# 				####初始化
     
     crm(live)configure# verify 
        error: unpack_resources: 	Resource start-up disabled since no STONITH resources have been defined
        error: unpack_resources: 	Either configure some or disable STONITH with the stonith-enabled option
        error: unpack_resources: 	NOTE: Clusters with shared data need STONITH to ensure data integrity
     Errors found during check: config not valid
     
     01.将fence设备关闭
     crm(live)configure# property stonith-enabled=false
     crm(live)configure# verify
     crm(live)configure# commit
     
     02.先设置集群资源VIP,再设置apache服务
     crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.61.100 nic=eth0 cidr_netmask=24
     crm(live)configure# verify 
     crm(live)configure# commit 
    
  2. server1和server2安装httpd,撰写默认发布页

    [root@server1 corosync]# cd /var/www/html/
    [root@server1 html]# vim index.html
    [root@server1 html]# cat index.html
    server1

  3. 启动脚本

     crm(live)configure# primitive apache lsb:httpd op monitor interval=10s
     crm(live)configure# verify 
     crm(live)configure# commit 
    

注意:动态监控下看到此时的VIP在server1上,httpd在server2上

  1. 设置资源组

     crm(live)configure# group website vip apache 
     crm(live)configure# verify 
     crm(live)configure# commit 
    
  2. 测试

     [root@foundation61 Desktop]# curl 172.25.61.100
     server1
     [root@foundation61 Desktop]# curl 172.25.61.100
     server1
     [root@foundation61 Desktop]# curl 172.25.61.100
     server1
     [root@foundation61 Desktop]# curl 172.25.61.100
     server2
     [root@foundation61 Desktop]# curl 172.25.61.100
     server2
     [root@foundation61 Desktop]# curl 172.25.61.100
     server2
    

用客户主机访问,server1的corosync服务停掉之后,又访问到了server2节点

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值