Preface
Both systems run Red Hat Enterprise Linux 5.4.
IP address plan:
node1.hf.com            192.168.100.10/24
node2.hf.com            192.168.100.20/24
Virtual IP              192.168.100.100/24

 

1. Pre-configuration preparation
Configuration on node1
Configure the network file
[root@node1 ~]# cat /etc/sysconfig/network
NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=node1.hf.com
[root@node1 ~]# cat /etc/yum.repos.d/rhel-debuginfo.repo
[rhel-Server]
name=Red Hat Enterprise Linux Server
baseurl=file:///mnt/cdrom/Server
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release
[rhel-Cluster]            # repository needed for the cluster packages
name=Red Hat Enterprise Linux Cluster
baseurl=file:///mnt/cdrom/Cluster
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release
[rhel-ClusterStorage]     # repository needed for the cluster filesystem packages
name=Red Hat Enterprise Linux ClusterStorage
baseurl=file:///mnt/cdrom/ClusterStorage
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release
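These repositories point at the RHEL 5.4 installation DVD, so the disc has to be mounted at /mnt/cdrom before yum can use them. A minimal sketch (the device node is an assumption; on this VM it may be /dev/hdc, as the mount output later in this guide suggests):
[root@node1 ~]# mkdir -p /mnt/cdrom
[root@node1 ~]# mount /dev/cdrom /mnt/cdrom
[root@node1 ~]# yum clean all && yum repolist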
Edit the hosts file
[root@node1 ~]# cat /etc/hosts
# Do not remove the following line, or various programs
# that require network functionality will fail.
127.0.0.1                localhost.localdomain localhost
::1              localhost6.localdomain6 localhost6
192.168.100.10 node1.hf.com
192.168.100.20 node2.hf.com
[root@node1 ~]#
 
 
 
 
 
 
Configuration on node2
Edit the network file
[root@node2 ~]# cat /etc/sysconfig/network
NETWORKING=yes
NETWORKING_IPV6=no
HOSTNAME=node2.hf.com
Configure the yum repository
[root@node2 ~]# cat /etc/yum.repos.d/rhel-debuginfo.repo
[rhel-Server]
name=Red Hat Enterprise Linux Server
baseurl=file:///mnt/cdrom/Server
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release
[rhel-Cluster]            # repository needed for the cluster packages
name=Red Hat Enterprise Linux Cluster
baseurl=file:///mnt/cdrom/Cluster
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release
[rhel-ClusterStorage]     # repository needed for the cluster filesystem packages
name=Red Hat Enterprise Linux ClusterStorage
baseurl=file:///mnt/cdrom/ClusterStorage
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release
Edit the hosts file and add both nodes
[root@node2 ~]# cat /etc/hosts
# Do not remove the following line, or various programs
# that require network functionality will fail.
127.0.0.1                localhost.localdomain localhost
::1              localhost6.localdomain6 localhost6
192.168.100.10 node1.hf.com
192.168.100.20 node2.hf.com
[root@node2 ~]#
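With the hosts entries in place on both nodes, a quick check that the names resolve (not part of the original capture):
[root@node1 ~]# ping -c 2 node2.hf.com
[root@node2 ~]# ping -c 2 node1.hf.com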
2. Set up passwordless SSH communication
On node1
[root@node1 ~]# ssh-copy-id -i .ssh/id_rsa.pub node2.hf.com    # copy node1's public key to node2
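The key-generation step itself was only shown in the original screenshots; a minimal sketch, assuming the default RSA key path and an empty passphrase, run on each node before ssh-copy-id:
[root@node1 ~]# ssh-keygen -t rsa -f ~/.ssh/id_rsa -N ''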
On node2
[root@node2 ~]# ssh-copy-id -i .ssh/id_rsa.pub node1.hf.com    # copy node2's public key to node1
3. Upload the RPM packages
On node1
[root@node1 ~]# ll
total 162336
drwxr-xr-x 2 root root       4096 Mar 24 22:55 Desktop
-rw------- 1 root root       1284 Mar 25 06:50 anaconda-ks.cfg
-rw-r--r-- 1 root root     271360 Apr 23 14:21 cluster-glue-1.0.6-1.6.el5.i386.rpm
-rw-r--r-- 1 root root     133254 Apr 23 14:21 cluster-glue-libs-1.0.6-1.6.el5.i386.rpm   # cluster-glue libraries (glue layer between the cluster stack and the resource agents)
-rw-r--r-- 1 root root     170052 Apr 23 14:21 corosync-1.2.7-1.1.el5.i386.rpm            # corosync, the cluster messaging layer
-rw-r--r-- 1 root root     158502 Apr 23 14:21 corosynclib-1.2.7-1.1.el5.i386.rpm         # corosync libraries
-rw-r--r-- 1 root root     221868 May 7 11:12 drbd83-8.3.8-1.el5.centos.i386.rpm          # DRBD userland tools
-rw-r--r-- 1 root root     165591 Apr 23 14:21 heartbeat-3.0.3-2.3.el5.i386.rpm           # heartbeat, used here for its resource agents
-rw-r--r-- 1 root root     289600 Apr 23 14:21 heartbeat-libs-3.0.3-2.3.el5.i386.rpm      # heartbeat libraries
-rw-r--r-- 1 root root      35768 Mar 25 06:49 install.log
-rw-r--r-- 1 root root       4713 Mar 25 06:49 install.log.syslog
-rw-r--r-- 1 root root     125974 May 7 11:12 kmod-drbd83-8.3.8-1.el5.centos.i686.rpm     # DRBD kernel module
-rw-r--r-- 1 root root      60458 Apr 23 14:21 libesmtp-1.0.4-5.el5.i386.rpm
-rw-r--r-- 1 root root 162247449 Jul 15 2011 mysql-5.5.15-linux2.6-i686.tar.gz
-rw-r--r-- 1 root root     207085 Apr 23 14:21 openais-1.1.3-1.6.el5.i386.rpm             # openais, provides additional functionality for pacemaker
-rw-r--r-- 1 root root      94614 Apr 23 14:21 openaislib-1.1.3-1.6.el5.i386.rpm
-rw-r--r-- 1 root root     796813 Apr 23 14:21 pacemaker-1.1.5-1.1.el5.i386.rpm           # pacemaker, the cluster resource manager
-rw-r--r-- 1 root root     207925 Apr 23 14:21 pacemaker-cts-1.1.5-1.1.el5.i386.rpm
-rw-r--r-- 1 root root     332026 Apr 23 14:21 pacemaker-libs-1.1.5-1.1.el5.i386.rpm
-rw-r--r-- 1 root root      32818 Apr 23 14:21 perl-TimeDate-1.16-5.el5.noarch.rpm
-rw-r--r-- 1 root root     388632 Apr 23 14:21 resource-agents-1.0.4-1.1.el5.i386.rpm
[root@node1 ~]#
On node2
[root@node2 ~]# ll
total 162336
drwxr-xr-x 2 root root       4096 Mar 24 22:55 Desktop
-rw------- 1 root root       1284 Mar 25 06:50 anaconda-ks.cfg
-rw-r--r-- 1 root root     271360 Apr 23 14:21 cluster-glue-1.0.6-1.6.el5.i386.rpm
-rw-r--r-- 1 root root     133254 Apr 23 14:21 cluster-glue-libs-1.0.6-1.6.el5.i386.rpm
-rw-r--r-- 1 root root     170052 Apr 23 14:21 corosync-1.2.7-1.1.el5.i386.rpm
-rw-r--r-- 1 root root     158502 Apr 23 14:21 corosynclib-1.2.7-1.1.el5.i386.rpm
-rw-r--r-- 1 root root     221868 May 7 11:12 drbd83-8.3.8-1.el5.centos.i386.rpm
-rw-r--r-- 1 root root     165591 Apr 23 14:21 heartbeat-3.0.3-2.3.el5.i386.rpm
-rw-r--r-- 1 root root     289600 Apr 23 14:21 heartbeat-libs-3.0.3-2.3.el5.i386.rpm
-rw-r--r-- 1 root root      35768 Mar 25 06:49 install.log
-rw-r--r-- 1 root root       4713 Mar 25 06:49 install.log.syslog
-rw-r--r-- 1 root root     125974 May 7 11:12 kmod-drbd83-8.3.8-1.el5.centos.i686.rpm
-rw-r--r-- 1 root root      60458 Apr 23 14:21 libesmtp-1.0.4-5.el5.i386.rpm
-rw-r--r-- 1 root root 162247449 Jul 15 2011 mysql-5.5.15-linux2.6-i686.tar.gz
-rw-r--r-- 1 root root     207085 Apr 23 14:21 openais-1.1.3-1.6.el5.i386.rpm
-rw-r--r-- 1 root root      94614 Apr 23 14:21 openaislib-1.1.3-1.6.el5.i386.rpm
-rw-r--r-- 1 root root     796813 Apr 23 14:21 pacemaker-1.1.5-1.1.el5.i386.rpm
-rw-r--r-- 1 root root     207925 Apr 23 14:21 pacemaker-cts-1.1.5-1.1.el5.i386.rpm
-rw-r--r-- 1 root root     332026 Apr 23 14:21 pacemaker-libs-1.1.5-1.1.el5.i386.rpm
-rw-r--r-- 1 root root      32818 Apr 23 14:21 perl-TimeDate-1.16-5.el5.noarch.rpm
-rw-r--r-- 1 root root     388632 Apr 23 14:21 resource-agents-1.0.4-1.1.el5.i386.rpm
[root@node2 ~]#
4. Install the packages
[root@node1 ~]# yum localinstall *.rpm --nogpgcheck -y    # install all of the packages
[root@node2 ~]# yum localinstall *.rpm --nogpgcheck -y    # install all of the packages
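A quick way to confirm that the core components actually installed (package names taken from the listing above):
[root@node1 ~]# rpm -q corosync pacemaker drbd83 kmod-drbd83 heartbeat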
5. Create an identical partition on each node
On node1
[root@node1 ~]# fdisk -l
 
Disk /dev/sda: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
 
   Device Boot      Start         End      Blocks   Id System
/dev/sda1    *           1          13      104391   83 Linux
/dev/sda2               14        1288    10241437+ 83 Linux
/dev/sda3             1289        1415     1020127+ 82 Linux swap / Solaris
[root@node1 ~]# fdisk /dev/sda
 
The number of cylinders for this disk is set to 2610.
There is nothing wrong with that, but this is larger than 1024,
and could in certain setups cause problems with:
1) software that runs at boot time (e.g., old versions of LILO)
2) booting and partitioning software from other OSs
   (e.g., DOS FDISK, OS/2 FDISK)
 
Command (m for help): n
Command action
   e   extended
   p   primary partition (1-4)
p
Selected partition 4
First cylinder (1416-2610, default 1416):
Using default value 1416
Last cylinder or +size or +sizeM or +sizeK (1416-2610, default 2610): +1G 
 
Command (m for help): p
 
Disk /dev/sda: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
 
   Device Boot      Start         End      Blocks   Id System
/dev/sda1    *           1          13      104391   83 Linux
/dev/sda2               14        1288    10241437+ 83 Linux
/dev/sda3             1289        1415     1020127+ 82 Linux swap / Solaris
/dev/sda4             1416        1538      987997+ 83 Linux
 
Command (m for help): w
The partition table has been altered!
 
Calling ioctl() to re-read partition table.
 
WARNING: Re-reading the partition table failed with error 16: Device or resource busy.
The kernel still uses the old table.
The new table will be used at the next reboot.
Syncing disks.
[root@node1 ~]# partprobe /dev/sda
[root@node1 ~]# cat /proc/partitions
major minor #blocks name
 
   8     0   20971520 sda
   8     1     104391 sda1
   8     2   10241437 sda2
   8     3    1020127 sda3
   8     4     987997 sda4
[root@node1 ~]#
On node2
[root@node2 ~]# fdisk -l
 
Disk /dev/sda: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
 
   Device Boot      Start         End      Blocks   Id System
/dev/sda1    *           1          13      104391   83 Linux
/dev/sda2               14        1318    10482412+ 83 Linux
/dev/sda3             1319        1383      522112+ 82 Linux swap / Solaris
[root@node2 ~]# fdisk /dev/sda
 
The number of cylinders for this disk is set to 2610.
There is nothing wrong with that, but this is larger than 1024,
and could in certain setups cause problems with:
1) software that runs at boot time (e.g., old versions of LILO)
2) booting and partitioning software from other OSs
   (e.g., DOS FDISK, OS/2 FDISK)
 
Command (m for help): p
 
Disk /dev/sda: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
 
   Device Boot      Start         End      Blocks   Id System
/dev/sda1    *           1          13      104391   83 Linux
/dev/sda2               14        1318    10482412+ 83 Linux
/dev/sda3             1319        1383      522112+ 82 Linux swap / Solaris
 
Command (m for help): n
Command action
   e   extended
   p   primary partition (1-4)
p
Selected partition 4
First cylinder (1384-2610, default 1384):
Using default value 1384
Last cylinder or +size or +sizeM or +sizeK (1384-2610, default 2610): +1G
 
Command (m for help): w
The partition table has been altered!
 
Calling ioctl() to re-read partition table.
 
WARNING: Re-reading the partition table failed with error 16:
The kernel still uses the old table.
The new table will be used at the next reboot.
Syncing disks.
[root@node2 ~]# fdisk -l
 
Disk /dev/sda: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
 
   Device Boot      Start         End      Blocks   Id System
/dev/sda1    *           1          13      104391   83 Linux
/dev/sda2               14        1318    10482412+ 83 Linux
/dev/sda3             1319        1383      522112+ 82 Linux swap / Solaris
/dev/sda4             1384        1506      987997+ 83 Linux
[root@node2 ~]# partprobe /dev/sda
[root@node2 ~]# cat /proc/partitions
major minor #blocks name
 
   8     0   20971520 sda
   8     1     104391 sda1
   8     2   10482412 sda2
   8     3     522112 sda3
   8     4     987997 sda4
[root@node2 ~]#
6. Configure DRBD
On node1
[root@node1 ~]# cp /usr/share/doc/drbd83-8.3.8/drbd.conf /etc/
[root@node1 ~]# cd /etc/drbd.d/
[root@node1 drbd.d]# cp global_common.conf global_common.conf.bak
[root@node1 drbd.d]# cat global_common.conf
global {
        usage-count no; 
}
common {
        protocol C;
        handlers {
                pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
                pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
                local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
        }
        startup {
                wfc-timeout 120;
                degr-wfc-timeout 120;
        }
        disk {
                on-io-error detach;
        }
        net {
                cram-hmac-alg "sha1";
                shared-secret "mydrbdlab";
        }
        syncer {
                rate 100M; 
        }
}
 
[root@node1 drbd.d]#
[root@node1 drbd.d]#
[root@node1 drbd.d]# vim /etc/drbd.d/mysql.res
resource mysql {
 on node1.hf.com {
    device    /dev/drbd0;
    disk      /dev/sda4;
    address   192.168.100.10:7789;
    meta-disk internal;
 }
 on node2.hf.com {
    device    /dev/drbd0;
    disk      /dev/sda4;
    address   192.168.100.20:7789;
    meta-disk internal;
 }
}
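For reference, the /etc/drbd.conf copied from the package documentation earlier normally does nothing except include the files under /etc/drbd.d/; this is the stock drbd 8.3 layout (verify against your own copy):
include "drbd.d/global_common.conf";
include "drbd.d/*.res";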
Copy the configuration to node2
[root@node1 drbd.d]# scp -r /etc/drbd.* node2.hf.com:/etc/
drbd.conf                                      100% 133     0.1KB/s   00:00   
global_common.conf                             100% 1547     1.5KB/s   00:00   
global_common.conf.bak                         100% 1418     1.4KB/s   00:00   
mysql.res                                      100% 288     0.3KB/s   00:00   
[root@node1 drbd.d]#
Initialize the newly defined mysql resource on node1
[root@node1 drbd.d]# drbdadm create-md mysql
Writing meta data...
initializing activity log
NOT initialized bitmap
New drbd meta data block successfully created.
[root@node1 drbd.d]#
Initialize the newly defined mysql resource on node2
[root@node2 ~]# drbdadm create-md mysql
Writing meta data...
initializing activity log
NOT initialized bitmap
New drbd meta data block successfully created.
[root@node2 ~]#
Start the DRBD service
[root@node1 drbd.d]# service drbd start
Starting DRBD resources: [
mysql
Found valid meta data in the expected location, 1011703808 bytes into /dev/sda4.
d(mysql) s(mysql) n(mysql) ]..........
***************************************************************
 DRBD's startup script waits for the peer node(s) to appear.
 - In case this node was already a degraded cluster before the
   reboot the timeout is 120 seconds. [degr-wfc-timeout]
 - If the peer was available before the reboot the timeout will
   expire after 120 seconds. [wfc-timeout]
   (These values are for resource 'mysql'; 0 sec -> wait forever)
 To abort waiting enter 'yes' [ 28]:
.
[root@node1 drbd.d]#
 
[root@node2 ~]# service drbd start
Starting DRBD resources: [
mysql
Found valid meta data in the expected location, 1011703808 bytes into /dev/sda4.
d(mysql) s(mysql) n(mysql) ].
[root@node2 ~]#
 
Check the status
On node1
[root@node1 drbd.d]# drbd-overview
 0:mysql Connected Secondary/Secondary Inconsistent/Inconsistent C r----
[root@node1 drbd.d]#
 
On node2
[root@node2 ~]# drbd-overview
 0:mysql Connected Secondary/Secondary Inconsistent/Inconsistent C r----
[root@node2 ~]#
Set the primary node and check
On node1
[root@node1 drbd.d]# drbdadm -- --overwrite-data-of-peer primary mysql
[root@node1 drbd.d]# drbd-overview
 0:mysql SyncSource Primary/Secondary UpToDate/Inconsistent C r----
        [>....................] sync'ed: 3.4% (961816/987928)K delay_probe: 1
[root@node1 drbd.d]#
On node2
[root@node2 ~]# drbd-overview
 0:mysql SyncTarget Secondary/Primary Inconsistent/UpToDate C r----
        [================>...] sync'ed: 88.9% (113368/987928)K queue_delay: 1.0 ms
[root@node2 ~]#
 
Watch the synchronization progress
The command: watch -n 1 'cat /proc/drbd'
Create the filesystem
On node1
[root@node1 drbd.d]# mkfs -t ext3 /dev/drbd0                                   
mke2fs 1.39 (29-May-2006)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
123648 inodes, 246982 blocks
12349 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=255852544
8 block groups
32768 blocks per group, 32768 fragments per group
15456 inodes per group
Superblock backups stored on blocks:
        32768, 98304, 163840, 229376
 
Writing inode tables: done                            
Creating journal (4096 blocks): done
Writing superblocks and filesystem accounting information: done
 
This filesystem will be automatically checked every 33 mounts or
180 days, whichever comes first. Use tune2fs -c or -i to override.
[root@node1 drbd.d]# mkdir /mysqldata           
[root@node1 drbd.d]# mount /dev/drbd0 /mysqldata/
[root@node1 drbd.d]# cd /mysqldata/
[root@node1 mysqldata]# touch f1 f2
[root@node1 mysqldata]# cd ../        
[root@node1 /]# cd /etc/drbd.d/
[root@node1 drbd.d]# ll /mysqldata/
total 16
-rw-r--r-- 1 root root      0 May 12 12:22 f1
-rw-r--r-- 1 root root      0 May 12 12:22 f2
drwx------ 2 root root 16384 May 12 12:20 lost+found
[root@node1 drbd.d]# umount /mysqldata
[root@node1 drbd.d]# drbdadm secondary mysql
[root@node1 drbd.d]# drbd-overview
 0:mysql Connected Secondary/Secondary UpToDate/UpToDate C r----
[root@node1 drbd.d]#
Promote node2 to the primary role
[root@node2 ~]# drbdadm primary mysql
[root@node2 ~]# drbd-overview
 0:mysql Connected Primary/Secondary UpToDate/UpToDate C r----
[root@node2 ~]# mkdir /mysqldata
[root@node2 ~]# mount /dev/drbd0 /mysqldata
[root@node2 ~]# ll /mysqldata/
total 16
-rw-r--r-- 1 root root      0 05-12 12:22 f1
-rw-r--r-- 1 root root      0 05-12 12:22 f2
drwx------ 2 root root 16384 05-12 12:20 lost+found
[root@node2 ~]# umount /mysqldata/
[root@node2 ~]#
7. Install and configure MySQL
Create the mysql user and group on node1:
[root@node1 drbd.d]# groupadd -r mysql
[root@node1 drbd.d]# useradd -g mysql -r mysql
[root@node1 drbd.d]#
On node2, demote the DRBD resource so node1 can become primary again:
[root@node2 ~]# drbdadm secondary mysql
[root@node2 ~]#
Node1 takes back the primary role
[root@node1 drbd.d]# drbdadm primary mysql
[root@node1 drbd.d]# drbd-overview
 0:mysql Connected Primary/Secondary UpToDate/UpToDate C r----
[root@node1 drbd.d]#
Mount the DRBD device
[root@node1 drbd.d]# mount /dev/drbd0 /mysqldata/
[root@node1 drbd.d]# mkdir /mysqldata/data
[root@node1 drbd.d]# chown -R mysql.mysql /mysqldata/data/
[root@node1 drbd.d]# ls /mysqldata/
data f1 f2 lost+found
Install MySQL
[root@node1 ~]# tar -zxvf mysql-5.5.15-linux2.6-i686.tar.gz -C /usr/local/
Create a symlink
[root@node1 ~]# cd /usr/local/
[root@node1 local]# ln -sv mysql-5.5.15-linux2.6-i686/ mysql
create symbolic link `mysql' to `mysql-5.5.15-linux2.6-i686/'
[root@node1 local]# cd mysql
[root@node1 mysql]# chown -R mysql:mysql .
Initialize the database
[root@node1 mysql]# scripts/mysql_install_db --user=mysql --datadir=/mysqldata/data
[root@node1 mysql]# chown -R root .
Provide the main configuration file for MySQL
[root@node1 mysql]# cp support-files/my-large.cnf /etc/my.cnf
[root@node1 mysql]# vim /etc/my.cnf
Set thread_concurrency in this file to twice the number of CPUs; for example, here:
thread_concurrency = 2
Also add the following line to place the MySQL data files on the DRBD device:
datadir = /mysqldata/data
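Putting the two changes together, the relevant part of /etc/my.cnf looks roughly like this (a sketch; all other my-large.cnf defaults are left untouched):
[mysqld]
datadir            = /mysqldata/data
thread_concurrency = 2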
 
Provide a SysV service script for MySQL so it can be managed with the service command:
 
[root@node1 mysql]# cp support-files/mysql.server /etc/rc.d/init.d/mysqld
[root@node1 mysql]#
The configuration file and SysV service script on node2 are identical, so simply copy them over:
[root@node1 mysql]# scp /etc/my.cnf node2.hf.com:/etc/
my.cnf                                         100% 4690     4.6KB/s   00:00   
[root@node1 mysql]# scp /etc/rc.d/init.d/mysqld node2.hf.com:/etc/rc.d/init.d/
mysqld                                         100%   10KB 10.4KB/s   00:00   
[root@node1 mysql]# chkconfig --add mysqld
Disable start at boot (the cluster will decide where mysqld runs)
 
[root@node1 mysql]# chkconfig mysqld off
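To confirm that the service is registered but disabled in every runlevel (a quick check, not in the original capture):
[root@node1 mysql]# chkconfig --list mysqld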
Test starting and stopping the service
 
[root@node1 mysql]# service mysqld start
Starting MySQL.................                             [ OK ]
[root@node1 mysql]# service mysqld stop
Shutting down MySQL..                                       [ OK ]
[root@node1 mysql]#
After the test, with the service stopped, check the data directory
 
[root@node1 mysql]# ls /mysqldata/data/
ib_logfile0 ibdata1 mysql-bin.000001 node1.hf.com.err     test
ib_logfile1 mysql     mysql-bin.index   performance_schema
[root@node1 mysql]#
[root@node1 mysql]# vim /etc/man.config
 
MANPATH /usr/local/mysql/man
 
[root@node1 mysql]# vim /etc/profile
PATH=$PATH:/usr/local/mysql/bin  
[root@node1 mysql]# . /etc/profile
[root@node1 mysql]# echo $PATH
/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin
[root@node1 mysql]#
[root@node1 mysql]# ln -sv /usr/local/mysql/include/ /usr/include/mysql
create symbolic link `/usr/include/mysql' to `/usr/local/mysql/include/'
[root@node1 mysql]# echo '/usr/local/mysql/lib'> /etc/ld.so.conf.d/mysql.conf
[root@node1 mysql]# ldconfig
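A quick check that the dynamic linker now sees the MySQL client library (not part of the original steps):
[root@node1 mysql]# ldconfig -p | grep mysql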
 
Configuration on node2
First release the DRBD device on node1:
[root@node1 mysql]# umount /mysqldata/
[root@node1 mysql]# drbd-overview     
 0:mysql Connected Secondary/Secondary UpToDate/UpToDate C r----
[root@node2 mysql]# groupadd -r mysql
[root@node2 mysql]# useradd -g mysql -r mysql
[root@node2 mysql]# drbdadm primary mysql          # promote node2 to primary
[root@node2 mysql]# drbd-overview                  # check
 0:mysql Connected Primary/Secondary UpToDate/UpToDate C r----
[root@node2 mysql]# mount /dev/drbd0 /mysqldata/   # mount the DRBD device
[root@node2 mysql]# ls /mysqldata/                 # check the contents
data f1 f2 lost+found
[root@node2 mysql]#
Extract MySQL
[root@node2 ~]# tar -zxvf mysql-5.5.15-linux2.6-i686.tar.gz -C /usr/local/
[root@node2 ~]# cd /usr/local/
[root@node2 local]# ln -sv mysql-5.5.15-linux2.6-i686/ mysql    # create the symlink
[root@node2 local]# cd mysql
Because the database was already initialized on node1 (the data lives on the shared DRBD device), do not initialize it again here.
[root@node2 mysql]# chown -R root:mysql .
The MySQL configuration file and SysV service script were already copied over from node1, so they do not need to be added again.
Add it to the service list and disable start at boot:
[root@node2 mysql]# chkconfig --add mysqld
[root@node2 mysql]# chkconfig mysqld off
Test the service
[root@node2 mysql]# service mysqld start
Starting MySQL.................                             [ OK ]
[root@node2 mysql]# service mysqld stop
Shutting down MySQL..                                       [ OK ]
Check the data directory
[root@node2 mysql]# ls /mysqldata/data/
ibdata1       mysql             mysql-bin.000003 node2.hf.com.err
ib_logfile0 mysql-bin.000001 mysql-bin.index    performance_schema
ib_logfile1 mysql-bin.000002 node1.hf.com.err test
[root@node2 mysql]#
[root@node2 mysql]# vim /etc/man.config
Add:
MANPATH  /usr/local/mysql/man
[root@node2 mysql]# ln -sv /usr/local/mysql/include/ /usr/include/mysql
[root@node2 mysql]# echo '/usr/local/mysql/lib' > /etc/ld.so.conf.d/mysql.conf
[root@node2 mysql]# ldconfig
[root@node2 mysql]# echo $PATH
/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin
[root@node2 mysql]#
Unmount the DRBD device
[root@node2 mysql]# umount /dev/drbd0
8. Install and configure Corosync + Pacemaker
The packages were already installed earlier.
1. Configure corosync
[root@node1 mysql]# cd /etc/corosync/
[root@node1 corosync]# cp corosync.conf.example corosync.conf
[root@node1 corosync]# vim corosync.conf
compatibility: whitetank
totem {                                  # protocol settings used for the cluster heartbeat traffic
        version: 2
        secauth: off
        threads: 0
        interface {
                ringnumber: 0
                bindnetaddr: 192.168.100.0   # the only line that needs changing: the cluster network address
                mcastaddr: 226.94.1.1
                mcastport: 5405
        }
}
logging {
        fileline: off
        to_stderr: no                    # whether to send messages to standard error
        to_logfile: yes                  # log to a file
        to_syslog: yes                   # log to syslog (turning one of the two off is recommended; logging twice costs performance)
        logfile: /var/log/cluster/corosync.log   # the cluster directory has to be created by hand
        debug: off                       # enable only when troubleshooting
        timestamp: on                    # record timestamps in the log
        # the following subsystem section belongs to openais and can be left as shipped
        logger_subsys {
                subsys: AMF
                debug: off
        }
}
amf {
        mode: disabled
}
# addition: the sections above only cover the messaging layer; declare pacemaker as a service on top of it
service {
        ver: 0
        name: pacemaker
        use_mgmtd: yes
}
# openais itself is not used, but these sub-options are still required
aisexec {
        user: root
        group: root
}
2. Create the cluster log directory
[root@node1 corosync]# mkdir /var/log/cluster
3. To keep unauthorized hosts from joining the cluster, authentication is needed; generate an authkey
[root@node1 corosync]# corosync-keygen
Corosync Cluster Engine Authentication key generator.
Gathering 1024 bits for key from /dev/random.
Press keys on your keyboard to generate entropy.
Writing corosync key to /etc/corosync/authkey.
[root@node1 corosync]#
[root@node1 corosync]# ll
total 28
-rw-r--r-- 1 root root 5384 Jul 28 2010 amf.conf.example
-r-------- 1 root root 128 May 12 15:40 authkey
-rw-r--r-- 1 root root 533 May 12 15:34 corosync.conf
-rw-r--r-- 1 root root 436 Jul 28 2010 corosync.conf.example
drwxr-xr-x 2 root root 4096 Jul 28 2010 service.d
drwxr-xr-x 2 root root 4096 Jul 28 2010 uidgid.d
[root@node1 corosync]#
[root@node1 corosync]# scp -p authkey corosync.conf node2.hf.com:/etc/corosync/
authkey                                       100% 128     0.1KB/s   00:00   
corosync.conf                                 100% 533     0.5KB/s   00:00   
[root@node1 corosync]#
[root@node1 corosync]# ssh node2.hf.com 'mkdir /var/log/cluster'
Start the Corosync service
[root@node1 corosync]# service corosync start
Starting Corosync Cluster Engine (corosync):               [ OK ]
 [root@node2 corosync]# service corosync start
Starting Corosync Cluster Engine (corosync):               [ OK ]
[root@node2 corosync]#
Verify that the Corosync engine started properly
On node1
[root@node1 corosync]# grep -i -e "corosync cluster engine" -e "configuration file" /var/log/messages
Mar 24 22:54:47 localhost smartd[2876]: Opened configuration file /etc/smartd.conf
Mar 24 22:54:47 localhost smartd[2876]: Configuration file /etc/smartd.conf was parsed, found DEVICESCAN, scanning devices
May 12 09:24:57 localhost smartd[2884]: Opened configuration file /etc/smartd.conf
May 12 09:24:57 localhost smartd[2884]: Configuration file /etc/smartd.conf was parsed, found DEVICESCAN, scanning devices
May 12 15:45:11 localhost corosync[4873]:   [MAIN ] Corosync Cluster Engine ('1.2.7'): started and ready to provide service.
May 12 15:45:11 localhost corosync[4873]:   [MAIN ] Successfully read main configuration file '/etc/corosync/corosync.conf'.
[root@node1 corosync]#
On node2
[root@node2 corosync]# grep -i -e "corosync cluster engine" -e "configuration file" /var/log/messages
Mar 24 19:01:17 localhost smartd[2837]: Opened configuration file /etc/smartd.conf
Mar 24 19:01:18 localhost smartd[2837]: Configuration file /etc/smartd.conf was parsed, found DEVICESCAN, scanning devices
May 12 15:46:10 localhost corosync[12566]:   [MAIN ] Corosync Cluster Engine ('1.2.7'): started and ready to provide service.
May 12 15:46:10 localhost corosync[12566]:   [MAIN ] Successfully read main configuration file '/etc/corosync/corosync.conf'.
[root@node2 corosync]#
Check that the initial membership notifications were sent
On node1
[root@node1 corosync]# grep -i totem /var/log/messages
May 12 15:45:12 localhost corosync[4873]:   [TOTEM ] Initializing transport (UDP/IP).
May 12 15:45:12 localhost corosync[4873]:   [TOTEM ] Initializing transmit/receive security: libtomcrypt SOBER128/SHA1HMAC (mode 0).
May 12 15:45:12 localhost corosync[4873]:   [TOTEM ] The network interface [192.168.100.10] is now up.
May 12 15:45:27 localhost corosync[4873]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
May 12 15:45:27 localhost corosync[4873]:   [TOTEM ] A processor failed, forming new configuration.
May 12 15:45:27 localhost corosync[4873]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
May 12 15:45:27 localhost corosync[4873]:   [TOTEM ] A processor failed, forming new configuration.
May 12 15:45:27 localhost corosync[4873]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
May 12 15:45:29 localhost corosync[4873]:   [TOTEM ] A processor failed, forming new configuration.
May 12 15:45:29 localhost corosync[4873]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
May 12 15:45:47 localhost corosync[4873]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
May 12 15:45:49 localhost corosync[4873]:   [TOTEM ] A processor failed, forming new configuration.
May 12 15:45:50 localhost corosync[4873]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
May 12 15:45:56 localhost corosync[4873]:   [TOTEM ] A processor failed, forming new configuration.
May 12 15:45:58 localhost corosync[4873]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
May 12 15:46:01 localhost corosync[4873]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
[root@node1 corosync]#
Check on node2
[root@node2 corosync]# grep -i totem /var/log/messages
May 12 15:46:10 localhost corosync[12566]:   [TOTEM ] Initializing transport (UDP/IP).
May 12 15:46:10 localhost corosync[12566]:   [TOTEM ] Initializing transmit/receive security: libtomcrypt SOBER128/SHA1HMAC (mode 0).
May 12 15:46:10 localhost corosync[12566]:   [TOTEM ] The network interface [192.168.100.20] is now up.
May 12 15:46:28 localhost corosync[12566]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
May 12 15:46:29 localhost corosync[12566]:   [TOTEM ] Process pause detected for 1101 ms, flushing membership messages.
May 12 15:46:29 localhost corosync[12566]:   [TOTEM ] A processor failed, forming new configuration.
May 12 15:46:29 localhost corosync[12566]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
May 12 15:46:29 localhost corosync[12566]:   [TOTEM ] Process pause detected for 1844 ms, flushing membership messages.
May 12 15:46:29 localhost corosync[12566]:   [TOTEM ] Process pause detected for 2281 ms, flushing membership messages.
May 12 15:46:30 localhost corosync[12566]:   [TOTEM ] A processor failed, forming new configuration.
May 12 15:46:30 localhost corosync[12566]:   [TOTEM ] Process pause detected for 1786 ms, flushing membership messages.
May 12 15:46:30 localhost corosync[12566]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
[root@node2 corosync]#
Check whether any errors were produced during startup (the mgmtd error below appears because use_mgmtd is enabled in corosync.conf while the management daemon itself is not installed; it can be ignored here)
On node1
[root@node1 corosync]# grep -i error: /var/log/messages |grep -v unpack_resources
May 12 15:45:27 localhost corosync[4873]:   [pcmk ] ERROR: pcmk_wait_dispatch: Child process mgmtd exited (pid=4885, rc=100)
[root@node1 corosync]#
On node2
[root@node2 corosync]# grep -i error: /var/log/messages |grep -v unpack_resources
May 12 15:46:28 localhost corosync[12566]:   [pcmk ] ERROR: pcmk_wait_dispatch: Child process mgmtd exited (pid=12578, rc=100)
[root@node2 corosync]#
9. Check the cluster status
On node1
[root@node1 corosync]# crm status
============
Last updated: Sat May 12 15:56:32 2012
Stack: openais
Current DC: node1.hf.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
0 Resources configured.
============
 
Online: [ node1.hf.com node2.hf.com ]
 
[root@node1 corosync]#
 
 
On node2
[root@node2 corosync]# crm status
============
Last updated: Sat May 12 15:57:12 2012
Stack: openais
Current DC: node1.hf.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
0 Resources configured.
============
 
Online: [ node1.hf.com node2.hf.com ]
 
[root@node2 corosync]#
10. Configure the cluster's global properties

On node1 (there is no STONITH device and this is a two-node cluster, so disable STONITH, ignore loss of quorum, and set a default resource stickiness):
[root@node1 corosync]# crm configure property stonith-enabled=false
[root@node1 corosync]# crm configure property no-quorum-policy=ignore
[root@node1 corosync]# crm configure rsc_defaults resource-stickiness=100
[root@node1 corosync]# crm configure show
node node1.hf.com
node node2.hf.com
property $id="cib-bootstrap-options" \
        dc-version="1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f" \
        cluster-infrastructure="openais" \
        expected-quorum-votes="2" \
        stonith-enabled="false" \
        no-quorum-policy="ignore"
rsc_defaults $id="rsc-options" \
        resource-stickiness="100"
Stop DRBD on this node and disable it at boot; from now on Pacemaker will manage it:
[root@node1 corosync]# service drbd stop
Stopping all DRBD resources: .
[root@node1 corosync]# chkconfig drbd off
[root@node1 corosync]# drbd-overview
drbd not loaded
[root@node1 corosync]#
 
 
 
On node2
[root@node2 corosync]# crm configure property stonith-enabled=false
[root@node2 corosync]# crm configure property no-quorum-policy=ignore
[root@node2 corosync]# crm configure rsc_defaults resource-stickiness=100
[root@node2 corosync]# crm configure show
node node1.hf.com
node node2.hf.com
property $id="cib-bootstrap-options" \
 dc-version="1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f" \
 cluster-infrastructure="openais" \
 expected-quorum-votes="2" \
 stonith-enabled="false" \
 no-quorum-policy="ignore"
rsc_defaults $id="rsc-options" \
 resource-stickiness="100"
[root@node2 corosync]# service drbd stop
Stopping all DRBD resources: .
[root@node2 corosync]# chkconfig drbd off
[root@node2 corosync]# drbd-overview
drbd not loaded
[root@node2 corosync]#
Configure DRBD as a cluster resource:
Check the available resource agent classes on node1
[root@node1 corosync]# crm ra classes
heartbeat
lsb
ocf / heartbeat linbit pacemaker
stonith
[root@node1 corosync]# crm ra list ocf linbit
drbd   
Check on node2
[root@node2 corosync]# crm ra classes
heartbeat
lsb
ocf / heartbeat linbit pacemaker
stonith
[root@node2 corosync]# crm ra list ocf linbit
drbd      
View the metadata of the drbd resource agent
On node1
[root@node1 corosync]# crm ra info ocf:linbit:drbd
This resource agent manages a DRBD resource
as a master/slave resource. DRBD is a shared-nothing replicated storage
device. (ocf:linbit:drbd)
 
Master/Slave OCF Resource Agent for DRBD
 
Parameters (* denotes required, [] the default):
 
drbd_resource* (string): drbd resource name
    The name of the drbd resource from the drbd.conf file.
 
drbdconf (string, [/etc/drbd.conf]): Path to drbd.conf
    Full path to the drbd.conf file.
 
Operations' defaults (advisory minimum):
 
    start         timeout=240
    promote       timeout=90
    demote        timeout=90
    notify        timeout=90
    stop          timeout=100
    monitor_Slave interval=20 timeout=20 start-delay=1m
    monitor_Master interval=10 timeout=20 start-delay=1m
[root@node1 corosync]#
On node2
[root@node2 corosync]# crm ra info ocf:linbit:drbd
This resource agent manages a DRBD resource
as a master/slave resource. DRBD is a shared-nothing replicated storage
device. (ocf:linbit:drbd)
 
Master/Slave OCF Resource Agent for DRBD
 
Parameters (* denotes required, [] the default):
 
drbd_resource* (string): drbd resource name
    The name of the drbd resource from the drbd.conf file.
 
drbdconf (string, [/etc/drbd.conf]): Path to drbd.conf
    Full path to the drbd.conf file.
 
Operations' defaults (advisory minimum):
 
    start         timeout=240
    promote       timeout=90
    demote        timeout=90
    notify        timeout=90
    stop          timeout=100
    monitor_Slave interval=20 timeout=20 start-delay=1m
    monitor_Master interval=10 timeout=20 start-delay=1m
[root@node2 corosync]#
 
 
 
DRBD has to run on both nodes at the same time, but only one node (in the primary/secondary model) can be Master while the other is Slave. It is therefore a special kind of cluster resource: a multi-state (Master/Slave) clone. The nodes are differentiated into Master and Slave, and when the service first starts both nodes are required to be in the Slave state.
[root@node1 corosync]# crm
crm(live)# configure
crm(live)configure# primitive mysqldrbd ocf:heartbeat:drbd params drbd_resource="mysql" op monitor role="Master" interval="30s" op monitor role="Slave" interval="31s" op start timeout="240s" op stop timeout="100s"
crm(live)configure# ms MS_mysqldrbd mysqldrbd meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify="true"
crm(live)configure# show mysqldrbd
primitive mysqldrbd ocf:heartbeat:drbd \
        params drbd_resource="mysql" \
        op monitor interval="30s" role="Master" \
        op monitor interval="31s" role="Slave" \
        op start interval="0" timeout="240s" \
        op stop interval="0" timeout="100s"
crm(live)configure# show MS_mysqldrbd
ms MS_mysqldrbd mysqldrbd \
        meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
crm(live)configure#
crm(live)configure# verify
crm(live)configure# commit
INFO: apparently there is nothing to commit
INFO: try changing something first
crm(live)configure# exit
bye
[root@node1 ~]#
Check the status
[root@node1 ~]# crm status
============
Last updated: Sat May 12 16:20:01 2012
Stack: openais
Current DC: node1.hf.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
0 Resources configured.
============
 
Online: [ node1.hf.com node2.hf.com ]
 
[root@node1 ~]#

On node2
[root@node2 corosync]# crm
crm(live)# configure
crm(live)configure# primitive mysqldrbd ocf:heartbeat:drbd params drbd_resource="mysql" op monitor role="Master" interval="30s" op monitor role="Slave" interval="31s" op start timeout="240s" op stop timeout="100s"
crm(live)configure# ms MS_mysqldrbd mysqldrbd meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify="true"
crm(live)configure# show mysqldrbd
primitive mysqldrbd ocf:heartbeat:drbd \
 params drbd_resource="mysql" \
 op monitor interval="30s" role="Master" \
 op monitor interval="31s" role="Slave" \
 op start interval="0" timeout="240s" \
 op stop interval="0" timeout="100s"
crm(live)configure# show MS_mysqldrbd
ms MS_mysqldrbd mysqldrbd \
 meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# exit
bye
[root@node2 corosync]#
[root@node2 corosync]# crm status
============
Last updated: Sat May 12 16:25:49 2012
Stack: openais
Current DC: node1.hf.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
1 Resources configured.
============
 
Online: [ node1.hf.com node2.hf.com ]
 
 Master/Slave Set: MS_mysqldrbd [mysqldrbd]
     Masters: [ node1.hf.com ]
     Slaves: [ node2.hf.com ]
[root@node2 corosync]#
 
From the output above you can see that the Primary node for the drbd service is currently node1 and the Secondary node is node2. You can also verify on node1 whether the current host has become the Primary node for the mysql resource with the following command:
 
[root@node1 ~]# drbdadm role mysql                             
Primary/Secondary
[root@node1 ~]#
[root@node2 corosync]# drbdadm role mysql
Secondary/Primary
[root@node2 corosync]#
On node1
[root@node1 ~]# crm
crm(live)# configure
crm(live)configure# primitive MysqlFS ocf:heartbeat:Filesystem params device="/dev/drbd0" directory="/mysqldata" fstype="ext3" op start timeout=60s op stop timeout=60s
crm(live)configure# commit
crm(live)configure# exit
bye
[root@node1 ~]#
 
Defining the MySQL resources
First create an IP address resource for the mysql cluster; this is the address clients will use to reach the MySQL service when it is provided through the cluster.
[root@node1 ~]# crm configure primitive myip ocf:heartbeat:IPaddr params ip=192.168.100.100
[root@node1 ~]#
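If the netmask or interface should be stated explicitly instead of being inferred, the IPaddr agent also accepts cidr_netmask and nic parameters; the resource could instead be defined like this (values are assumptions matching the address plan in the preface):
[root@node1 ~]# crm configure primitive myip ocf:heartbeat:IPaddr params ip=192.168.100.100 cidr_netmask=24 nic=eth0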
Configure the mysqld service as a highly available resource:
[root@node1 ~]# crm configure primitive mysqlserver lsb:mysqld
[root@node1 ~]# crm status
============
Last updated: Sat May 12 16:34:28 2012
Stack: openais
Current DC: node1.hf.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
4 Resources configured.
============
 
Online: [ node1.hf.com node2.hf.com ]
 
 Master/Slave Set: MS_mysqldrbd [mysqldrbd]
     Masters: [ node1.hf.com ]
     Slaves: [ node2.hf.com ]
 MysqlFS         (ocf::heartbeat:Filesystem):    Started node1.hf.com
 myip    (ocf::heartbeat:IPaddr):        Started node2.hf.com
[root@node1 ~]#
 
 
 
Configure the resource constraints:
The cluster now owns all the required resources, but it may still not handle them correctly. Resource constraints specify on which cluster nodes resources may run, in what order they are started, and which other resources they depend on. Pacemaker provides three kinds of constraints:
1. Resource Location: defines on which nodes a resource may, may not, or should preferably run.
2. Resource Colocation: defines which resources may or may not run together on the same node.
3. Resource Order: defines the order in which resources are started on a node.
When defining a constraint you also assign it a score. Scores of every kind are central to how the cluster works: everything from migrating resources to deciding which resources to stop in a degraded cluster is done by manipulating scores. Scores are computed per resource, and a node whose score for a resource is negative cannot run that resource; once the scores are computed, the cluster chooses the node with the highest score. INFINITY is currently defined as 1,000,000, and adding or subtracting it follows three basic rules:
1) any value + INFINITY = INFINITY
2) any value - INFINITY = -INFINITY
3) INFINITY - INFINITY = -INFINITY

When defining resource constraints you can also give each individual constraint a score. The score expresses how much weight the constraint carries: constraints with higher scores are applied before constraints with lower scores. By creating additional location constraints with different scores for a given resource, you can control the order of the nodes it fails over to.

We need to define the following constraints:
[root@node1 ~]# crm
crm(live)# configure
crm(live)configure# colocation MysqlFS_with_mysqldrbd inf: MysqlFS MS_mysqldrbd:Master myip mysqlserver
crm(live)configure# order MysqlFS_after_mysqldrbd inf: MS_mysqldrbd:promote MysqlFS:start
crm(live)configure# order myip_after_MysqlFS mandatory: MysqlFS myip
crm(live)configure# order mysqlserver_after_myip mandatory: myip mysqlserver
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# exit
bye
[root@node1 ~]# crm configure show
node node1.hf.com
node node2.hf.com
primitive MysqlFS ocf:heartbeat:Filesystem \
        params device="/dev/drbd0" directory="/mysqldata" fstype="ext3" \
        op start interval="0" timeout="60s" \
        op stop interval="0" timeout="60s"
primitive myip ocf:heartbeat:IPaddr \
        params ip="192.168.100.100"
primitive mysqldrbd ocf:heartbeat:drbd \
        params drbd_resource="mysql" \
        op monitor interval="30s" role="Master" \
        op monitor interval="31s" role="Slave" \
        op start interval="0" timeout="240s" \
        op stop interval="0" timeout="100s"
primitive mysqlserver lsb:mysqld
ms MS_mysqldrbd mysqldrbd \
        meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
colocation MysqlFS_with_mysqldrbd inf: MysqlFS MS_mysqldrbd:Master myip mysqlserver
order MysqlFS_after_mysqldrbd inf: MS_mysqldrbd:promote MysqlFS:start
order myip_after_MysqlFS inf: MysqlFS myip
order mysqlserver_after_myip inf: myip mysqlserver
property $id="cib-bootstrap-options" \
        dc-version="1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f" \
        cluster-infrastructure="openais" \
        expected-quorum-votes="2" \
        stonith-enabled="false" \
        no-quorum-policy="ignore"
rsc_defaults $id="rsc-options" \
        resource-stickiness="100"
[root@node1 ~]# crm status
============
Last updated: Sat May 12 16:38:10 2012
Stack: openais
Current DC: node1.hf.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
4 Resources configured.
============
 
Online: [ node1.hf.com node2.hf.com ]
 
 Master/Slave Set: MS_mysqldrbd [mysqldrbd]
     Masters: [ node1.hf.com ]
     Slaves: [ node2.hf.com ]
 MysqlFS         (ocf::heartbeat:Filesystem):    Started node1.hf.com
 myip    (ocf::heartbeat:IPaddr):        Started node1.hf.com
 mysqlserver     (lsb:mysqld):   Started node1.hf.com
[root@node1 ~]#
As you can see, the service is now running normally on node1.
Check the MySQL service status on node1:
[root@node1 ~]# service mysqld status
MySQL running (7732)                                        [ OK ]
[root@node1 ~]#
Check whether the DRBD filesystem was mounted automatically:
 
[root@node1 ~]# mount
/dev/sda2 on / type ext3 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
/dev/sda1 on /boot type ext3 (rw)
tmpfs on /dev/shm type tmpfs (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
/dev/hdc on /media/RHEL_5.4 i386 DVD type iso9660 (ro,noexec,nosuid,nodev,uid=0)
/dev/hdc on /mnt/cdrom type iso9660 (ro)
/dev/drbd0 on /mysqldata type ext3 (rw)
[root@node1 ~]#
Check the mounted directory and the VIP
[root@node1 ~]# ls /mysqldata/
data f1 f2 lost+found
[root@node1 ~]# ifconfig
eth0       Link encap:Ethernet HWaddr 00:0C:29:B0:88:60 
          inet addr:192.168.100.10 Bcast:192.168.100.255 Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:feb0:8860/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
          RX packets:359151 errors:0 dropped:0 overruns:0 frame:0
          TX packets:839416 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:199127383 (189.9 MiB) TX bytes:1151839097 (1.0 GiB)
          Interrupt:67 Base address:0x2000
 
eth0:0     Link encap:Ethernet HWaddr 00:0C:29:B0:88:60 
          inet addr:192.168.100.100 Bcast:192.168.100.255 Mask:255.255.255.0
          UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
          Interrupt:67 Base address:0x2000
 
lo         Link encap:Local Loopback 
          inet addr:127.0.0.1 Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING MTU:16436 Metric:1
          RX packets:2351 errors:0 dropped:0 overruns:0 frame:0
          TX packets:2351 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:4072117 (3.8 MiB) TX bytes:4072117 (3.8 MiB)
 
[root@node1 ~]#
 
Simulate a node failure
[root@node1 ~]# crm node standby    # take node1 offline
[root@node1 ~]# crm status          # check the cluster status
============
Last updated: Sat May 12 16:43:04 2012
Stack: openais
Current DC: node1.hf.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
4 Resources configured.
============
 
Node node1.hf.com: standby
Online: [ node2.hf.com ]
 
 Master/Slave Set: MS_mysqldrbd [mysqldrbd]
     Slaves: [ node1.hf.com node2.hf.com ]
[root@node1 ~]#
As you can see, the resources have all been switched over to node2.
Check the state on node2 and whether the filesystem was mounted automatically:
[root@node2 ~]# mount
/dev/sda2 on / type ext3 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
/dev/sda1 on /boot type ext3 (rw)
tmpfs on /dev/shm type tmpfs (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
/dev/hdc on /media/RHEL_5.4 i386 DVD type iso9660 (ro,noexec,nosuid,nodev,uid=0)
/dev/hdc on /mnt/cdrom type iso9660 (ro)
/dev/drbd0 on /mysqldata type ext3 (rw)
[root@node2 ~]# service mysqld status
MySQL running (20860)                                    
 
Check the directory
[root@node2 ~]# ls /mysqldata/
data f1 f2 lost+found
Check the VIP
[root@node2 ~]# ifconfig
eth0       Link encap:Ethernet HWaddr 00:0C:29:ED:74:04 
          inet addr:192.168.100.20 Bcast:192.168.100.255 Mask:255.255.255.0
          inet6 addr: fe80::20c:29ff:feed:7404/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
          RX packets:959499 errors:0 dropped:0 overruns:0 frame:0
          TX packets:285287 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:1329787421 (1.2 GiB) TX bytes:27406236 (26.1 MiB)
          Interrupt:67 Base address:0x2000
 
eth0:0     Link encap:Ethernet HWaddr 00:0C:29:ED:74:04 
          inet addr:192.168.100.100 Bcast:192.168.100.255 Mask:255.255.255.0
          UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
          Interrupt:67 Base address:0x2000
 
lo         Link encap:Local Loopback 
          inet addr:127.0.0.1 Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING MTU:16436 Metric:1
          RX packets:20645 errors:0 dropped:0 overruns:0 frame:0
          TX packets:20645 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:4649467 (4.4 MiB) TX bytes:4649467 (4.4 MiB)
 
[root@node2 ~]#
Check the cluster status
[root@node2 ~]# crm status
============
Last updated: Sat May 12 17:56:29 2012
Stack: openais
Current DC: node1.hf.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
4 Resources configured.
============
 
Node node1.hf.com: standby
Online: [ node2.hf.com ]
 
 Master/Slave Set: MS_mysqldrbd [mysqldrbd]
     Masters: [ node2.hf.com ]
     Stopped: [ mysqldrbd:1 ]
 MysqlFS         (ocf::heartbeat:Filesystem):    Started node2.hf.com
 myip    (ocf::heartbeat:IPaddr):        Started node2.hf.com
 mysqlserver     (lsb:mysqld):   Started node2.hf.com
[root@node2 ~]#
Check the directory again
[root@node2 ~]# ls /mysqldata/
data f1 f2 lost+found
[root@node2 ~]#
 
 
[root@node2 ~]# mysql
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 1
Server version: 5.5.15-log MySQL Community Server (GPL)
 
Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
 
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
 
mysql> grant all on *.* to test@'192.168.%.%' identified by '123456';
Query OK, 0 rows affected (0.01 sec)
 
mysql> flush privileges;
Query OK, 0 rows affected (0.01 sec)
 
mysql>
Connect from node1 through the virtual IP
[root@node1 ~]# mysql -u test -h 192.168.100.100 -p123456
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 1
Server version: 5.5.15-log MySQL Community Server (GPL)
 
Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
 
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
 
mysql>
OK
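Once the failover test is finished, node1 can be brought back into the cluster; because resource-stickiness is set to 100, the resources stay on node2 afterwards. A sketch using the same crm shell:
[root@node1 ~]# crm node online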