RHEL 7: pcs / pacemaker / corosync active-standby high availability setup

1. Prepare the environment

1.1 Configure the yum repository file

mount -t auto /dev/cdrom  /mnt

# remove the stock repo definitions and recreate the directory
rm -rf /etc/yum.repos.d/
mkdir -p /etc/yum.repos.d/



cat > /etc/yum.repos.d/rhel-Media.repo<<'EOF'
# rhel-Media.repo
#
#  This repo can be used with mounted DVD media; verify the mount point for
#  rhel-7.  You can use this repo and yum to install items directly off the
#  DVD ISO that we release.
#
# To use this repo, put in your DVD and use it with the other repos too:
#  yum --enablerepo=rhel7-media [command]
#
# or for ONLY the media repo, do this:
#
#  yum --disablerepo=\* --enablerepo=rhel7-media [command]

[rhel7-media]
name=rhel-$releasever - Media
baseurl=file:///mnt/
gpgcheck=0
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rhel-7
EOF


yum clean all
yum makecache
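To confirm the media repository is actually usable before installing anything, a quick check (not required by the rest of the walkthrough):

# the rhel7-media repo should appear with a non-zero package count
yum repolist enabled
# spot-check that a package from the DVD can be resolved
yum --disablerepo=\* --enablerepo=rhel7-media info pacemaker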

1.2 Install dependency packages

mount -t auto /dev/cdrom  /mnt 

yum -y install gcc make binutils gcc-c++ compat-libstdc++-33 elfutils-libelf-devel elfutils-libelf-devel-static ksh libaio libaio-devel numactl-devel sysstat unixODBC unixODBC-devel pcre-devel libXext* unzip chrony pacemaker pcs fence-agents-all httpd sbd device-mapper device-mapper-multipath

# or install the specific RPM files directly:
yum install pcs-0.9.162-5.el7.x86_64.rpm pacemaker-1.1.18-11.el7.x86_64.rpm pacemaker-cli-1.1.18-11.el7.x86_64.rpm pacemaker-cluster-libs-1.1.18-11.el7.x86_64.rpm corosync-2.4.3-2.el7.x86_64.rpm corosynclib-2.4.3-2.el7.x86_64.rpm pacemaker-libs-1.1.18-11.el7.x86_64.rpm resource-agents-3.9.5-124.el7.x86_64.rpm python-clufter-0.77.0-2.el7.noarch.rpm clufter-bin-0.77.0-2.el7.x86_64.rpm clufter-common-0.77.0-2.el7.noarch.rpm

1.3 Disable the firewall and SELinux

sed -i 's/=enforcing/=disabled/g' /etc/selinux/config
setenforce 0
getenforce

systemctl stop firewalld
systemctl disable firewalld
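If policy does not allow disabling firewalld, an alternative (assuming the standard firewalld "high-availability" service definition is available on RHEL 7) is to open the cluster ports instead:

# allow corosync, pacemaker and pcsd traffic instead of disabling the firewall
firewall-cmd --permanent --add-service=high-availability
firewall-cmd --reload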


1.4 Pin the network interface names

cat >/etc/udev/rules.d/70-persistent-net.rules<<EOF
SUBSYSTEM=="net",ACTION=="add",DRIVERS=="?*",ATTR{address}=="00:0c:29:46:37:a8",ATTR{type}=="1",KERNEL=="eth*",NAME="eth0"
SUBSYSTEM=="net",ACTION=="add",DRIVERS=="?*",ATTR{address}=="00:0c:29:46:37:b2",ATTR{type}=="1",KERNEL=="eth*",NAME="eth1"
SUBSYSTEM=="net",ACTION=="add",DRIVERS=="?*",ATTR{address}=="00:0c:29:46:37:bc",ATTR{type}=="1",KERNEL=="eth*",NAME="eth2"
SUBSYSTEM=="net",ACTION=="add",DRIVERS=="?*",ATTR{address}=="00:0c:29:46:37:c6",ATTR{type}=="1",KERNEL=="eth*",NAME="eth3"
EOF



sed -i 's/quiet/quiet net.ifnames=0 biosdevname=0/g'  /etc/default/grub
cat  /etc/default/grub
grub2-mkconfig -o /boot/grub2/grub.cfg
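After the next reboot the interfaces should come up as eth0-eth3; a quick sanity check against the MAC addresses in the udev rules above:

# print each pinned name together with its MAC address
for i in 0 1 2 3; do
    printf 'eth%s -> ' "$i"
    cat /sys/class/net/eth$i/address
done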

1.5 Configure NIC bonding



systemctl stop  NetworkManager
systemctl disable   NetworkManager 

cat >/etc/sysconfig/network-scripts/ifcfg-bond0<<EOF1
BOOTPROTO=static
DEVICE=bond0
IPADDR=192.168.33.10
NETMASK=255.255.255.0
GATEWAY=192.168.33.1
USERCTL=no
ONBOOT=yes
EOF1
cat >/etc/sysconfig/network-scripts/ifcfg-eth0<<EOF2
DEVICE=eth0
PREFIX=24
BOOTPROTO=static
MASTER=bond0
SLAVE=yes
ONBOOT=yes
EOF2
cat >/etc/sysconfig/network-scripts/ifcfg-eth2<<EOF3
DEVICE=eth2
PREFIX=24
BOOTPROTO=static
MASTER=bond0
SLAVE=yes
ONBOOT=yes
EOF3




cat >/etc/sysconfig/network-scripts/ifcfg-bond1<<EOF1
BOOTPROTO=static
DEVICE=bond1
IPADDR=192.168.1.10
NETMASK=255.255.255.0
USERCTL=no
ONBOOT=yes
EOF1
cat >/etc/sysconfig/network-scripts/ifcfg-eth1<<EOF2
DEVICE=eth1
PREFIX=24
BOOTPROTO=static
MASTER=bond1
SLAVE=yes
ONBOOT=yes
EOF2
cat >/etc/sysconfig/network-scripts/ifcfg-eth3<<EOF3
DEVICE=eth3
PREFIX=24
BOOTPROTO=static
MASTER=bond1
SLAVE=yes
ONBOOT=yes
EOF3



cat >/etc/sysconfig/network-scripts/ifcfg-bond2<<EOF1
BOOTPROTO=static
DEVICE=bond2
IPADDR=19.21.68.10
NETMASK=255.255.255.0
USERCTL=no
ONBOOT=yes
EOF1
cat >/etc/sysconfig/network-scripts/ifcfg-eth4<<EOF2
DEVICE=eth4
PREFIX=24
BOOTPROTO=static
MASTER=bond2
SLAVE=yes
ONBOOT=yes
EOF2
cat >/etc/sysconfig/network-scripts/ifcfg-eth5<<EOF3
DEVICE=eth5
PREFIX=24
BOOTPROTO=static
MASTER=bond2
SLAVE=yes
ONBOOT=yes
EOF3

cat >/etc/modprobe.d/bonding.conf<<EOF4
alias bond0 bonding
alias bond1 bonding
alias bond2 bonding
options bond0 miimon=100 mode=1 primary=eth0
options bond1 miimon=100 mode=1 primary=eth1
options bond2 miimon=100 mode=1 primary=eth4
EOF4
echo "ifenslave bond0 eth0 eth2">>/etc/rc.d/rc.local
echo "ifenslave bond1 eth1 eth3">>/etc/rc.d/rc.local
echo "ifenslave bond2 eth4 eth5">>/etc/rc.d/rc.local

echo "/etc/init.d/network restart">>/etc/rc.d/rc.local
chmod 550 /etc/rc.d/rc.local
chmod 550 /etc/rc.local


reboot

 cat /proc/net/bonding/bond0
 cat /proc/net/bonding/bond1
 cat /proc/net/bonding/bond2
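A compact way to check all three bonds at once after the reboot (same information as the cat commands above):

for b in bond0 bond1 bond2; do
    echo "== $b =="
    grep -E 'Bonding Mode|MII Status|Currently Active Slave' /proc/net/bonding/$b
done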

1.6 Configure /etc/hosts

hostnamectl set-hostname xmcs01      # on the second node: hostnamectl set-hostname xmcs02



cat >>/etc/hosts<<EOF
192.168.33.10 xmcs01
192.168.33.11 xmcs02
192.168.33.12 ha-core-db
19.21.68.10 xmcs01hb
19.21.68.11 xmcs02hb
192.168.1.10 xmcs01fc
192.168.1.11 xmcs02fc
EOF
 cat /etc/hosts

1.7 Configure the shared disk

yum install device-mapper-multipath
Enable the service at boot:
systemctl enable multipathd.service

Add the configuration file. The minimal configuration below is enough for multipath to work; see the device-mapper-multipath documentation for the full set of options.

# vi /etc/multipath.conf
blacklist {
    devnode "^sda"
}
defaults {
    user_friendly_names yes
    path_grouping_policy multibus
    failback immediate
    no_path_retry fail
    find_multipaths yes
    reservation_key 0x1
}
Start the service:
systemctl restart multipathd.service
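To verify that the shared LUN is aggregated by multipath and received a friendly name (the mpathb name used later in the fencing section comes from this environment):

multipath -ll                      # list multipath maps, paths and their states
ls -l /dev/mapper/ | grep mpath    # friendly names show up under /dev/mapper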




fdisk /dev/sdb    # interactive steps: n -> p -> 1 -> <defaults> -> t -> 8e -> w (one primary partition, type Linux LVM)
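The interactive fdisk dialogue can also be scripted, which is handy when preparing several machines; a minimal sketch assuming /dev/sdb is the empty shared disk:

# n=new, p=primary, 1=first partition, two empty lines accept the default
# start/end sectors, t + 8e sets the type to Linux LVM, w writes the table
printf 'n\np\n1\n\n\nt\n8e\nw\n' | fdisk /dev/sdb
partprobe /dev/sdb    # re-read the partition table without rebooting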

Create the PV and VG:
pvcreate /dev/sdb1

pvdisplay /dev/sdb1

vgcreate vg01 /dev/sdb1

Create the LV:
lvcreate -l 100%VG -n lvol1 vg01

mkfs.xfs /dev/vg01/lvol1

mkdir /data

mount /dev/vg01/lvol1 /data
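A quick verification that the volume stack and mount look as expected:

pvs /dev/sdb1    # the PV belongs to vg01
lvs vg01         # lvol1 uses the whole VG
df -h /data      # xfs filesystem mounted on /data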


2. Initialize the cluster

2.1 Start the services

Start the services on every node:

systemctl enable chronyd

systemctl start chronyd

systemctl status chronyd
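Time synchronization matters for correlating cluster logs across nodes; to confirm chrony is actually syncing:

chronyc sources -v    # configured time sources and their state
chronyc tracking      # current offset and stratum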




echo "redhat" |passwd --stdin hacluster

systemctl start pcsd

systemctl enable pcsd



systemctl status pcsd.service 

 

2.2 Authenticate the hacluster user

 



# On node 1: authenticate against both nodes
pcs cluster auth xmcs01 xmcs02 -u hacluster -p redhat

[root@xmcs01 ~]# pcs cluster auth xmcs01 xmcs02 -u hacluster -p redhat
xmcs01: Authorized
xmcs02: Authorized

 

2.3 Create the cluster




# On node 1: generate the cluster configuration files; they are distributed to the other node automatically
pcs cluster setup --name mycluster xmcs01 xmcs02 

[root@xmcs01 ~]# pcs cluster setup --name mycluster xmcs01 xmcs02 
Destroying cluster on nodes: xmcs01, xmcs02...
xmcs01: Stopping Cluster (pacemaker)...
xmcs02: Stopping Cluster (pacemaker)...
xmcs01: Successfully destroyed cluster
xmcs02: Successfully destroyed cluster

Sending 'pacemaker_remote authkey' to 'xmcs01', 'xmcs02'
xmcs01: successful distribution of the file 'pacemaker_remote authkey'
xmcs02: successful distribution of the file 'pacemaker_remote authkey'
Sending cluster config files to the nodes...
xmcs01: Succeeded
xmcs02: Succeeded

Synchronizing pcsd certificates on nodes xmcs01, xmcs02...
xmcs01: Success
xmcs02: Success
Restarting pcsd on the nodes in order to reload the certificates...
xmcs01: Success
xmcs02: Success




[root@xmcs01 ~]# cat /etc/corosync/corosync.conf
totem {
    version: 2
    cluster_name: mycluster
    secauth: off
    transport: udpu
}

nodelist {
    node {
        ring0_addr: xmcs01
        nodeid: 1
    }

    node {
        ring0_addr: xmcs02
        nodeid: 2
    }
}

quorum {
    provider: corosync_votequorum
    two_node: 1
}

logging {
    to_logfile: yes
    logfile: /var/log/cluster/corosync.log
    to_syslog: yes
}



 

2.4 Configure web UI access

vi /usr/lib/pcsd/ssl.rb

webrick_options = {
  :Port               => 2224,
  :BindAddress        => '0.0.0.0',
  :Host               => '0.0.0.0',
  :SSLEnable          => true,
  :SSLVerifyClient    => OpenSSL::SSL::VERIFY_NONE,
  :SSLCertificate     => OpenSSL::X509::Certificate.new(crt),
  :SSLPrivateKey      => OpenSSL::PKey::RSA.new(key),
  :SSLCertName        => [[ "CN", server_name ]],
  :SSLOptions         => get_ssl_options(),
}

    scp /usr/lib/pcsd/ssl.rb xmcs02:/usr/lib/pcsd/ssl.rb


pcs cluster start --all
pcs cluster enable --all

systemctl enable pcsd


 systemctl restart pcsd

# netstat -tunlp     # check that port 2224 is listening

# web UI login: https://192.168.33.10:2224

# credentials: hacluster / redhat
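The same information is available from the command line, which is usually quicker than the web UI; for example:

pcs status                          # overall cluster, node and resource state
pcs cluster status                  # pacemaker/corosync daemon status per node
corosync-cmapctl | grep members     # corosync's view of the current membership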


2.5 Configure redundant heartbeat rings

Add a second ring over the dedicated heartbeat network (19.21.68.0/24) by extending /etc/corosync/corosync.conf on node 1:


[root@xmcs01 ~]# cat /etc/corosync/corosync.conf
totem {
    version: 2
    cluster_name: mycluster
    secauth: off
    transport: udpu
    rrp_mode: passive

    interface {
        ringnumber: 0
        bindnetaddr: 192.168.33.0
        broadcast: yes
        mcastport: 5546
    }

    interface {
        ringnumber: 1
        bindnetaddr: 19.21.68.0
        broadcast: yes
        mcastport: 5547
    }
}

nodelist {
    node {
        ring0_addr: xmcs01
        ring1_addr: xmcs01hb
        nodeid: 1
    }

    node {
        ring0_addr: xmcs02
        ring1_addr: xmcs02hb
        nodeid: 2
    }
}

quorum {
    provider: corosync_votequorum
    two_node: 1
}

logging {
    to_logfile: yes
    logfile: /var/log/cluster/corosync.log
    to_syslog: yes
}

scp /etc/corosync/corosync.conf xmcs02:/etc/corosync/corosync.conf


pcs cluster stop --all
pcs cluster sync
pcs cluster start --all


[root@xmcs01 corosync]# corosync-cfgtool -s
Printing ring status.
Local node ID 1
RING ID 0
        id      = 192.168.68.11
        status  = ring 0 active with no faults
RING ID 1
        id      = 17.21.68.11
        status  = ring 1 active with no faults

 

2.6 Enable HA-LVM

lvmconf --enable-halvm --services --startstopservices

[root@xmcs01 ~]# lvmconf --enable-halvm --services --startstopservices
Warning: Stopping lvm2-lvmetad.service, but it can still be activated by:
  lvm2-lvmetad.socket
Removed symlink /etc/systemd/system/sysinit.target.wants/lvm2-lvmetad.socket.
[root@xmcs02 u01]# lvmconf --enable-halvm --services --startstopservices
Warning: Stopping lvm2-lvmetad.service, but it can still be activated by:
  lvm2-lvmetad.socket
Removed symlink /etc/systemd/system/sysinit.target.wants/lvm2-lvmetad.socket

[root@xmcs02 lvm]# cp /etc/lvm/lvm.conf /etc/lvm/lvm.confbak
[root@xmcs02 lvm]# pwd
/etc/lvm

vi /etc/lvm/lvm.conf

volume_list = [ "centos" ]    # only the local root VG may auto-activate; vg01 is left to the cluster

scp /etc/lvm/lvm.conf xmcs02:/etc/lvm/lvm.conf

 cp /boot/initramfs-3.10.0-862.el7.x86_64.img  /root/.
dracut -H -f /boot/initramfs-$(uname -r).img $(uname -r)
reboot


lvcreate -Zn -n lvdata -l +100%FREE vg01    # -Zn skips zeroing, since vg01 can no longer be activated outside the cluster
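Because vg01 is not in volume_list, its LVs can no longer be auto-activated locally; only the cluster's LVM resource is supposed to activate them. A quick check of the current state (a sketch, nothing cluster-specific):

lvs -o vg_name,lv_name,lv_attr vg01    # the 'a' bit in lv_attr shows whether an LV is active
vgs -o vg_name,vg_tags vg01            # tags, if any, added by the cluster LVM agent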

 

3. Configure the cluster

pcs resource create ha-core-db ocf:heartbeat:IPaddr2 ip=192.168.33.12 cidr_netmask=32 op monitor interval=30s
pcs resource create oracle-lvm LVM volgrpname=vg01 exclusive=yes
pcs resource create oracle-fs Filesystem device='/dev/vg01/lvdata' directory=/data fstype=xfs op monitor interval=10s    # device must match the LV on the shared VG
pcs resource create oracledb ocf:heartbeat:oracle sid=oab home=/u01/app/oracle/product/12.2/db clear_backupmode=1 shutdown_method=immediate --group oracle_group op monitor interval=10s
pcs resource create oralsn ocf:heartbeat:oralsnr sid=oab home=/u01/app/oracle/product/12.2/db --group oracle_group op monitor interval=10s
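After creating the resources it is worth confirming that they all started and ended up where expected, for example:

pcs status resources            # state and location of every resource
pcs resource show oracledb      # configured options of a single resource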







Prevent automatic fail-back (resource stickiness):
pcs resource defaults resource-stickiness=100
pcs resource defaults 

Set the default operation timeout:
pcs resource op defaults timeout=10s
pcs resource op defaults

Quorum policy (two-node cluster):
pcs property set no-quorum-policy=ignore

Move the resources after the first failure:
pcs resource defaults migration-threshold=1




Constraints:
pcs constraint colocation set ha-core-db oracle-lvm oracle-fs oracle_group
pcs constraint order oracle-lvm then oracle-fs
pcs constraint order oracle-fs then oracle_group
pcs constraint

Resource location:
pcs constraint location oracle_group prefers xmcs01    # append =<score> to use a finite score instead of the default INFINITY
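The constraint IDs reported by the following command are what you need later if a constraint has to be removed with pcs constraint remove:

pcs constraint list --full      # show all constraints together with their IDs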






Fencing configuration

Without fencing:
pcs property set stonith-enabled=false

pcs resource defaults resource-stickiness=0
pcs resource defaults 



fence_scsi configuration:
pcs stonith create scsi fence_scsi pcmk_host_list="xmcs01 xmcs02" pcmk_reboot_action="off" devices="/dev/mapper/mpathb" meta provides="unfencing" --force


fence_mpath configuration:

# register a persistent-reservation key on the shared device (key=1 on xmcs01, key=2 on xmcs02)
fence_mpath --devices="/dev/mapper/mpathb" --key=1 --action=on -v
fence_mpath --devices="/dev/mapper/mpathb" --key=2 --action=on -v

pcs stonith create xmcs01-mpath fence_mpath key=1 pcmk_host_list="xmcs01" pcmk_reboot_action="reboot" devices="/dev/mapper/mpathb" meta provides="unfencing"

pcs stonith create xmcs02-mpath fence_mpath key=2 pcmk_host_list="xmcs02" pcmk_reboot_action="reboot" devices="/dev/mapper/mpathb" meta provides="unfencing"

mpathpersist -i -k -d /dev/mapper/mpathb    # list the reservation keys registered on the device




pcs stonith show

pcs property set stonith-enabled=true    # important: re-enable STONITH once the fencing devices are configured
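Once STONITH is enabled it is worth exercising it in a maintenance window before relying on it; for example (this really fences the target node):

# from xmcs01, fence the other node and watch the cluster recover
pcs stonith fence xmcs02
pcs status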


 

 

 

pcs resource cleanup   oracle_group

 

Manual switchover:

 pcs resource move oracle_group  xmcs01
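pcs resource move works by adding a temporary location constraint; once the switchover is done, that constraint should normally be cleared so it does not pin the group forever (assuming this pcs version supports pcs resource clear; otherwise remove the cli-prefer constraint by ID):

pcs resource clear oracle_group      # drop the constraint created by 'move'
pcs constraint location              # confirm no stray location constraints remain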

Testing:

ifdown bond0    # the resources fail over to the other node

ifup bond0      # the resources automatically fail back
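While running the network failover test it helps to watch the cluster from the other node, for example:

crm_mon -1           # one-shot snapshot of the cluster state
pcs status --full    # detailed view including failed actions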

 

 
