Installation
yum install rpm-build* -y
rpmbuild -tb haproxy-1.6.11.tar.gz
cd rpmbuild/
[root@server1 rpmbuild]# cd RPMS/
[root@server1 RPMS]# ls
x86_64
[root@server1 RPMS]# cd x86_64/
[root@server1 x86_64]# rpm -qpl haproxy-1.6.11-1.x86_64.rpm
[root@server1 x86_64]# rpm -ivh haproxy-1.6.11-1.x86_64.rpm
Preparing... ########################################### [100%]
1:haproxy ########################################### [100%]
[root@server1 ~]# tar zxf haproxy-1.6.11.tar.gz
[root@server1 ~]# cd haproxy-1.6.11
[root@server1 haproxy-1.6.11]# cd examples/
[root@server1 examples]# cp content-sw-sample.cfg /etc/haproxy/haproxy.cfg
[root@server1 examples]# cd /etc/haproxy/
[root@server1 haproxy]# groupadd -g 200 haproxy
[root@server1 haproxy]# useradd -u 200 -g haproxy haproxy
[root@server1 haproxy]# id haproxy
uid=200(haproxy) gid=200(haproxy) groups=200(haproxy)
Edit the configuration file
[root@server1 haproxy]# vi haproxy.cfg
global
        maxconn         10000
        stats socket    /var/run/haproxy.stat mode 600 level admin
        log             127.0.0.1 local0
        uid             200
        gid             200
        chroot          /var/empty
        daemon

defaults
        mode            http
        log             global
        option          httplog
        option          dontlognull
        monitor-uri     /monitoruri
        maxconn         8000
        timeout client  30s

        option          prefer-last-server
        retries         2
        option          redispatch
        timeout connect 5s
        timeout server  5s

        stats uri       /adminstatus
        stats auth      admin:westos            # basic-auth credentials for the stats page

# The public 'www' address in the DMZ
frontend public
        bind            172.25.254.1:80         # primary IP, port 80
        #bind            192.168.1.10:443 ssl crt /etc/haproxy/haproxy.pem
        #use_backend     static if { hdr_beg(host) -i img }
        #use_backend     static if { path_beg /img /css }
        default_backend static

# The static backend backend for 'Host: img', /img and /css.
backend static                                  # round-robin between server2 and server3
        balance         roundrobin
        server          statsrv1 172.25.254.2:80 check inter 1000
        server          statsrv2 172.25.254.3:80 check inter 1000
[root@server1 haproxy]# /etc/init.d/haproxy restart
Shutting down haproxy: [ OK ]
Starting haproxy: [ OK ]
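The configuration above also enables a monitor URI and a stats page; a quick sanity check from the physical host (using the 172.25.254.1 address bound above) could be:

curl http://172.25.254.1/monitoruri                     # monitor-uri: returns 200 OK while haproxy is alive
curl -u admin:westos http://172.25.254.1/adminstatus    # stats page, credentials from "stats auth"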
Configure the httpd service on server2 and server3 and create an index.html on each.
Test from the physical host.
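The backend setup itself is not shown; a minimal sketch, assuming httpd is not yet installed and each node serves a page that identifies it:

# on server2 (repeat on server3 with its own hostname in index.html)
yum install httpd -y
echo server2 > /var/www/html/index.html
/etc/init.d/httpd start

# on the physical host: repeated requests should alternate between server2 and server3
for i in 1 2 3 4; do curl -s http://172.25.254.1; done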
HAProxy logging
[root@server1 log]# vim /etc/rsyslog.conf
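The exact lines changed are not recorded; a typical rsyslog setup matching the `log 127.0.0.1 local0` directive in haproxy.cfg, with /var/log/haproxy.log as an assumed target file, is:

# /etc/rsyslog.conf: accept syslog messages over UDP
$ModLoad imudp
$UDPServerRun 514
# write haproxy's local0 facility to its own file
local0.*                        /var/log/haproxy.log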
Restart rsyslog.
Continue testing from the client, then check the log.
Backup server
When server2 and server3 are both down, haproxy should fall back to a backup server.
Edit the haproxy configuration file (a sketch of the change follows below).
Then test again from the physical host.
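The exact change is not recorded here; one common approach, assuming a local httpd on server1 listening on port 8080 as a maintenance page (an illustrative choice, not from the original config), is to add a backup server to the static backend:

backend static
        balance         roundrobin
        server          statsrv1 172.25.254.2:80 check inter 1000
        server          statsrv2 172.25.254.3:80 check inter 1000
        server          backupsrv 172.25.254.1:8080 backup       # only used when both real servers fail their health checks

HAProxy sends traffic to a server marked `backup` only when every non-backup server in the backend is down.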
Access control
Test that a blocked client now receives a 403 response (a sketch of the ACL follows).
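The ACL actually used is not shown; a minimal sketch of the idea, added to the `frontend public` section and assuming 172.25.254.50 is the client to be blocked (an illustrative address), is:

        acl blacklist   src 172.25.254.50        # illustrative client address to block
        http-request deny if blacklist           # matching clients receive 403 Forbidden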
Redirection
Test from the physical host: the 403 now becomes a 302 redirect.
Test from server2.
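Again, the exact rule is not recorded; one way to turn the 403 into a redirect, assuming a maintenance page at http://172.25.254.1:8080 (illustrative), is to replace the deny rule with:

        redirect location http://172.25.254.1:8080 if blacklist  # blocked clients now get a 302 instead of a 403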
Static/dynamic separation
Create an image directory under server3's html root
and put a picture named a.jpg inside it.
Start the httpd service on server2 and server3.
Access the site (a sketch of the split follows).
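A sketch of the usual split, sending requests under /image to server3 and everything else to server2 (the backend names here are illustrative):

frontend public
        bind            *:80
        acl url_static  path_beg /image          # the image directory lives on server3
        use_backend     static if url_static
        default_backend app

backend static
        server          statsrv2 172.25.254.3:80 check inter 1000
backend app
        server          statsrv1 172.25.254.2:80 check inter 1000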
Read/write separation
Test: read requests are served only by server2 (a sketch of the rules follows).
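One way this is typically expressed in haproxy.cfg, assuming reads should land on server2 and writes (for example a POST upload) on server3 (the acl and backend names are illustrative):

        acl read        method GET HEAD          # read requests
        acl write       method PUT POST          # write requests
        use_backend app    if read               # app    -> 172.25.254.2 (server2)
        use_backend upload if write              # upload -> 172.25.254.3 (server3)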
haproxy + pacemaker
Bring up a fourth node, server4.
After haproxy is configured,
scp haproxy.cfg to server4.
Install the corosync heartbeat component on both nodes:
yum install corosync pacemaker -y
vi haproxy.cfg
Adjust it to avoid port conflicts.
cp /etc/corosync/corosync.conf.example /etc/corosync/corosync.conf
vim /etc/corosync/corosync.conf
# Please read the corosync.conf.5 manual page
compatibility: whitetank

totem {
        version: 2                              # protocol version
        secauth: off
        threads: 0
        interface {
                ringnumber: 0
                bindnetaddr: 172.25.254.0       # cluster node network
                mcastaddr: 226.94.1.1           # multicast address
                mcastport: 5405                 # multicast port
                ttl: 1
        }
}

logging {
        fileline: off
        to_stderr: no
        to_logfile: yes
        to_syslog: yes
        logfile: /var/log/cluster/corosync.log  # log file location
        debug: off
        timestamp: on
        logger_subsys {
                subsys: AMF
                debug: off
        }
}

amf {
        mode: disabled
}

service {                                       # add the pacemaker service
        name: pacemaker
        ver: 0                                  # 0: pacemaker is started by corosync
                                                # 1: pacemaker is not started automatically
}
scp corosync.conf 172.25.254.4:/etc/corosync/
[root@server1 corosync]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync): [ OK ]
Run this on both server1 and server4.
[root@server4 ~]# yum install crmsh.x86_64 0:1.2.6-0.rc2.2.1 pssh.x86_64 0:2.3.1-2.1 -y
[root@server1 ~]# yum install crmsh.x86_64 0:1.2.6-0.rc2.2.1 pssh.x86_64 0:2.3.1-2.1 -y
[root@server4 ~]# crm_verify -L
Errors found during check: config not valid
-V may provide more details
[root@server4 ~]# crm_verify -LV ## errors are reported: STONITH (fencing) is enabled by default but no fence device has been configured yet
error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
Errors found during check: config not valid
crmsh and pcs are the two command-line front ends for configuring pacemaker; crmsh is used below.
Configure fencing on the physical host
(1) On the physical host
<1> yum search fence
<2> yum install fence-virtd-multicast.x86_64 fence-virtd-libvirt.x86_64 fence-virtd.x86_64 -y ## install fence
<3> rpm -qa | grep fence ## the following three packages must be present
fence-virtd-0.2.3-15.el6.x86_64 fence-virtd-libvirt-0.2.3-15.el6.x86_64 fence-virtd-multicast-0.2.3-15.el6.x86_64
<4> fence_virtd -c ## configure fence; the fence_virtd service must be stopped while doing this
Module search path [/usr/lib64/fence-virt]:    ## press Enter to accept the default
Available backends:
libvirt 0.1
Available listeners:
multicast 1.1
Listener modules are responsible for accepting requests
from fencing clients.
Listener module [multicast]:    ## accept the default
The multicast listener module is designed for use environments
where the guests and hosts may communicate over a network using
multicast.
The multicast address is the address that a client will use to
send fencing requests to fence_virtd.
Multicast IP Address [225.0.0.12]:    ## accept the default
Using ipv4 as family.
Multicast IP Port [1229]:    ## accept the default
Setting a preferred interface causes fence_virtd to listen only
on that interface. Normally, it listens on the default network
interface. In environments where the virtual machines are
using the host machine as a gateway, this *must* be set
(typically to virbr0).
Set to 'none' for no interface.
Interface [none]: br0    ## enter br0 (the bridge interface)
The key file is the shared key information which is used to
authenticate fencing requests. The contents of this file must
be distributed to each physical host and virtual machine within
a cluster.
Key File [/etc/cluster/fence_xvm.key]:    ## accept the default
Backend modules are responsible for routing requests to
the appropriate hypervisor or management layer.
Backend module [checkpoint]:    ## pressing Enter accepts checkpoint, which is rejected below
No backend module named checkpoint found!
Use this value anyway [y/N]? n    ## enter n
Backend module [checkpoint]: libvirt
The libvirt backend module is designed for single desktops or
servers. Do not use in environments where virtual machines
may be migrated between hosts.
Libvirt URI [qemu:///system]:    ## accept the default
=== End Configuration ===
Replace /etc/fence_virt.conf with the above [y/N]? y
<5> /etc/cluster does not exist the first time fence is configured, so create it manually:
mkdir -p /etc/cluster
<6> Generate the fence key:
cd /etc/cluster
dd if=/dev/urandom of=fence_xvm.key bs=128 count=1    ## generates the key file fence_xvm.key (you can check it with the file command)
<7> server1 and server4 must hold the same fence key under /etc/cluster:
scp fence_xvm.key root@172.25.254.1:/etc/cluster
scp fence_xvm.key root@172.25.254.4:/etc/cluster
<8> systemctl start fence_virtd.service    ## start the fence service on the physical host
Once the fence configuration is done, go back to server4.
[root@server4 ~]# crm
crm(live)# configure
crm(live)configure# show
node server1
node server4
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2"
crm(live)configure# property stonith-enabled=false ## disable fencing for now
crm(live)configure# commit
[root@server4 ~]# crm_verify -LV ## no errors once fencing is disabled
[root@server4 ~]# crm
crm(live)# configure
crm(live)configure# show
node server1
node server4
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2" \
stonith-enabled="false"
crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.254.100 cidr_netmask=24 op monitor interval=1min
## define the VIP resource with the heartbeat IPaddr2 agent: VIP address, netmask, and health-check (monitor) interval
crm(live)configure# commit
crm(live)configure# bye
[root@server1 ~]# vim /etc/haproxy/haproxy.cfg
frontend public
bind *:80 ## listen on port 80 on all addresses (so requests to the VIP are accepted)
[root@server4 ~]# crm_mon ## live cluster monitoring
Last updated: Tue Apr 17 22:16:05 2018
Last change: Tue Apr 17 22:12:45 2018 via cibadmin on server4
Stack: classic openais (with plugin)
Current DC: server4 - partition with quorum
Version: 1.1.10-14.el6-368c726
2 Nodes configured, 2 expected votes
1 Resources configured
Online: [ server1 server4 ]
vip (ocf::heartbeat:IPaddr2): Started server1
[root@server4 ~]# crm
crm(live)# configure
crm(live)configure# show
node server1
node server4
primitive vip ocf:heartbeat:IPaddr2 \
params ip="172.25.254.100" cidr_netmask="24" \
op monitor interval="1min"
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2" \
stonith-enabled="false"
crm(live)configure# property no-quorum-policy=ignore ## allow the VIP to fail over even when quorum is lost after a node goes down
crm(live)configure# commit
crm(live)configure# bye
[root@server1 x86_64]# /etc/init.d/corosync stop
[root@server4 ~]# crm_mon ## the resources move from server1 to server4
[root@server1 ~]# cd rpmbuild/RPMS/x86_64/
[root@server1 x86_64]# scp haproxy-1.6.11-1.x86_64.rpm server4:~/
[root@server4 ~]# rpm -ivh haproxy-1.6.11-1.x86_64.rpm
[root@server4 ~]# /etc/init.d/haproxy start
[root@server1 x86_64]# scp /etc/haproxy/haproxy.cfg server4:/etc/haproxy/
[root@server1 x86_64]# /etc/init.d/haproxy stop ## before the cluster manages haproxy, it must be stopped and removed from autostart
Shutting down haproxy: [ OK ]
[root@server1 x86_64]# chkconfig --list haproxy
haproxy 0:off 1:off 2:off 3:off 4:off 5:off 6:off
[root@server1 x86_64]# crm
crm(live)# configure
crm(live)configure# show
node server1
node server4
primitive vip ocf:heartbeat:IPaddr2 \
params ip="172.25.254.100" cidr_netmask="24" \
op monitor interval="1min"
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
expected-quorum-votes="2" \
stonith-enabled="false" \
no-quorum-policy="ignore"
crm(live)configure# primitive haproxy lsb:haproxy op monitor interval=1min
crm(live)configure# commit
crm(live)configure# group lbgroup vip haproxy ## create a group so that vip and haproxy always run on the same server
crm(live)configure# commit
crm(live)configure# bye
[root@server4 cluster]# crm
crm(live)# configure
crm(live)configure# primitive vmfence stonith:fence_xvm params pcmk_host_map="server1:host1;server4:host4" op monitor interval=1min ## define the fence resource, mapping cluster node names to libvirt domain names
crm(live)configure# commit
crm(live)configure# bye
[root@server4 cluster]# crm
crm(live)# configure
crm(live)configure# property stonith-enabled=true ## enable fencing
crm(live)configure# commit
crm(live)configure# bye
[root@server1 ~]# crm
crm(live)# resource
crm(live)resource# cleanup vmfence ## clear the failed state after correcting the fence configuration
Cleaning up vmfence on server1
Cleaning up vmfence on server4
Waiting for 1 replies from the CRMd. OK
crm(live)resource# show
Resource Group: lbgroup
vip (ocf::heartbeat:IPaddr2): Started
haproxy (lsb:haproxy): Started
vmfence (stonith:fence_xvm): Started
Once the cluster service is running, haproxy is controlled by pacemaker.
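A quick way to verify failover, as a sketch: assume host1 is the libvirt domain name mapped to server1 above, and run each command on the node indicated in the comments.

# on server1: put the node in standby (or crash it with: echo c > /proc/sysrq-trigger)
crm node standby server1
# on server4: watch lbgroup (vip + haproxy) move over
crm_mon
# from the physical host: the VIP should keep answering
curl http://172.25.254.100
# manual fence test from the physical host (host1 = the libvirt domain mapped to server1)
fence_xvm -H host1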