Understanding how the application stores its data
- When the application saves text data, it stores it in the database
- When the application saves non-text data (images, videos, archives, etc.), it stores it in the corresponding file directory
Verification
- Publish a post whose content includes both text and an image
- Check the image on the NFS server
At this point the share is already in effect.
[root@nfs]# ls /web_share/html/wp-content/uploads/
2022
[root@nfs 01]# ls /web_share/html/wp-content/uploads/2022/
03
- Check the text data on the database server
[root@database ~]# mysql
MariaDB [(none)]> use wordpress;
MariaDB [wordpress]> select * from wp_posts\G
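To focus on the post that was just published, a narrower query can be used (a sketch; ID, post_title and post_status are standard columns of the WordPress wp_posts table):
MariaDB [wordpress]> select ID, post_title, post_status from wp_posts order by ID desc limit 5\G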
Installing an additional scheduler
- proxy2: eth0 -> 192.168.4.6/24; eth1 -> 192.168.2.6/24
- Prepare the environment
[root@zzgrhel8 ~]# clone-vm7
Enter VM number: 7
VM tedu_node07 Create... ...
[root@zzgrhel8 ~]# virsh start tedu_node07
[root@zzgrhel8 ~]# virsh console tedu_node07
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login: root
Password: 123456
# Run the following commands to initialize
hostnamectl set-hostname proxy2
nmcli connection modify eth1 ipv4.method manual ipv4.addresses 192.168.2.6/24
nmcli connection modify eth0 ipv4.method manual ipv4.addresses 192.168.4.6/24
nmcli connection down eth1
nmcli connection up eth1
nmcli connection down eth0
nmcli connection up eth0
echo a | passwd --stdin root
[root@localhost ~]# logout
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
proxy2 login:   # press Ctrl+]
- Configure the HAProxy server
[root@proxy2 ~]# yum install -y haproxy
# Copy the configuration file from the first haproxy server to proxy2
[root@proxy ~]# scp /etc/haproxy/haproxy.cfg 192.168.2.6:/etc/haproxy/
# Start the haproxy service on the second server (proxy2)
[root@proxy2 ~]# systemctl enable haproxy.service --now
[root@proxy2 ~]# ss -tlnp | grep :80
LISTEN 0 128 *:80
- From a client, access http://192.168.4.6
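The new scheduler can also be checked from the command line; a quick sketch (run from any host that can reach 192.168.4.6, assuming the web backends are up):
[root@zzgrhel8 ~]# for i in {1..4}; do curl -s -o /dev/null -w '%{http_code}\n' http://192.168.4.6/; done
# each request should print 200; haproxy distributes them across web1-web3 in round-robin order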
Configuring Keepalived on the schedulers
- Install the package and edit the configuration file
[root@proxy ~]# yum install -y keepalived.x86_64
[root@proxy2 ~]# yum install -y keepalived.x86_64
[root@proxy ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id proxy1                # change this
   vrrp_iptables                   # add this line
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER
    interface ens34                # mind the NIC name
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.4.80               # the VIP address
    }
}
[root@proxy ~]# systemctl enable keepalived.service --now
[root@proxy ~]# ip a s eth0 | grep '4\.80'
    inet 192.168.4.80/32 scope global eth0
Modify the configuration on proxy2 and start the service
[root@proxy2 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id proxy2                # change the id
   vrrp_iptables                   # add this line
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state BACKUP                   # change the state
    interface ens34                # mind the NIC name
    virtual_router_id 51
    priority 80                    # priority lower than the MASTER
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.4.80               # the VIP address
    }
}
[root@proxy2 ~]# systemctl enable keepalived.service --now
[root@proxy2 ~]# ip a s eth0 | grep '4\.80'    # nothing is found; the VIP stays on the MASTER
Note on interface names: on a VMware/Windows host the NIC is typically ens34, while the Linux KVM guests in this lab use eth0; adjust the interface name accordingly.
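If the VIP does not behave as expected, the MASTER's VRRP advertisements can be observed directly from the BACKUP (a troubleshooting sketch; adjust the interface name to your environment):
[root@proxy2 ~]# tcpdump -nn -i eth0 'ip proto 112'
# VRRP is IP protocol 112; with advert_int 1 an advertisement from proxy should arrive about once per second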
- From a client, access http://192.168.4.80
- Configure name resolution on the client
Windows:
192.168.4.80    www.lab.com    # add this to the hosts file
Linux:
[root@zzgrhel8 ~]# vim /etc/hosts
... ...
192.168.4.80    www.lab.com
- From a client, access http://www.lab.com
Verifying high availability
- On the host running the browser, look up the address of www.lab.com
Windows: press Win+R and enter cmd
Linux:
[root@zzgrhel8 ~]# ping -c2 www.lab.com
PING www.lab.com (192.168.4.80) 56(84) bytes of data.
- Verify the VIP
The VIP is currently on proxy
[root@proxy ~]# ip a s eth0 | grep '4\.80'
inet 192.168.4.80/32 scope global eth0
[root@proxy2 ~]# ip a s eth0 | grep '4\.80'    # no VIP here
Simulate a failure of proxy by shutting it down
[root@proxy ~]# shutdown -h now
# Check whether the VIP has appeared on proxy2
[root@proxy2 ~]# ip a s eth0 | grep '4\.80'
inet 192.168.4.80/32 scope global eth0
Keep accessing http://www.lab.com in the browser; the service is still available.
Power proxy back on; the VIP will switch back.
[root@proxy ~]# ip a s eth0 | grep '4\.80'
inet 192.168.4.80/32 scope global eth0
[root@proxy2 ~]# ip a s eth0 | grep '4\.80'    # the VIP is gone from proxy2
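To watch the switchover from the client side while proxy is shut down and restarted, a simple polling loop can be left running (a sketch on the Linux client; at most a request or two should fail while the VIP moves):
[root@zzgrhel8 ~]# while true; do curl -s -o /dev/null --connect-timeout 2 -w '%{http_code}\n' http://www.lab.com/; sleep 1; done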
Configuring ceph
Role | Hostname | IP address |
---|---|---|
ceph node 1 | node1 | 192.168.2.41/24 |
ceph node 2 | node2 | 192.168.2.42/24 |
ceph node 3 | node3 | 192.168.2.43/24 |
- Each machine also needs two additional 20 GB disks
[root@zzgrhel8 ~]# clone-vm7
Enter VM number: 8
[root@zzgrhel8 ~]# clone-vm7
Enter VM number: 9
[root@zzgrhel8 ~]# clone-vm7
Enter VM number: 10
[root@zzgrhel8 ~]# virsh start tedu_node08
[root@zzgrhel8 ~]# virsh start tedu_node09
[root@zzgrhel8 ~]# virsh start tedu_node10
[root@zzgrhel8 ~]# virsh console tedu_node08
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login: root
Password: 123456
# Run the following commands to initialize
hostnamectl set-hostname node1
nmcli connection modify eth1 ipv4.method manual ipv4.addresses 192.168.2.41/24
nmcli connection down eth1
nmcli connection up eth1
echo a | passwd --stdin root
[root@localhost ~]# logout
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login:   # press Ctrl+]
[root@zzgrhel8 ~]# virsh console tedu_node09
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login: root
Password: 123456
# Run the following commands to initialize
hostnamectl set-hostname node2
nmcli connection modify eth1 ipv4.method manual ipv4.addresses 192.168.2.42/24
nmcli connection down eth1
nmcli connection up eth1
echo a | passwd --stdin root
[root@localhost ~]# logout
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login:   # press Ctrl+]
[root@zzgrhel8 ~]# virsh console tedu_node10
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login: root
Password: 123456
# Run the following commands to initialize
hostnamectl set-hostname node3
nmcli connection modify eth1 ipv4.method manual ipv4.addresses 192.168.2.43/24
nmcli connection down eth1
nmcli connection up eth1
echo a | passwd --stdin root
[root@localhost ~]# logout
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login:   # press Ctrl+]
# Check the disks on the three machines
[root@node1 ~]# lsblk
NAME   MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda    253:0    0  30G  0 disk
└─vda1 253:1    0  30G  0 part /
vdb    253:16   0  20G  0 disk
vdc    253:32   0  20G  0 disk
[root@node2 ~]# lsblk
NAME   MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda    253:0    0  30G  0 disk
└─vda1 253:1    0  30G  0 part /
vdb    253:16   0  20G  0 disk
vdc    253:32   0  20G  0 disk
[root@node3 ~]# lsblk
NAME   MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda    253:0    0  30G  0 disk
└─vda1 253:1    0  30G  0 part /
vdb    253:16   0  20G  0 disk
vdc    253:32   0  20G  0 disk
- Configure the ceph yum repository
Provide the yum repository on the physical host. Make sure the firewall on the physical host is turned off.
[root@zzgrhel8 ~]# yum install -y vsftpd
[root@zzgrhel8 ~]# systemctl start vsftpd
[root@zzgrhel8 ~]# mkdir /var/ftp/ceph
[root@zzgrhel8 ~]# cp /linux-soft/2/ceph10.iso /iso/
[root@zzgrhel8 ~]# vim /etc/fstab    # append the following line
/iso/ceph10.iso /var/ftp/ceph iso9660 defaults,loop 0 0
[root@zzgrhel8 ~]# mount -a
[root@zzgrhel8 ~]# df -h /var/ftp/ceph/
Filesystem      Size  Used Avail Use% Mounted on
/dev/loop3 284M 284M 0 100% /var/ftp/ceph
[root@zzgrhel8 ~]# ls /var/ftp/ceph/
EULA MON README Tools
GPL OSD RPM-GPG-KEY-redhat-release TRANS.TBL
Configure yum on nodes node1 through node3
[root@node1 ~]# cat /etc/yum.repos.d/local.repo
[local_repo]
name=CentOS-$releasever - Base
baseurl=ftp://192.168.2.254/centos-1804
enabled=1
gpgcheck=0
[root@node1 ~]# vim /etc/yum.repos.d/ceph.repo
[osd]
name=ceph osd
baseurl=ftp://192.168.2.254/ceph/OSD
enabled=1
gpgcheck=0
[mon]
name=ceph mon
baseurl=ftp://192.168.2.254/ceph/MON
enabled=1
gpgcheck=0
[tools]
name=ceph tools
baseurl=ftp://192.168.2.254/ceph/Tools
enabled=1
gpgcheck=0
[root@node1 ~]# yum repolist
... ...
repolist: 10,013
[root@node1 ~]# scp /etc/yum.repos.d/ceph.repo 192.168.2.42:/etc/yum.repos.d/
[root@node1 ~]# ^42^43
- Make sure SELinux and the firewall are disabled on every node
- Preparation before installing the cluster
Ceph provides the ceph-deploy tool, which lets you drive all nodes from a single node. We will use node1 as the deployment node; all later operations are performed on node1. For this to work, node1 needs passwordless SSH access to the other hosts.
[root@node1 ~]# ssh-keygen    # generate a key pair
[root@node1 ~]# for i in {41..43}
> do
> ssh-copy-id 192.168.2.$i
> done
# Configure name resolution on all hosts. Note: the resolved name must be each machine's hostname
[root@node1 ~]# for i in {1..3}
> do
> echo -e "192.168.2.4$i\tnode$i" >> /etc/hosts
> done
[root@node1 ~]# cat /etc/hosts
... ...
192.168.2.41 node1
192.168.2.42 node2
192.168.2.43 node3
[root@node2 ~]# for i in {1..3}; do echo -e "192.168.2.4$i\tnode$i" >> /etc/hosts; done
[root@node3 ~]# for i in {1..3}; do echo -e "192.168.2.4$i\tnode$i" >> /etc/hosts; done
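Equivalently, since node1 already has passwordless SSH to the other nodes, the finished hosts file can simply be pushed out from node1 (a sketch that does the same thing as the two loops above):
[root@node1 ~]# for i in node{2..3}; do scp /etc/hosts $i:/etc/hosts; done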
- Install the cluster
Install the packages on all three nodes
[root@node1 ~]# for i in node{1..3}
> do
> ssh $i yum install -y ceph-mon ceph-osd ceph-mds ceph-radosgw
> done
Configure node1 as the NTP server
[root@node1 ~]# yum install -y chrony
[root@node1 ~]# vim /etc/chrony.conf
allow 192.168.2.0/24    # allow hosts in 192.168.2.0/24 to synchronize time from this server
local stratum 10        # serve time to other hosts even if this server is not synchronized to any source
[root@node1 ~]# systemctl restart chronyd
Configure node2 and node3 as NTP clients of node1
[root@node1 ~]# for i in node{2..3}
> do
> ssh $i yum install -y chrony
> done
[root@node2 ~]# vim /etc/chrony.conf    # only change line 7
server 192.168.2.41 iburst              # replaces the original gateway server entry
[root@node2 ~]# scp /etc/chrony.conf 192.168.2.43:/etc/
[root@node1 ~]# for i in node{2..3}
> do
> ssh $i systemctl restart chronyd
> done
# Verify that time is synchronized; a ^* in front of node1 means synchronization succeeded
[root@node2 ~]# chronyc sources -v
... ...
^* node1 10 6 17 40 -4385ns[-1241us] +/- 162us
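chronyc tracking gives another quick check; its Reference ID line should point at node1 (192.168.2.41):
[root@node2 ~]# chronyc tracking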
Install the ceph-deploy deployment tool on node1
[root@node1 ~]# yum install -y ceph-deploy
# View the usage help
[root@node1 ~]# ceph-deploy --help
[root@node1 ~]# ceph-deploy mon --help    # view help for the mon subcommand
Create a working directory for ceph-deploy
[root@node1 ~]# mkdir ceph-cluster
[root@node1 ~]# cd ceph-cluster
Create a new cluster.
[root@node1 ceph-cluster]# ceph-deploy new node{1..3}
[root@node1 ceph-cluster]# ls
ceph.conf ceph-deploy-ceph.log ceph.mon.keyring
[root@node1 ceph-cluster]# tree .
.
├── ceph.conf               # cluster configuration file
├── ceph-deploy-ceph.log    # log file
└── ceph.mon.keyring        # shared key
Enable the layered-snapshot (layering) feature.
[root@node1 ceph-cluster]# vim ceph.conf    # append the following line at the end
rbd_default_features = 1
# Initialize the monitors
[root@node1 ceph-cluster]# ceph-deploy mon create-initial
[root@node1 ceph-cluster]# systemctl status ceph-mon*
● ceph-mon@node1.service .. ..
[root@node2 ~]# systemctl status ceph*
● ceph-mon@node2.service ... ...
[root@node3 ~]# systemctl status ceph*
● ceph-mon@node3.service ... ...
# Note: these services can only be started 3 times within 30 minutes; beyond that they fail with an error.
# Check the cluster status
[root@node1 ceph-cluster]# ceph -s
health HEALTH_ERR    # the status is HEALTH_ERR because no OSD disks have been added yet
Create the OSDs
[root@node1 ceph-cluster]# ceph-deploy disk --help
# Initialize the disks on each host. On VMware they appear as sdb and sdc; match the device names to your lsblk output
[root@node1 ceph-cluster]# ceph-deploy disk zap node1:sdb node1:sdc
[root@node1 ceph-cluster]# ceph-deploy disk zap node2:sdb node2:sdc
[root@node1 ceph-cluster]# ceph-deploy disk zap node3:sdb node3:sdc
Create the storage space. ceph splits each disk into two partitions: one 5 GB partition that holds ceph's internal data, and another that uses all the remaining space.
[root@node1 ceph-cluster]# ceph-deploy osd --help
[root@node1 ceph-cluster]# ceph-deploy osd create node1:sd{b,c}
[root@node1 ceph-cluster]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 253:0 0 30G 0 disk
└─sda1 253:1 0 30G 0 part /
sdb 253:16 0 20G 0 disk
├─sdb1 253:17 0 15G 0 part /var/lib/ceph/osd/ceph-0
└─sdb2 253:18 0 5G 0 part
sdc 253:32 0 20G 0 disk
├─sdc1 253:33 0 15G 0 part /var/lib/ceph/osd/ceph-1
└─sdc2 253:34 0 5G 0 part
Two osd processes will appear, because two disks on this node are used by ceph
[root@node1 ceph-cluster]# systemctl status ceph-osd*
Continue initializing the OSDs on the other nodes
[root@node1 ceph-cluster]# ceph-deploy osd create node2:sd{b,c}
[root@node1 ceph-cluster]# ceph-deploy osd create node3:sd{b,c}
# Check the cluster status
[root@node1 ceph-cluster]# ceph -s
health HEALTH_OK    # HEALTH_OK means everything is working
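A quick way to confirm that all six OSDs (two per node) are in service:
[root@node1 ceph-cluster]# ceph osd tree    # osd.0 through osd.5 should all be listed as "up"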
Configuring ceph fs
- Install and enable the MDS
Deploy the MDS from node1 (the MDS itself will run on node3)
[root@node1 ~]# cd ceph-cluster/
[root@node1 ceph-cluster]# ceph-deploy mds create node3
[root@node3 ~]# systemctl status ceph-mds*
1. Create a storage pool named data1 for storing data, with 100 PGs
[root@node1 ceph-cluster]# ceph osd pool create data1 100
2. Create a storage pool named metadata1 for storing metadata
[root@node1 ceph-cluster]# ceph osd pool create metadata1 100
3. Create a cephfs named myfs1; data is stored in data1, metadata in metadata1
[root@node1 ceph-cluster]# ceph fs new myfs1 metadata1 data1
View the storage pools
[root@node1 ceph-cluster]# ceph osd lspools
0 rbd,1 data1,2 metadata1,
[root@node1 ceph-cluster]# ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
92093M 91574M 519M 0.56
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
rbd 0 86469k 0.28 30488M 2606
data1 1 0 0 30488M 0
metadata1 2 2068 0 30488M 20
# View the file system that was just created
[root@node1 ceph-cluster]# ceph fs ls
name: myfs1, metadata pool: metadata1, data pools: [data1 ]
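The MDS state can be checked as well; the single MDS deployed on node3 should report up:active:
[root@node1 ceph-cluster]# ceph mds stat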
Migrating data from NFS to ceph fs
- Stop using the NFS share
# Stop the nginx service
[root@web1 ~]# systemctl stop nginx
[root@web2 ~]# systemctl stop nginx
[root@web3 ~]# systemctl stop nginx
# Unmount the NFS directory
[root@web1 ~]# umount /usr/local/nginx/html/
[root@web2 ~]# umount /usr/local/nginx/html/
[root@web3 ~]# umount /usr/local/nginx/html/
# Remove the NFS automount entry (the last line of /etc/fstab)
[root@web1 ~]# sed -i '$d' /etc/fstab
[root@web2 ~]# sed -i '$d' /etc/fstab
[root@web3 ~]# sed -i '$d' /etc/fstab
- Configure the web servers to use ceph fs
1. Configure yum on each web server and install the ceph client software
[root@node1 ~]# scp /etc/yum.repos.d/ceph.repo 192.168.2.11:/etc/yum.repos.d/
[root@node1 ~]# ^11^12
[root@node1 ~]# ^12^13
[root@web1 ~]# yum install -y ceph-common libcephfs1
[root@web2 ~]# yum install -y ceph-common libcephfs1
[root@web3 ~]# yum install -y ceph-common libcephfs1
2. Look up the user name and key used to connect to ceph
[root@node1 ~]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
key =
3. Mount ceph fs
[root@web1 ~]# vim /etc/fstab    # note: the following entry is a single line
192.168.2.41:6789,192.168.2.42:6789,192.168.2.43:6789:/ /usr/local/nginx/html ceph _netdev,name=admin,secret=AQAah95hfWQFOhAAg3hcQ2FtFuCYB1lRKJMCLQ== 0 0
[root@web1 ~]# mount -a
[root@web1 ~]# df -h
[root@web2 ~]# vim /etc/fstab    # note: the following entry is a single line
192.168.2.41:6789,192.168.2.42:6789,192.168.2.43:6789:/ /usr/local/nginx/html ceph _netdev,name=admin,secret=AQAah95hfWQFOhAAg3hcQ2FtFuCYB1lRKJMCLQ== 0 0
[root@web2 ~]# mount -a
[root@web2 ~]# df -h
[root@web3 ~]# vim /etc/fstab    # note: the following entry is a single line
192.168.2.41:6789,192.168.2.42:6789,192.168.2.43:6789:/ /usr/local/nginx/html ceph _netdev,name=admin,secret=AQAah95hfWQFOhAAg3hcQ2FtFuCYB1lRKJMCLQ== 0 0
[root@web3 ~]# mount -a
[root@web3 ~]# df -h
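As an alternative to editing fstab first, the mount can be tested by hand with the same monitors and admin key (a sketch; unmount again before relying on the fstab entry):
[root@web1 ~]# mount -t ceph 192.168.2.41:6789,192.168.2.42:6789,192.168.2.43:6789:/ /usr/local/nginx/html -o name=admin,secret=AQAah95hfWQFOhAAg3hcQ2FtFuCYB1lRKJMCLQ==
[root@web1 ~]# umount /usr/local/nginx/html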
- Copy the web application from nfs to ceph
[root@nfs ~]# cd /web_share/html/
[root@nfs html]# tar czpf /root/web.tar.gz ./*
[root@nfs html]# scp /root/web.tar.gz 192.168.2.11:/root/
[root@web1 ~]# tar xf web.tar.gz -C /usr/local/nginx/html/
- Start nginx on all web servers
[root@web1 ~]# systemctl start nginx
[root@web2 ~]# systemctl start nginx
[root@web3 ~]# systemctl start nginx
Migration complete.
Customizing the error page
# Place an image named err.png in /usr/local/nginx/html/wp-content
# Create 404.html
[root@web1 html]# vim /usr/local/nginx/html/404.html
<img src="wp-content/err.png">
# Modify the configuration files
[root@web1 html]# vim /usr/local/nginx/conf/nginx.conf
error_page 404 /404.html;
[root@web2 html]# vim /usr/local/nginx/conf/nginx.conf
error_page 404 /404.html;
[root@web3 html]# vim /usr/local/nginx/conf/nginx.conf
error_page 404 /404.html;
[root@web1 html]# systemctl restart nginx.service
[root@web2 html]# systemctl restart nginx.service
[root@web3 html]# systemctl restart nginx.service
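For reference, error_page is placed inside the server block of nginx.conf; a minimal sketch of the relevant context (only the error_page line is new, the other directives stay as already configured):
server {
    listen       80;
    server_name  localhost;
    error_page   404  /404.html;    # serve /404.html whenever nginx returns a 404
    # ... remaining directives unchanged ...
}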
Upgrading nginx
Upgrade procedure
- Comment out web1's address in haproxy
- Upgrade web1
- Test web1
- Add web1's address back in haproxy and comment out web2 and web3
- Upgrade web2 and web3 and test them
- After web2 and web3 pass the tests, add them back in haproxy
Implementation
1. Comment out web1's address in haproxy
[root@proxy ~]# vim /etc/haproxy/haproxy.cfg
... ...
listen wordpress *:80
balance roundrobin
# server web1 192.168.2.11:80 check inter 2000 rise 2 fall 3
server web2 192.168.2.12:80 check inter 2000 rise 2 fall 3
server web3 192.168.2.13:80 check inter 2000 rise 2 fall 3
... ...
[root@proxy ~]# systemctl restart haproxy
2. Upgrade web1
[root@web1 ~]# cd lnmp_soft/
[root@web1 lnmp_soft]# tar xf nginx-1.15.8.tar.gz
[root@web1 lnmp_soft]# cd nginx-1.15.8/
[root@web1 nginx-1.15.8]# ./configure --with-http_ssl_module --with-http_stub_status_module
[root@web1 nginx-1.15.8]# make
[root@web1 nginx-1.15.8]# mv /usr/local/nginx/sbin/nginx ~/nginx.old
[root@web1 nginx-1.15.8]# cp objs/nginx /usr/local/nginx/sbin/
[root@web1 nginx-1.15.8]# make upgrade    # this just restarts nginx with the new binary
[root@web1 nginx-1.15.8]# systemctl restart nginx.service
[root@web1 nginx-1.15.8]# ss -tlnp | grep :80
LISTEN 0 128 *:80
# Visit http://192.168.2.11/ and verify it works
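The running version can also be confirmed from the command line (a quick check; with default server_tokens the Server response header includes the version):
[root@web1 ~]# /usr/local/nginx/sbin/nginx -v                      # prints the nginx version, e.g. nginx/1.15.8
[root@web1 ~]# curl -sI http://192.168.2.11/ | grep -i '^Server:'  # Server header from the upgraded instance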
3. In haproxy, comment out web2 and web3, and add web1 back
[root@proxy ~]# vim /etc/haproxy/haproxy.cfg
... ...
listen wordpress *:80
balance roundrobin
server web1 192.168.2.11:80 check inter 2000 rise 2 fall 3
# server web2 192.168.2.12:80 check inter 2000 rise 2 fall 3
# server web3 192.168.2.13:80 check inter 2000 rise 2 fall 3
... ...
[root@proxy ~]# systemctl restart haproxy
4. Upgrade web2 and web3
[root@web2 ~]# systemctl stop nginx.service
[root@web2 ~]# mv /usr/local/nginx/sbin/nginx ~/nginx.old
[root@web3 ~]# systemctl stop nginx.service
[root@web3 ~]# mv /usr/local/nginx/sbin/nginx ~/nginx.old
[root@web1 ~]# scp /usr/local/nginx/sbin/nginx 192.168.2.12:/usr/local/nginx/sbin/
[root@web1 ~]# ^12^13
[root@web2 ~]# systemctl start nginx.service
[root@web3 ~]# systemctl start nginx.service
# Visit http://192.168.2.12/ and verify it works
# Visit http://192.168.2.13/ and verify it works
5. In haproxy, add web2 and web3 back
[root@proxy ~]# vim /etc/haproxy/haproxy.cfg
... ...
listen wordpress *:80
balance roundrobin
server web1 192.168.2.11:80 check inter 2000 rise 2 fall 3
server web2 192.168.2.12:80 check inter 2000 rise 2 fall 3
server web3 192.168.2.13:80 check inter 2000 rise 2 fall 3
... ...
[root@proxy ~]# systemctl restart haproxy