Building Two Highly Available Web Site Architectures on KVM (Complete)

Part 1: Preparing the Lab Environment


1.1: Lab Networks

External network: 192.168.1.0/24 (DNS & NTP server: 192.168.1.254)

Internal network: 172.16.1.0/24 (DNS & NTP server: 172.16.1.253)

The KVM guests use bridged networking, sharing the external and internal networks with the physical hosts.

1.2: Preparing the VMs in VMware Workstation

Create two virtual machines:

  1. 2×2 CPUs, 4 GB of RAM, and 4 NICs (2 in bridged mode to connect to the external network, 2 in host-only mode to form the internal network).
  2. Install CentOS 7.2 (minimal) and perform the initial configuration (see [CentOS] - CentOS System Initialization).

The two VMware virtual machines are referred to below as the "physical machines".

All KVM guests are installed from the CentOS 7.2 minimal image.

1.3: Configuring Physical Machine 1

1.3.1: Set the Hostname

  • Change the hostname of physical machine 1 to pm1 so it is easy to tell hosts apart in later commands:
]# hostnamectl set-hostname pm1.yqc.com

1.3.2: External Network Configuration

br0
[root@pm1 ~]# vim /etc/sysconfig/network-scripts/ifcfg-br0
TYPE="Bridge"
BOOTPROTO="static"
NAME="br0"
DEVICE="br0"
ONBOOT="yes"
IPADDR="192.168.1.101"
NETMASK="255.255.255.0"
GATEWAY="192.168.1.1"
DNS1="192.168.1.254"
bond0
[root@pm1 ~]# vim /etc/sysconfig/network-scripts/ifcfg-bond0
BOOTPROTO="none"
NAME="bond0"
DEVICE="bond0"
ONBOOT="yes"
BONDING_MASTER=yes
BONDING_OPTS="mode=1 miimon=100"
BRIDGE="br0"
eth0
[root@pm1 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth0
BOOTPROTO="none"
NAME="eth0"
DEVICE="eth0"
ONBOOT="yes"
NM_CONTROLLED="no"
MASTER="bond0"
USERCTL="no"
SLAVE="yes"
eth1
[root@pm1 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth1
BOOTPROTO="none"
NAME="eth1"
DEVICE="eth1"
ONBOOT="yes"
NM_CONTROLLED="no"
MASTER="bond0"
USERCTL="no"
SLAVE="yes"

1.3.3: Internal Network Configuration

br1
[root@pm1 ~]# vim /etc/sysconfig/network-scripts/ifcfg-br1
TYPE="Bridge"
BOOTPROTO="static"
NAME="br1"
DEVICE="br1"
ONBOOT="yes"
IPADDR="172.16.1.101"
NETMASK="255.255.255.0"
DNS1="172.16.1.253"
bond1
[root@pm1 ~]# vim /etc/sysconfig/network-scripts/ifcfg-bond1
BOOTPROTO="none"
NAME="bond1"
DEVICE="bond1"
ONBOOT="yes"
BONDING_MASTER=yes
BONDING_OPTS="mode=1 miimon=100"
BRIDGE="br1"
eth2
[root@pm1 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth2
BOOTPROTO="none"
NAME="eth2"
DEVICE="eth2"
ONBOOT="yes"
NM_CONTROLLED="no"
MASTER="bond1"
USERCTL="no"
SLAVE="yes"
eth3
[root@pm1 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth3
BOOTPROTO="none"
NAME="eth3"
DEVICE="eth3"
ONBOOT="yes"
NM_CONTROLLED="no"
MASTER="bond1"
USERCTL="no"
SLAVE="yes"
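
After restarting the network service, the bonding driver's own status report is worth a look; a quick check (a sketch, assuming the bonding module is active and bridge-utils is installed):
[root@pm1 ~]# cat /proc/net/bonding/bond0   # shows the bonding mode, MII status, and current active slave
[root@pm1 ~]# brctl show                    # confirms bond0/bond1 are attached to br0/br1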

1.3.4: Verify the Network

  • Verify the network configuration:
[root@pm1 ~]# ifconfig
bond0: flags=5187<UP,BROADCAST,RUNNING,MASTER,MULTICAST>  mtu 1500
        inet6 fe80::20c:29ff:fe89:3d10  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:89:3d:10  txqueuelen 0  (Ethernet)
        RX packets 1304  bytes 194219 (189.6 KiB)
        RX errors 0  dropped 217  overruns 0  frame 0
        TX packets 207  bytes 21728 (21.2 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

bond1: flags=5187<UP,BROADCAST,RUNNING,MASTER,MULTICAST>  mtu 1500
        inet6 fe80::20c:29ff:fe89:3d24  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:89:3d:24  txqueuelen 0  (Ethernet)
        RX packets 36  bytes 5494 (5.3 KiB)
        RX errors 0  dropped 5  overruns 0  frame 0
        TX packets 24  bytes 1852 (1.8 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

br0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.101  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 240e:324:79e:f400:20c:29ff:fe89:3d10  prefixlen 64  scopeid 0x0<global>
        inet6 fe80::20c:29ff:fe89:3d10  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:89:3d:10  txqueuelen 0  (Ethernet)
        RX packets 521  bytes 64927 (63.4 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 199  bytes 20624 (20.1 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

br1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.16.1.101  netmask 255.255.255.0  broadcast 172.16.1.255
        inet6 fe80::20c:29ff:fe89:3d24  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:89:3d:24  txqueuelen 0  (Ethernet)
        RX packets 31  bytes 4760 (4.6 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 16  bytes 1096 (1.0 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth0: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500
        ether 00:0c:29:89:3d:10  txqueuelen 1000  (Ethernet)
        RX packets 2585  bytes 321460 (313.9 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 674  bytes 136653 (133.4 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth1: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500
        ether 00:0c:29:89:3d:10  txqueuelen 1000  (Ethernet)
        RX packets 217  bytes 21442 (20.9 KiB)
        RX errors 0  dropped 217  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth2: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500
        ether 00:0c:29:89:3d:24  txqueuelen 1000  (Ethernet)
        RX packets 31  bytes 5194 (5.0 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 24  bytes 1852 (1.8 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth3: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500
        ether 00:0c:29:89:3d:24  txqueuelen 1000  (Ethernet)
        RX packets 5  bytes 300 (300.0 B)
        RX errors 0  dropped 5  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  • Verify external network connectivity:
[root@pm1 ~]# ping www.baidu.com
PING www.a.shifen.com (180.101.49.12) 56(84) bytes of data.
64 bytes from 180.101.49.12: icmp_seq=1 ttl=52 time=39.5 ms
64 bytes from 180.101.49.12: icmp_seq=2 ttl=52 time=40.0 ms
  • Verify internal network connectivity:
[root@pm1 ~]# ping 172.16.1.1
PING 172.16.1.1 (172.16.1.1) 56(84) bytes of data.
64 bytes from 172.16.1.1: icmp_seq=1 ttl=128 time=5.65 ms
64 bytes from 172.16.1.1: icmp_seq=2 ttl=128 time=0.691 ms

1.3.5: Scheduled NTP Synchronization

[root@pm1 ~]# /usr/sbin/ntpdate 172.16.1.253 && /usr/sbin/hwclock -w
11 Nov 17:20:28 ntpdate[4687]: adjust time server 172.16.1.253 offset 0.014662 sec

[root@pm1 ~]# echo "*/30 * * * * /usr/sbin/ntpdate 172.16.1.253 && /usr/sbin/hwclock -w" > /var/spool/cron/root

1.4: Configuring Physical Machine 2

Physical machine 2 can simply be cloned from physical machine 1.

1.4.1: Set the Hostname

  • Change the hostname of physical machine 2 to pm2 so it is easy to tell hosts apart in later commands:
]# hostnamectl set-hostname pm2.yqc.com

1.4.2: External Network Configuration

br0
[root@pm2 ~]# vim /etc/sysconfig/network-scripts/ifcfg-br0
TYPE="Bridge"
BOOTPROTO="static"
NAME="br0"
DEVICE="br0"
ONBOOT="yes"
IPADDR="192.168.1.102"
NETMASK="255.255.255.0"
GATEWAY="192.168.1.1"
DNS1="192.168.1.254"
bond0
[root@pm2 ~]# vim /etc/sysconfig/network-scripts/ifcfg-bond0
BOOTPROTO="none"
NAME="bond0"
DEVICE="bond0"
ONBOOT="yes"
BONDING_MASTER=yes
BONDING_OPTS="mode=1 miimon=100"
BRIDGE="br0"
eth0
[root@pm2 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth0
BOOTPROTO="none"
NAME="eth0"
DEVICE="eth0"
ONBOOT="yes"
NM_CONTROLLED="no"
MASTER="bond0"
USERCTL="no"
SLAVE="yes"
eth1
[root@pm2 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth1
BOOTPROTO="none"
NAME="eth1"
DEVICE="eth1"
ONBOOT="yes"
NM_CONTROLLED="no"
MASTER="bond0"
USERCTL="no"
SLAVE="yes"

1.4.3: Internal Network Configuration

br1
[root@pm2 ~]# vim /etc/sysconfig/network-scripts/ifcfg-br1
TYPE="Bridge"
BOOTPROTO="static"
NAME="br1"
DEVICE="br1"
ONBOOT="yes"
IPADDR="172.16.1.102"
NETMASK="255.255.255.0"
DNS1="172.16.1.253"
bond1
[root@pm2 ~]# vim /etc/sysconfig/network-scripts/ifcfg-bond1
BOOTPROTO="none"
NAME="bond1"
DEVICE="bond1"
ONBOOT="yes"
BONDING_MASTER=yes
BONDING_OPTS="mode=1 miimon=100"
BRIDGE="br1"
eth2
[root@pm2 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth2
BOOTPROTO="none"
NAME="eth2"
DEVICE="eth2"
ONBOOT="yes"
NM_CONTROLLED="no"
MASTER="bond1"
USERCTL="no"
SLAVE="yes"
eth3
[root@pm2 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth3
BOOTPROTO="none"
NAME="eth3"
DEVICE="eth3"
ONBOOT="yes"
NM_CONTROLLED="no"
MASTER="bond1"
USERCTL="no"
SLAVE="yes"

1.4.4: Verify the Network

  • Verify the network configuration:
[root@pm2 ~]# ifconfig
bond0: flags=5187<UP,BROADCAST,RUNNING,MASTER,MULTICAST>  mtu 1500
        inet6 fe80::20c:29ff:fe18:35e9  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:18:35:e9  txqueuelen 0  (Ethernet)
        RX packets 853  bytes 101031 (98.6 KiB)
        RX errors 0  dropped 142  overruns 0  frame 0
        TX packets 381  bytes 59847 (58.4 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

bond1: flags=5187<UP,BROADCAST,RUNNING,MASTER,MULTICAST>  mtu 1500
        inet6 fe80::20c:29ff:fe18:35fd  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:18:35:fd  txqueuelen 0  (Ethernet)
        RX packets 65  bytes 5994 (5.8 KiB)
        RX errors 0  dropped 14  overruns 0  frame 0
        TX packets 51  bytes 3986 (3.8 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

br0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.102  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 fe80::20c:29ff:fe18:35e9  prefixlen 64  scopeid 0x20<link>
        inet6 240e:324:79e:f400:20c:29ff:fe18:35e9  prefixlen 64  scopeid 0x0<global>
        ether 00:0c:29:18:35:e9  txqueuelen 0  (Ethernet)
        RX packets 272  bytes 32214 (31.4 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 133  bytes 17392 (16.9 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

br1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.16.1.102  netmask 255.255.255.0  broadcast 172.16.1.255
        inet6 fe80::20c:29ff:fe18:35fd  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:18:35:fd  txqueuelen 0  (Ethernet)
        RX packets 40  bytes 3136 (3.0 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 12  bytes 816 (816.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth0: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500
        ether 00:0c:29:18:35:e9  txqueuelen 1000  (Ethernet)
        RX packets 712  bytes 87425 (85.3 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 372  bytes 59113 (57.7 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth1: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500
        ether 00:0c:29:18:35:e9  txqueuelen 1000  (Ethernet)
        RX packets 142  bytes 13692 (13.3 KiB)
        RX errors 0  dropped 142  overruns 0  frame 0
        TX packets 9  bytes 734 (734.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth2: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500
        ether 00:0c:29:18:35:fd  txqueuelen 1000  (Ethernet)
        RX packets 52  bytes 5204 (5.0 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 42  bytes 3252 (3.1 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth3: flags=6211<UP,BROADCAST,RUNNING,SLAVE,MULTICAST>  mtu 1500
        ether 00:0c:29:18:35:fd  txqueuelen 1000  (Ethernet)
        RX packets 14  bytes 876 (876.0 B)
        RX errors 0  dropped 14  overruns 0  frame 0
        TX packets 9  bytes 734 (734.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  • Verify external network connectivity:
[root@pm2 ~]# ping www.baidu.com
PING www.a.shifen.com (180.101.49.12) 56(84) bytes of data.
64 bytes from 180.101.49.12: icmp_seq=1 ttl=52 time=39.5 ms
64 bytes from 180.101.49.12: icmp_seq=2 ttl=52 time=40.0 ms
  • Verify internal network connectivity:
[root@pm2 ~]# ping 172.16.1.1
PING 172.16.1.1 (172.16.1.1) 56(84) bytes of data.
64 bytes from 172.16.1.1: icmp_seq=1 ttl=128 time=5.65 ms
64 bytes from 172.16.1.1: icmp_seq=2 ttl=128 time=0.691 ms

1.4.5: Scheduled NTP Synchronization

[root@pm2 ~]# /usr/sbin/ntpdate 172.16.1.253 && /usr/sbin/hwclock -w
11 Nov 17:20:28 ntpdate[4687]: adjust time server 172.16.1.253 offset 0.014662 sec

[root@pm2 ~]# echo "*/30 * * * * /usr/sbin/ntpdate 172.16.1.253 && /usr/sbin/hwclock -w" > /var/spool/cron/root

Part 2: Building the KVM Guest Environment

2.1: Preparing the KVM Virtualization Environment

2.1.1: Confirm CPU Virtualization Is Enabled

[root@pm1 ~]# grep -E "vmx|svm" /proc/cpuinfo | wc -l
4
[root@pm2 ~]# grep -E "vmx|svm" /proc/cpuinfo | wc -l
4

2.1.2: Install the KVM Packages and Start libvirtd

Install the required packages
~]# yum install qemu-kvm qemu-kvm-tools libvirt virt-manager virt-install -y
Start libvirtd
~]# systemctl start libvirtd
~]# systemctl enable libvirtd
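A quick check that the KVM kernel modules actually loaded (kvm_intel on Intel CPUs, kvm_amd on AMD):
~]# lsmod | grep kvm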
Verify the KVM NAT interface
]# ifconfig virbr0
virbr0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 192.168.122.1  netmask 255.255.255.0  broadcast 192.168.122.255
        ether 52:54:00:cf:58:53  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  • If you need KVM guests on the NAT network, you can set whatever internal subnet you like by editing the following configuration files:
]# grep "192.168.122.1" /etc/libvirt/ -R
/etc/libvirt/qemu/networks/autostart/default.xml:  <ip address='192.168.122.1' netmask='255.255.255.0'>
/etc/libvirt/qemu/networks/default.xml:  <ip address='192.168.122.1' netmask='255.255.255.0'>

/etc/libvirt/qemu/networks/autostart/default.xml is a symlink to /etc/libvirt/qemu/networks/default.xml; the two are actually the same file.

This lab uses bridged networking, so no changes are made here.
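
If you do want a different NAT subnet, a safer route than editing those files by hand is virsh; a minimal sketch (the subnet you put in the XML is up to you):
]# virsh net-edit default      # opens the network XML in $EDITOR; change the <ip address=...> block
]# virsh net-destroy default   # restart the network so the change takes effect
]# virsh net-start default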

2.2: Creating the KVM Guests

2.2.1: Create the CentOS-7.2-1511-bridge Template VM on pm1

Create the disk
  • The disk uses the qcow2 format, is 10 GB in size, and lives in the default KVM image directory:
[root@pm1 ~]# qemu-img create -f qcow2 /var/lib/libvirt/images/centos-7.2-1511-minimal.qcow2 10G
Formatting '/var/lib/libvirt/images/centos-7.2-1511-minimal.qcow2', fmt=qcow2 size=10737418240 encryption=off cluster_size=65536 lazy_refcounts=off
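  • The result can be confirmed with qemu-img (a quick check; the reported virtual size should be 10G):
[root@pm1 ~]# qemu-img info /var/lib/libvirt/images/centos-7.2-1511-minimal.qcow2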
Upload the ISO
  • The CentOS 7.2 minimal ISO is used to install the guest operating system:
[root@pm1 src]# ll /usr/local/src/CentOS-7.2-x86_64-Minimal-1511.iso 
-rw-r--r-- 1 root root 632291328 Oct 27 15:01 /usr/local/src/CentOS-7.2-x86_64-Minimal-1511.iso
Create the bridged KVM guest
[root@pm1 ~]# virt-install --virt-type kvm \
  --name CentOS-7.2-1511-bridge \
  --ram 1024 \
  --vcpus 2 \
  --cdrom=/usr/local/src/CentOS-7.2-x86_64-Minimal-1511.iso \
  --disk path=/var/lib/libvirt/images/centos-7.2-1511-minimal.qcow2 \
  --network bridge=br0 \
  --graphics vnc,listen=0.0.0.0 \
  --noautoconsole
Connect to the guest with virt-manager
  • Run virt-manager from the command line:
[root@pm1 ~]# virt-manager
  • In the window that opens, select the newly created guest.


Install CentOS 7.2 in the guest
  • Pass kernel parameters:

The goal is to normalize the NIC names to the eth* scheme.

net.ifnames=0 biosdevname=0
  • Install the system following the usual steps:

Configure the IP address 192.168.1.201/24 during installation; once the install finishes you can connect over SSH.


Initial CentOS configuration
  • Set the hostname:
[root@localhost ~]# hostnamectl set-hostname pm1-node201.yqc.com
  • Stop and disable the firewall:
[root@pm1-node201 ~]# systemctl stop firewalld
[root@pm1-node201 ~]# systemctl disable firewalld
  • Disable SELinux:
[root@pm1-node201 ~]# vi /etc/sysconfig/selinux
SELINUX=disabled
[root@pm1-node201 ~]# setenforce 0
  • Stop and disable NetworkManager:
[root@pm1-node201 ~]# systemctl stop NetworkManager
[root@pm1-node201 ~]# systemctl disable NetworkManager
  • Switch to the Aliyun yum mirrors:
[root@pm1-node201 ~]# mkdir /etc/yum.repos.d/repo_bak && mv /etc/yum.repos.d/CentOS* /etc/yum.repos.d/repo_bak
[root@pm1-node201 ~]# curl -o /etc/yum.repos.d/CentOS-7-ali.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@pm1-node201 ~]# yum clean all
[root@pm1-node201 ~]# yum makecache
[root@pm1-node201 ~]# yum repolist
  • Install common tool packages:
[root@pm1-node201 ~]# yum install vim iotop bc gcc gcc-c++ glibc glibc-devel pcre \
  pcre-devel openssl openssl-devel zip unzip zlib-devel net-tools \
  lrzsz tree ntpdate telnet lsof tcpdump wget libevent libevent-devel \
  systemd-devel bash-completion traceroute \
  bridge-utils -y
  • NTP time synchronization:
[root@pm1-node201 ~]# /usr/sbin/ntpdate 192.168.1.254 && /usr/sbin/hwclock -w
[root@pm1-node201 ~]# echo "*/30 * * * * /usr/sbin/ntpdate 192.168.1.254 && /usr/sbin/hwclock -w" > /var/spool/cron/root
  • Kernel parameter tuning:
[root@pm1-node201 ~]# vim ~/.vimrc
set paste

[root@pm1-node201 ~]# vim /etc/sysctl.conf
# Controls source route verification
net.ipv4.conf.default.rp_filter = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1

# Do not accept source routing
net.ipv4.conf.default.accept_source_route = 0

# Controls the System Request debugging functionality of the kernel
kernel.sysrq = 0

# Controls whether core dumps will append the PID to the core filename.

# Controls the use of TCP syncookies
net.ipv4.tcp_syncookies = 1

# Disable netfilter on bridges.
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0

# Controls the default maximum size of a message queue
kernel.msgmnb = 65536

# Controls the maximum size of a message, in bytes
kernel.msgmax = 65536

# Controls the maximum shared segment size, in bytes
kernel.shmmax = 68719476736

# Controls the maximum number of shared memory segments, in pages
kernel.shmall = 4294967296


# TCP kernel parameters
net.ipv4.tcp_mem = 786432 1048576 1572864
net.ipv4.tcp_rmem = 4096        87380   4194304
net.ipv4.tcp_wmem = 4096        16384   4194304
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_sack = 1

# socket buffer
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 262144
net.core.somaxconn = 20480
net.core.optmem_max = 81920


# TCP conn
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_syn_retries = 3
net.ipv4.tcp_retries1 = 3
net.ipv4.tcp_retries2 = 15

# tcp conn reuse
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_fin_timeout = 1


net.ipv4.tcp_max_tw_buckets = 20000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_timestamps = 1
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syncookies = 1

# keepalive conn
net.ipv4.tcp_keepalive_time = 300
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.ip_local_port_range = 10001    65000

# swap
vm.overcommit_memory = 0
vm.swappiness = 10

#net.ipv4.conf.eth1.rp_filter = 0
#net.ipv4.conf.lo.arp_ignore = 1
#net.ipv4.conf.lo.arp_announce = 2
#net.ipv4.conf.all.arp_ignore = 1
#net.ipv4.conf.all.arp_announce = 2

[root@pm1-node201 ~]# sysctl -p
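  • Individual values can be spot-checked after loading; for example, the two parameters keepalived will rely on later:
[root@pm1-node201 ~]# sysctl net.ipv4.ip_forward net.ipv4.ip_nonlocal_bind
net.ipv4.ip_forward = 1
net.ipv4.ip_nonlocal_bind = 1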
  • System resource limit tuning:
[root@pm1-node201 ~]# vim /etc/security/limits.conf
*                soft    core           unlimited
*                hard    core           unlimited
*                soft    nproc          1000000 
*                hard    nproc          1000000 
*                soft    nofile         1000000 
*                hard    nofile         1000000
*                soft    memlock        32000
*                hard    memlock        32000
*                soft    msgqueue       8192000
*                hard    msgqueue       8192000
Shut down
  • Shut the guest down so its disk file can be copied:
[root@pm1 ~]# virsh list
 Id    Name                           State
----------------------------------------------------
 2     CentOS-7.2-1511-bridge         running
 
[root@pm1 ~]# virsh shutdown CentOS-7.2-1511-bridge
Domain CentOS-7.2-1511-bridge is being shutdown

2.2.2: Create pm1-node201

Copy the VM disk file
[root@pm1 ~]# cp /var/lib/libvirt/images/centos-7.2-1511-minimal.qcow2 /var/lib/libvirt/images/pm1-node201.qcow2
Create the VM
  • Create the VM from the copied disk file:
[root@pm1 ~]# virt-install --virt-type kvm \
  --name pm1-node201 \
  --ram 1024 \
  --vcpus 2 \
  --cdrom=/usr/local/src/CentOS-7.2-x86_64-Minimal-1511.iso \
  --disk path=/var/lib/libvirt/images/pm1-node201.qcow2 \
  --network bridge=br0 \
  --graphics vnc,listen=0.0.0.0 \
  --noautoconsole
  • Select "Shut Down" to power the guest off:

Sometimes "Shut Down" has no effect at the boot selection screen; in that case choose "Force Off" to power off forcibly.
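
The extra shutdown is needed because --cdrom boots the installer again. As an aside, a sketch of an alternative: virt-install's --import flag registers the VM and boots straight from the existing disk, skipping the installer:
[root@pm1 ~]# virt-install --virt-type kvm \
  --name pm1-node201 \
  --ram 1024 \
  --vcpus 2 \
  --disk path=/var/lib/libvirt/images/pm1-node201.qcow2 \
  --network bridge=br0 \
  --graphics vnc,listen=0.0.0.0 \
  --import \
  --noautoconsole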


  • Start the VM again and it boots the existing system straight from disk.


Set the hostname
]# hostnamectl set-hostname pm1-node201.yqc.com
Add the internal NIC
  • In virt-manager, switch View to Details and click Add Hardware in the lower left.


  • Choose Network, bridge it to br1, and set the device type to virtio.


Configure the network
  • Add a network configuration file for eth1:
[root@pm1-node201 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth1
BOOTPROTO="none"
NAME="eth1"
DEVICE="eth1"
ONBOOT="yes"
IPADDR="172.16.1.201"
PREFIX="24"
DNS1="172.16.1.253"
  • Restart the network service and verify:
[root@pm1-node201 ~]# systemctl restart network

[root@pm1-node201 ~]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.201  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 240e:324:79e:f400:5054:ff:fe1f:99c7  prefixlen 64  scopeid 0x0<global>
        inet6 fe80::5054:ff:fe1f:99c7  prefixlen 64  scopeid 0x20<link>
        ether 52:54:00:1f:99:c7  txqueuelen 1000  (Ethernet)
        RX packets 1310  bytes 195856 (191.2 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 384  bytes 43157 (42.1 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.16.1.201  netmask 255.255.255.0  broadcast 172.16.1.255
        inet6 fe80::5054:ff:fef2:3384  prefixlen 64  scopeid 0x20<link>
        ether 52:54:00:f2:33:84  txqueuelen 1000  (Ethernet)
        RX packets 7  bytes 558 (558.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 10  bytes 676 (676.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
        
[root@pm1-node201 ~]# ping 172.16.1.1
PING 172.16.1.1 (172.16.1.1) 56(84) bytes of data.
64 bytes from 172.16.1.1: icmp_seq=1 ttl=128 time=5.42 ms
64 bytes from 172.16.1.1: icmp_seq=2 ttl=128 time=1.22 ms
Shut down pm1-node201
  • Because every guest cloned later also starts out with the 192.168.1.201 address, shut pm1-node201 down first so the newly created guests can be reached over SSH:

This was an oversight when building the template; using a different address would have made this step unnecessary. It is no big deal, just one extra small step.

[root@pm1 ~]# virsh shutdown pm1-node201

2.2.3: Create pm1-node204

Copy the VM disk file
[root@pm1 ~]# cp /var/lib/libvirt/images/centos-7.2-1511-minimal.qcow2 /var/lib/libvirt/images/pm1-node204.qcow2
Create the VM
  • Create the VM from the copied disk file:
[root@pm1 ~]# virt-install --virt-type kvm \
  --name pm1-node204 \
  --ram 1024 \
  --vcpus 2 \
  --cdrom=/usr/local/src/CentOS-7.2-x86_64-Minimal-1511.iso \
  --disk path=/var/lib/libvirt/images/pm1-node204.qcow2 \
  --network bridge=br0 \
  --graphics vnc,listen=0.0.0.0 \
  --noautoconsole
  • The VM boot procedure is the same as for pm1-node201.
Set the hostname
]# hostnamectl set-hostname pm1-node204.yqc.com
Configure the network
  • Change the IP address to 172.16.1.204:
[root@pm1-node204 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth0
BOOTPROTO="none"
NAME="eth0"
DEVICE="eth0"
ONBOOT="yes"
IPADDR="172.16.1.204"
PREFIX="24"
DNS1="172.16.1.253"

[root@pm1-node204 ~]# systemctl restart network
  • Change the VM's NIC in virt-manager to bridge to br1.


  • Verify internal network connectivity:
[root@pm1-node204 ~]# ping 172.16.1.1
PING 172.16.1.1 (172.16.1.1) 56(84) bytes of data.
64 bytes from 172.16.1.1: icmp_seq=1 ttl=128 time=1.70 ms
64 bytes from 172.16.1.1: icmp_seq=2 ttl=128 time=1.94 ms

2.2.4: Create pm2-node202

Copy the VM disk file
[root@pm2 ~]# scp pm1:/var/lib/libvirt/images/centos-7.2-1511-minimal.qcow2 /var/lib/libvirt/images/centos-7.2-1511-minimal.qcow2
[root@pm2 ~]# cp /var/lib/libvirt/images/centos-7.2-1511-minimal.qcow2 /var/lib/libvirt/images/pm2-node202.qcow2
Copy the ISO
[root@pm2 ~]# scp pm1:/usr/local/src/CentOS-7.2-x86_64-Minimal-1511.iso /usr/local/src/CentOS-7.2-x86_64-Minimal-1511.iso
Create the VM
  • Create the VM from the copied disk file:
[root@pm2 ~]# virt-install --virt-type kvm \
  --name pm2-node202 \
  --ram 1024 \
  --vcpus 2 \
  --cdrom=/usr/local/src/CentOS-7.2-x86_64-Minimal-1511.iso \
  --disk path=/var/lib/libvirt/images/pm2-node202.qcow2 \
  --network bridge=br0 \
  --graphics vnc,listen=0.0.0.0 \
  --noautoconsole
  • The VM boot procedure is the same as for pm1-node201.
Set the hostname
]# hostnamectl set-hostname pm2-node202.yqc.com
Add the internal NIC

Follow the same steps as for pm1-node201.

Configure the network
  • Change the eth0 address to 192.168.1.202:
[root@pm2-node202 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth0
BOOTPROTO="none"
NAME="eth0"
DEVICE="eth0"
ONBOOT="yes"
IPADDR="192.168.1.202"
PREFIX="24"
GATEWAY="192.168.1.1"
DNS1="192.168.1.254"
  • Change the eth1 address to 172.16.1.202:
[root@pm2-node202 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth1
BOOTPROTO="none"
NAME="eth1"
DEVICE="eth1"
ONBOOT="yes"
IPADDR="172.16.1.202"
PREFIX="24"
DNS1="172.16.1.253"
  • Restart the network service and verify:
[root@pm2-node202 ~]# systemctl restart network

[root@pm2-node202 ~]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.202  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 fe80::5054:ff:fe04:2d17  prefixlen 64  scopeid 0x20<link>
        inet6 240e:324:79e:f400:5054:ff:fe04:2d17  prefixlen 64  scopeid 0x0<global>
        ether 52:54:00:04:2d:17  txqueuelen 1000  (Ethernet)
        RX packets 1207  bytes 172922 (168.8 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 349  bytes 58171 (56.8 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.16.1.202  netmask 255.255.255.0  broadcast 172.16.1.255
        inet6 fe80::5054:ff:fe1d:9e2  prefixlen 64  scopeid 0x20<link>
        ether 52:54:00:1d:09:e2  txqueuelen 1000  (Ethernet)
        RX packets 10  bytes 908 (908.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 25  bytes 1674 (1.6 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@pm2-node202 ~]# ping 192.168.1.1
PING 192.168.1.1 (192.168.1.1) 56(84) bytes of data.
64 bytes from 192.168.1.1: icmp_seq=1 ttl=64 time=12.0 ms
64 bytes from 192.168.1.1: icmp_seq=2 ttl=64 time=2.85 ms

[root@pm2-node202 ~]# ping 172.16.1.1
PING 172.16.1.1 (172.16.1.1) 56(84) bytes of data.
64 bytes from 172.16.1.1: icmp_seq=1 ttl=128 time=7.27 ms
64 bytes from 172.16.1.1: icmp_seq=2 ttl=128 time=1.24 ms

2.2.5: Create pm2-node203

Copy the VM disk file
[root@pm2 ~]# cp /var/lib/libvirt/images/centos-7.2-1511-minimal.qcow2 /var/lib/libvirt/images/pm2-node203.qcow2
Create the VM
  • Create the VM from the copied disk file:
[root@pm2 ~]# virt-install --virt-type kvm \
  --name pm2-node203 \
  --ram 1024 \
  --vcpus 2 \
  --cdrom=/usr/local/src/CentOS-7.2-x86_64-Minimal-1511.iso \
  --disk path=/var/lib/libvirt/images/pm2-node203.qcow2 \
  --network bridge=br0 \
  --graphics vnc,listen=0.0.0.0 \
  --noautoconsole
  • The VM boot procedure is the same as for pm1-node201.
Set the hostname
]# hostnamectl set-hostname pm2-node203.yqc.com
Configure the network
  • Change the IP address to 172.16.1.203:
[root@pm2-node203 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth0
BOOTPROTO="none"
NAME="eth0"
DEVICE="eth0"
ONBOOT="yes"
IPADDR="172.16.1.203"
PREFIX="24"
DNS1="172.16.1.253"

[root@pm2-node203 ~]# systemctl restart network
  • Change the VM's NIC in virt-manager to bridge to br1.

  • Verify internal network connectivity:

[root@pm2-node203 ~]# ping 172.16.1.1
PING 172.16.1.1 (172.16.1.1) 56(84) bytes of data.
64 bytes from 172.16.1.1: icmp_seq=1 ttl=128 time=1.70 ms
64 bytes from 172.16.1.1: icmp_seq=2 ttl=128 time=1.94 ms

2.2.6: Create pm2-node205

Copy the VM disk file
[root@pm2 ~]# cp /var/lib/libvirt/images/centos-7.2-1511-minimal.qcow2 /var/lib/libvirt/images/pm2-node205.qcow2
Create the VM
  • Create the VM from the copied disk file:
[root@pm2 ~]# virt-install --virt-type kvm \
  --name pm2-node205 \
  --ram 1024 \
  --vcpus 2 \
  --cdrom=/usr/local/src/CentOS-7.2-x86_64-Minimal-1511.iso \
  --disk path=/var/lib/libvirt/images/pm2-node205.qcow2 \
  --network bridge=br0 \
  --graphics vnc,listen=0.0.0.0 \
  --noautoconsole
  • The VM boot procedure is the same as for pm1-node201.
Set the hostname
]# hostnamectl set-hostname pm2-node205.yqc.com
Configure the network
  • Change the IP address to 172.16.1.205:
[root@pm2-node205 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth0
BOOTPROTO="none"
NAME="eth0"
DEVICE="eth0"
ONBOOT="yes"
IPADDR="172.16.1.205"
PREFIX="24"
DNS1="172.16.1.253"

[root@pm2-node205 ~]# systemctl restart network
  • Change the VM's NIC in virt-manager to bridge to br1.

  • Verify internal network connectivity:

[root@pm2-node205 ~]# ping 172.16.1.1
PING 172.16.1.1 (172.16.1.1) 56(84) bytes of data.
64 bytes from 172.16.1.1: icmp_seq=1 ttl=128 time=1.70 ms
64 bytes from 172.16.1.1: icmp_seq=2 ttl=128 time=1.94 ms

2.3: Follow-up KVM Guest Settings

2.3.1: Start the KVM Guests Automatically with the Host

[root@pm1 ~]# virsh autostart pm1-node201
[root@pm1 ~]# virsh autostart pm1-node204
[root@pm2 ~]# virsh autostart pm2-node202
[root@pm2 ~]# virsh autostart pm2-node203
[root@pm2 ~]# virsh autostart pm2-node205

2.3.2: Save KVM Guest Snapshots

[root@pm1 ~]# virsh snapshot-create-as pm1-node201 --name "pm1-node201-init" --description "Initial state"
[root@pm1 ~]# virsh snapshot-create-as pm1-node204 --name "pm1-node204-init" --description "Initial state" 
[root@pm2 ~]# virsh snapshot-create-as pm2-node202 --name "pm2-node202-init" --description "Initial state"
[root@pm2 ~]# virsh snapshot-create-as pm2-node203 --name "pm2-node203-init" --description "Initial state"
[root@pm2 ~]# virsh snapshot-create-as pm2-node205 --name "pm2-node205-init" --description "Initial state"  
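
The snapshots can be listed per guest to confirm they were taken (output omitted here), for example:
[root@pm1 ~]# virsh snapshot-list pm1-node201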

2.3.3: Review the Finished KVM Guests

  • KVM guests on pm1:
[root@pm1 ~]# virsh list --all
 Id    Name                           State
----------------------------------------------------
 7     pm1-node204                    running
 8     pm1-node201                    running
 -     CentOS-7.2-1511-bridge         shut off


  • KVM guests on pm2:
[root@pm2 ~]# virsh list --all
 Id    Name                           State
----------------------------------------------------
 2     pm2-node202                    running
 4     pm2-node203                    running
 6     pm2-node205                    running


At this point the KVM guest environment is complete.

Part 3: Building a Highly Available WordPress Site with HAProxy


3.1: Deploying the Load Balancers

3.1.1: Deploy Keepalived on pm1-node201

Two kernel parameters need to be enabled on the keepalived nodes, net.ipv4.ip_forward = 1 and net.ipv4.ip_nonlocal_bind = 1; both were already added during system initialization.

  • Build and install keepalived-1.3.6:
[root@pm1-node201 ~]# wget -O /usr/local/src/keepalived-1.3.6.tar.gz http://www.keepalived.org/software/keepalived-1.3.6.tar.gz
[root@pm1-node201 ~]# cd /usr/local/src
[root@pm1-node201 src]# tar xvf keepalived-1.3.6.tar.gz
[root@pm1-node201 src]# cd keepalived-1.3.6

[root@pm1-node201 keepalived-1.3.6]# yum install libnfnetlink-devel libnfnetlink ipvsadm libnl libnl-devel libnl3 libnl3-devel lm_sensors-libs net-snmp-agent-libs net-snmp-libs openssh-server openssh-clients openssl openssl-devel tree sudo psmisc lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel iproute -y

[root@pm1-node201 keepalived-1.3.6]# ./configure --prefix=/usr/local/keepalived --disable-fwmark && make && make install

[root@pm1-node201 keepalived-1.3.6]# cp /usr/local/src/keepalived-1.3.6/keepalived/etc/init.d/keepalived.rh.init /etc/sysconfig/keepalived.sysconfig

[root@pm1-node201 keepalived-1.3.6]# cp /usr/local/src/keepalived-1.3.6/keepalived/keepalived.service /usr/lib/systemd/system/

[root@pm1-node201 keepalived-1.3.6]# cp /usr/local/src/keepalived-1.3.6/bin/keepalived /usr/sbin/
  • Configure Keepalived, making pm1-node201 the master node for 192.168.1.200 and the backup node for 172.16.1.200:
[root@pm1-node201 ~]# mkdir /etc/keepalived
[root@pm1-node201 ~]# cp /usr/local/keepalived/etc/keepalived/keepalived.conf /etc/keepalived/
[root@pm1-node201 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
   notification_email {
      root@pm1-node201.yqc.com
   }
   notification_email_from root@pm1-node201.yqc.com
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id pm1-node201.yqc.com
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VIP_1 {
        state MASTER
        interface eth0
        virtual_router_id 1
        priority 100
        advert_int 2
        unicast_src_ip 192.168.1.201
        unicast_peer {
                192.168.1.202
        }
        authentication {
                auth_type PASS
                auth_pass 123456
        }
        virtual_ipaddress {
                192.168.1.200/24 dev eth0 label eth0:0
        }
}

vrrp_instance VIP_2 {
        state BACKUP
        interface eth1
        virtual_router_id 2
        priority 80
        advert_int 2
        unicast_src_ip 172.16.1.201
        unicast_peer {
                172.16.1.202
        }
        authentication {
                auth_type PASS
                auth_pass 123456
        }
        virtual_ipaddress {
                172.16.1.200/24 dev eth1 label eth1:0
        }
}
  • Start keepalived and enable it at boot:
[root@pm1-node201 ~]# systemctl start keepalived
[root@pm1-node201 ~]# systemctl enable keepalived
  • Verify the VIP:
[root@pm1-node201 ~]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.201  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 240e:324:79e:f400:5054:ff:fe1f:99c7  prefixlen 64  scopeid 0x0<global>
        inet6 fe80::5054:ff:fe1f:99c7  prefixlen 64  scopeid 0x20<link>
        ether 52:54:00:1f:99:c7  txqueuelen 1000  (Ethernet)
        RX packets 220  bytes 34831 (34.0 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 105  bytes 9825 (9.5 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth0:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.200  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 52:54:00:1f:99:c7  txqueuelen 1000  (Ethernet)

eth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.16.1.201  netmask 255.255.255.0  broadcast 172.16.1.255
        inet6 fe80::5054:ff:fef2:3384  prefixlen 64  scopeid 0x20<link>
        ether 52:54:00:f2:33:84  txqueuelen 1000  (Ethernet)
        RX packets 153  bytes 10230 (9.9 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 36  bytes 1872 (1.8 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

3.1.2: Deploy Keepalived on pm2-node202

  • Tar up the keepalived installation on pm1-node201, then copy it to pm2-node202 and extract it into the matching directory:
[root@pm1-node201 ~]# cd /usr/local/keepalived/
[root@pm1-node201 keepalived]# tar zcvf keepalived-pm1-node201.tar.gz ./*

[root@pm2-node202 ~]# mkdir /usr/local/keepalived
[root@pm2-node202 ~]# scp pm1-node201:/usr/local/keepalived/keepalived-pm1-node201.tar.gz /usr/local/keepalived/
[root@pm2-node202 ~]# cd /usr/local/keepalived/
[root@pm2-node202 keepalived]# tar zxvf keepalived-pm1-node201.tar.gz
  • Copy the keepalived configuration file, main binary, and unit file from pm1-node201 to the matching locations on pm2-node202:
[root@pm2-node202 ~]# mkdir /etc/keepalived

[root@pm2-node202 ~]# scp pm1-node201:/etc/keepalived/keepalived.conf /etc/keepalived/
[root@pm2-node202 ~]# scp pm1-node201:/etc/sysconfig/keepalived.sysconfig /etc/sysconfig/keepalived.sysconfig
[root@pm2-node202 ~]# scp pm1-node201:/usr/lib/systemd/system/keepalived.service /usr/lib/systemd/system/keepalived.service
[root@pm2-node202 ~]# scp pm1-node201:/usr/sbin/keepalived /usr/sbin/
  • Configure Keepalived, making pm2-node202 the backup node for 192.168.1.200 and the master node for 172.16.1.200:
[root@pm2-node202 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
   notification_email {
      root@pm2-node202.yqc.com
   }
   notification_email_from root@pm2-node202.yqc.com
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id pm2-node202.yqc.com
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VIP_1 {
        state BACKUP
        interface eth0
        virtual_router_id 1
        priority 80
        advert_int 2
        unicast_src_ip 192.168.1.202
        unicast_peer {
                192.168.1.201
        }
        authentication {
                auth_type PASS
                auth_pass 123456
        }
        virtual_ipaddress {
                192.168.1.200/24 dev eth0 label eth0:0
        }
}

vrrp_instance VIP_2 {
        state MASTER
        interface eth1
        virtual_router_id 2
        priority 100
        advert_int 2
        unicast_src_ip 172.16.1.202
        unicast_peer {
                172.16.1.201
        }
        authentication {
                auth_type PASS
                auth_pass 123456
        }
        virtual_ipaddress {
                172.16.1.200/24 dev eth1 label eth1:0
        }
}
  • Start keepalived and enable it at boot:
[root@pm2-node202 ~]# systemctl start keepalived
[root@pm2-node202 ~]# systemctl enable keepalived
  • Verify the VIP:
[root@pm2-node202 ~]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.202  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 fe80::5054:ff:fe04:2d17  prefixlen 64  scopeid 0x20<link>
        inet6 240e:324:79e:f400:5054:ff:fe04:2d17  prefixlen 64  scopeid 0x0<global>
        ether 52:54:00:04:2d:17  txqueuelen 1000  (Ethernet)
        RX packets 290  bytes 42205 (41.2 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 64  bytes 7813 (7.6 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.16.1.202  netmask 255.255.255.0  broadcast 172.16.1.255
        inet6 fe80::5054:ff:fe1d:9e2  prefixlen 64  scopeid 0x20<link>
        ether 52:54:00:1d:09:e2  txqueuelen 1000  (Ethernet)
        RX packets 133  bytes 8820 (8.6 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 67  bytes 3630 (3.5 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth1:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.16.1.200  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 52:54:00:1d:09:e2  txqueuelen 1000  (Ethernet)
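
  • With both nodes up, failover can be sanity-checked: stop keepalived on a master and the VIP should move to the peer within a few advertisement intervals (a sketch; preemption is on by default, so the VIP returns once the master is back):
[root@pm1-node201 ~]# systemctl stop keepalived
[root@pm2-node202 ~]# ip addr show eth0 | grep 192.168.1.200
[root@pm1-node201 ~]# systemctl start keepalived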

3.1.3: Deploy HAProxy on pm1-node201

  • Build and install haproxy-1.8.20:
[root@pm1-node201 ~]# cd /usr/local/src
[root@pm1-node201 src]# tar zxvf haproxy-1.8.20.tar.gz 
[root@pm1-node201 src]# cd haproxy-1.8.20/

[root@pm1-node201 haproxy-1.8.20]# yum install gcc gcc-c++ glibc glibc-devel pcre pcre-devel openssl openssl-devel systemd-devel net-tools vim iotop bc zip unzip zlib-devel lrzsz tree screen lsof tcpdump wget ntpdate -y

[root@pm1-node201 haproxy-1.8.20]# make ARCH=x86_64 TARGET=linux2628 USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 USE_SYSTEMD=1 USE_CPU_AFFINITY=1 PREFIX=/usr/local/haproxy
[root@pm1-node201 haproxy-1.8.20]# make install PREFIX=/usr/local/haproxy
[root@pm1-node201 haproxy-1.8.20]# cp haproxy /usr/sbin/

[root@pm1-node201 ~]# vim /usr/lib/systemd/system/haproxy.service
[Unit]  
Description=HAProxy Load Balancer
After=syslog.target network.target
[Service]
ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q 
ExecStart=/usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid
ExecReload=/bin/kill -USR2 $MAINPID
[Install]
WantedBy=multi-user.target

[root@pm1-node201 ~]# mkdir /etc/haproxy
[root@pm1-node201 ~]# useradd haproxy -s /sbin/nologin
[root@pm1-node201 ~]# mkdir /var/lib/haproxy
[root@pm1-node201 ~]# chown haproxy.haproxy /var/lib/haproxy/ -R
  • Configure HAProxy to proxy the backend web servers and MySQL:
[root@pm1-node201 examples]# vim /etc/haproxy/haproxy.cfg
global
        maxconn 100000
        user haproxy
        group haproxy
        daemon
        nbproc 1
        pidfile /run/haproxy.pid
        log 127.0.0.1 local3 info
        chroot /usr/local/haproxy
        stats socket /var/lib/haproxy/haproxy.socket mode 600 level admin

defaults
        option redispatch
        option abortonclose
        option http-keep-alive
        option forwardfor
        maxconn 100000
        mode http
        timeout connect 10s
        timeout client 20s
        timeout server 30s
        timeout check 5s

listen stats
        bind :9999
        stats enable
        #stats hide-version
        stats uri /haproxy-status
        stats realm HAProxy\ Stats\ Page
        stats auth haadmin:123456
        stats auth admin:123456
        stats refresh 30s
        stats admin if TRUE

listen nginx
        bind 192.168.1.200:80
        mode tcp
        log global
        balance roundrobin
        server 172.16.1.204 172.16.1.204:80 check inter 3000 fall 3 rise 5
        server 172.16.1.205 172.16.1.205:80 check inter 3000 fall 3 rise 5

listen mysql
        bind 172.16.1.200:3306
        mode tcp
        log global
        balance source
        server 172.16.1.203 172.16.1.203:3306 check inter 3000 fall 3 rise 5
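  • The configuration can be syntax-checked before the first start (a quick sanity test):
[root@pm1-node201 ~]# haproxy -f /etc/haproxy/haproxy.cfg -c
Configuration file is valid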
  • Configure rsyslog to receive the haproxy log:
[root@pm1-node201 ~]# vim /etc/rsyslog.conf
local3.*    /var/log/haproxy.log
$ModLoad imudp
$UDPServerRun 514

[root@pm1-node201 ~]# systemctl restart rsyslog
  • Start HAProxy:
[root@pm1-node201 ~]# systemctl start haproxy
[root@pm1-node201 ~]# systemctl enable haproxy
  • Verify the listening ports:
[root@pm1-node201 ~]# ss -tnl | egrep "(80|3306|9999)"
LISTEN     0      20480  172.16.1.200:3306                     *:*                  
LISTEN     0      20480        *:9999                     *:*                  
LISTEN     0      20480  192.168.1.200:80                       *:*                  
  • Check the HAProxy status page (http://192.168.1.201:9999/haproxy-status).


3.1.4: Deploy HAProxy on pm2-node202

  • Create the matching directories and the haproxy user on pm2-node202:
[root@pm2-node202 ~]# mkdir /usr/local/haproxy /etc/haproxy /var/lib/haproxy
[root@pm2-node202 ~]# useradd haproxy -s /sbin/nologin
[root@pm2-node202 ~]# chown haproxy.haproxy /var/lib/haproxy/ -R
  • Tar up the HAProxy installation directory on pm1-node201, then copy it to pm2-node202 and extract it into the matching directory:
[root@pm1-node201 ~]# cd /usr/local/haproxy/
[root@pm1-node201 haproxy]# tar zcvf haproxy-pm1-node201.tar.gz ./*

[root@pm2-node202 ~]# scp pm1-node201:/usr/local/haproxy/haproxy-pm1-node201.tar.gz /usr/local/haproxy/
[root@pm2-node202 ~]# cd /usr/local/haproxy/
[root@pm2-node202 haproxy]# tar zxvf haproxy-pm1-node201.tar.gz 
  • Copy the HAProxy main binary, configuration file, and unit file from pm1-node201:
[root@pm2-node202 ~]# scp pm1-node201:/usr/sbin/haproxy /usr/sbin/
[root@pm2-node202 ~]# scp pm1-node201:/etc/haproxy/haproxy.cfg /etc/haproxy/
[root@pm2-node202 ~]# scp pm1-node201:/usr/lib/systemd/system/haproxy.service /usr/lib/systemd/system/haproxy.service
  • Configure rsyslog to receive the haproxy log:
[root@pm2-node202 ~]# vim /etc/rsyslog.conf
local3.*    /var/log/haproxy.log
$ModLoad imudp
$UDPServerRun 514

[root@pm2-node202 ~]# systemctl restart rsyslog
  • Start HAProxy:
[root@pm2-node202 ~]# systemctl start haproxy
[root@pm2-node202 ~]# systemctl enable haproxy
  • Verify the listening ports:
[root@pm2-node202 ~]# ss -tnl | egrep "(80|3306|9999)"
LISTEN     0      20480  172.16.1.200:3306                     *:*                  
LISTEN     0      20480        *:9999                     *:*                  
LISTEN     0      20480  192.168.1.200:80                       *:*
  • Check the HAProxy status page (http://192.168.1.202:9999/haproxy-status).


3.2: Deploying the Database and Shared Storage

3.2.1: Deploy MariaDB on pm2-node203

  • Install MariaDB:
[root@pm2-node203 ~]# yum install mariadb mariadb-server -y
  • Edit the main configuration file:
[root@pm2-node203 ~]# cp /etc/my.cnf /etc/my.cnf.bak

[root@pm2-node203 ~]# vim /etc/my.cnf
[mysqld]
socket=/var/lib/mysql/mysql.sock
user=mysql
symbolic-links=0
datadir=/data/mysql
innodb_file_per_table=1
# skip-grant-tables
relay-log=/data/mysql
server-id=10
log-error=/data/mysql-log/mysql_error.log
log-bin=/data/mysql-binlog/master-log
# general_log=ON
# general_log_file=/data/general_mysql.log
long_query_time=5
slow_query_log=1
slow_query_log_file=/data/mysql-log/slow_mysql.log
max_connections=1000
bind-address=172.16.1.203

[client]
port=3306
socket=/var/lib/mysql/mysql.sock

[mysqld_safe]
log-error=/data/mysql-log/mysqld_safe.log
pid-file=/var/run/mariadb/mariadb.pid
  • Create the data directories and set ownership:
[root@pm2-node203 ~]# mkdir -pv /data/{mysql,mysql-log,mysql-binlog}
[root@pm2-node203 ~]# chown mysql:mysql /data/mysql* -R
  • Start MariaDB and verify the port:
[root@pm2-node203 ~]# systemctl start mariadb
[root@pm2-node203 ~]# systemctl enable mariadb
[root@pm2-node203 ~]# ss -tnl | grep 3306
LISTEN     0      50     172.16.1.203:3306                     *:* 
  • Run the initial security setup:
[root@pm2-node203 ~]# mysql_secure_installation
  • Verify the mysql login:
[root@pm2-node203 ~]# mysql -uroot -p
Enter password: 
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 3
Server version: 5.5.65-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> 
  • Create the wordpress database and grant privileges:
MariaDB [(none)]> CREATE DATABASE wordpress;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON  wordpress.* TO "wordpress"@"%" IDENTIFIED BY "123456";
MariaDB [(none)]> flush privileges;
  • From the WordPress servers, connect to the database remotely through the VIP:
[root@pm1-node204 ~]# mysql -h172.16.1.200 -uwordpress -p 
Enter password: 
MariaDB [(none)]> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| wordpress          |
+--------------------+
2 rows in set (0.01 sec)

[root@pm2-node205 ~]# mysql -h172.16.1.200 -uwordpress -p
Enter password: 
MariaDB [(none)]> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| wordpress          |
+--------------------+
2 rows in set (0.00 sec)

3.2.2: Deploy NFS on pm2-node203

  • Install nfs-utils:
[root@pm2-node203 ~]# yum install nfs-utils -y
  • Configure NFS:
[root@pm2-node203 ~]# vim /etc/exports
/data/wordpress *(rw,no_root_squash)

[root@pm2-node203 ~]# mkdir /data/wordpress -pv
[root@pm2-node203 ~]# chown 2000:2000 -R /data/wordpress
  • Start nfs and enable it at boot:
[root@pm2-node203 ~]# systemctl start nfs
[root@pm2-node203 ~]# systemctl enable nfs
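  • If /etc/exports is edited later, the export list can be re-applied without restarting the service (exportfs -r rereads it, exportfs -v shows the active exports):
[root@pm2-node203 ~]# exportfs -r
[root@pm2-node203 ~]# exportfs -v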
  • Verify the NFS export:
[root@pm1-node204 ~]# showmount -e 172.16.1.203
Export list for 172.16.1.203:
/data/wordpress *
  • Mount the shared storage on pm1-node204:
[root@pm1-node204 ~]# mkdir /data/nginx/wordpress -pv
[root@pm1-node204 ~]# vim /etc/fstab
172.16.1.203:/data/wordpress /data/nginx/wordpress      nfs     defaults,_netdev        0 0
[root@pm1-node204 ~]# mount -a

[root@pm1-node204 ~]# ll -d /data/nginx/wordpress/
drwxr-xr-x 2 nginx nginx 6 Nov 11 22:17 /data/nginx/wordpress/

[root@pm1-node204 ~]# df -Th
Filesystem                   Type      Size  Used Avail Use% Mounted on
/dev/mapper/centos-root      xfs       7.5G  2.9G  4.7G  39% /
devtmpfs                     devtmpfs  487M     0  487M   0% /dev
tmpfs                        tmpfs     497M     0  497M   0% /dev/shm
tmpfs                        tmpfs     497M  6.7M  490M   2% /run
tmpfs                        tmpfs     497M     0  497M   0% /sys/fs/cgroup
/dev/vda1                    xfs       509M  124M  386M  25% /boot
tmpfs                        tmpfs     100M     0  100M   0% /run/user/0
172.16.1.203:/data/wordpress nfs4      7.5G  1.4G  6.2G  18% /data/nginx/wordpress/
  • Mount the shared storage on pm2-node205:
[root@pm2-node205 ~]# mkdir /data/nginx/wordpress -pv
[root@pm2-node205 ~]# vim /etc/fstab
172.16.1.203:/data/wordpress /data/nginx/wordpress      nfs     defaults,_netdev        0 0
[root@pm2-node205 ~]# mount -a

[root@pm2-node205 ~]# ll -d /data/nginx/wordpress/
drwxr-xr-x 2 nginx nginx 6 Nov 11 22:17 /data/nginx/wordpress/

[root@pm2-node205 ~]# df -Th
Filesystem                   Type      Size  Used Avail Use% Mounted on
/dev/mapper/centos-root      xfs       7.5G  1.8G  5.8G  24% /
devtmpfs                     devtmpfs  487M     0  487M   0% /dev
tmpfs                        tmpfs     497M     0  497M   0% /dev/shm
tmpfs                        tmpfs     497M  6.7M  490M   2% /run
tmpfs                        tmpfs     497M     0  497M   0% /sys/fs/cgroup
/dev/vda1                    xfs       509M  124M  386M  25% /boot
tmpfs                        tmpfs     100M     0  100M   0% /run/user/0
172.16.1.203:/data/wordpress nfs4      7.5G  1.4G  6.2G  18% /data/nginx/wordpress/

3.3: Deploying the Web Service

3.3.1: Deploy PHP on pm1-node204

  • Build and install php-7.1.30:
[root@pm1-node204 ~]# cd /usr/local/src/
[root@pm1-node204 src]# tar zxvf  php-7.1.30.tar.gz
[root@pm1-node204 src]# cd  php-7.1.30/

[root@pm1-node204 php-7.1.30]# yum -y install wget vim pcre pcre-devel openssl openssl-devel libicu-devel gcc gcc-c++ autoconf libjpeg libjpeg-devel libpng libpng-devel freetype freetype-devel libxml2 libxml2-devel zlib zlib-devel glibc glibc-devel glib2 glib2-devel ncurses ncurses-devel curl curl-devel krb5-devel libidn libidn-devel openldap openldap-devel nss_ldap jemalloc-devel cmake boost-devel bison automake libevent libevent-devel gd gd-devel libtool* libmcrypt libmcrypt-devel mcrypt mhash libxslt libxslt-devel readline readline-devel gmp gmp-devel libcurl libcurl-devel openjpeg-devel libzip

[root@pm1-node204 php-7.1.30]# ./configure --prefix=/usr/local/php --enable-fpm --with-fpm-user=www --with-fpm-group=www --with-pear --with-curl --with-png-dir --with-freetype-dir --with-iconv --with-mhash --with-zlib --with-xmlrpc --with-xsl --with-openssl --with-mysqli --with-pdo-mysql --disable-debug --enable-zip --enable-sockets --enable-soap --enable-inline-optimization --enable-xml --enable-ftp --enable-exif --enable-wddx --enable-bcmath --enable-calendar --enable-shmop --enable-dba --enable-sysvsem --enable-sysvshm --enable-sysvmsg



[root@pm1-node204 php-7.1.30]# make -j 2
[root@pm1-node204 php-7.1.30]# make install
  • Copy the PHP configuration files from their templates:
[root@pm1-node204 ~]# cp /usr/local/php/etc/php-fpm.conf.default /usr/local/php/etc/php-fpm.conf
[root@pm1-node204 ~]# cp /usr/local/php/etc/php-fpm.d/www.conf.default /usr/local/php/etc/php-fpm.d/www.conf
[root@pm1-node204 ~]# cp /usr/local/src/php-7.1.30/php.ini-production /usr/local/php/etc/php.ini
  • Create the log directory:
[root@pm1-node204 ~]# mkdir /usr/local/php/log
  • Configure the pid file:
[root@pm1-node204 ~]# vim /usr/local/php/etc/php-fpm.conf
pid = run/php-fpm.pid
  • Register php-fpm as a SysV service:
[root@pm1-node204 ~]# cp /usr/local/src/php-7.1.30/sapi/fpm/init.d.php-fpm /etc/init.d/php-fpm
[root@pm1-node204 ~]# chmod +x /etc/init.d/php-fpm
[root@pm1-node204 ~]# chkconfig --add php-fpm
[root@pm1-node204 ~]# chkconfig php-fpm on

[root@pm1-node204 ~]# chkconfig --list                              

Note: This output shows SysV services only and does not include native
      systemd services. SysV configuration data might be overridden by native
      systemd configuration.

      If you want to list systemd services use 'systemctl list-unit-files'.
      To see services enabled on particular target use
      'systemctl list-dependencies [target]'.

netconsole      0:off   1:off   2:off   3:off   4:off   5:off   6:off
network         0:off   1:off   2:on    3:on    4:on    5:on    6:off
php-fpm         0:off   1:off   2:on    3:on    4:on    5:on    6:off
  • Configure www.conf:
[root@pm1-node204 ~]# vim /usr/local/php/etc/php-fpm.d/www.conf
[www]
user = nginx
group = nginx
listen = 127.0.0.1:9000
pm = dynamic
pm.max_children = 50
pm.start_servers = 30
pm.min_spare_servers = 30
pm.max_spare_servers = 50
pm.status_path = /pm_status
ping.path = /ping
ping.response = pong
access.log = log/$pool.access.log
slowlog = log/$pool.log.slow
  • Test the configuration file:
[root@pm1-node204 ~]# /usr/local/php/sbin/php-fpm -t
[13-Nov-2020 11:07:39] NOTICE: configuration file /usr/local/php/etc/php-fpm.conf test is successful
  • Start php-fpm and verify:
[root@pm1-node204 ~]# service php-fpm start

[root@pm1-node204 ~]# ps -ef | grep php-fpm
root      2247     1  0 11:08 ?        00:00:00 php-fpm: master process (/usr/local/php/etc/php-fpm.conf)
nginx     2248  2247  0 11:08 ?        00:00:00 php-fpm: pool www
nginx     2249  2247  0 11:08 ?        00:00:00 php-fpm: pool www
nginx     2250  2247  0 11:08 ?        00:00:00 php-fpm: pool www
nginx     2251  2247  0 11:08 ?        00:00:00 php-fpm: pool www
nginx     2252  2247  0 11:08 ?        00:00:00 php-fpm: pool www
nginx     2253  2247  0 11:08 ?        00:00:00 php-fpm: pool www

[root@pm1-node204 ~]# netstat -tanlp | grep php-fpm
tcp        0      0 127.0.0.1:9000          0.0.0.0:*               LISTEN      2247/php-fpm: maste

3.3.2: Deploy PHP on pm2-node205

  • Install the PHP build dependencies:
[root@pm2-node205 ~]# yum -y install wget vim pcre pcre-devel openssl openssl-devel libicu-devel gcc gcc-c++ autoconf libjpeg libjpeg-devel libpng libpng-devel freetype freetype-devel libxml2 libxml2-devel zlib zlib-devel glibc glibc-devel glib2 glib2-devel ncurses ncurses-devel curl curl-devel krb5-devel libidn libidn-devel openldap openldap-devel nss_ldap jemalloc-devel cmake boost-devel bison automake libevent libevent-devel gd gd-devel libtool* libmcrypt libmcrypt-devel mcrypt mhash libxslt libxslt-devel readline readline-devel gmp gmp-devel libcurl libcurl-devel openjpeg-devel libzip
  • Create the PHP installation directory:
[root@pm2-node205 ~]# mkdir /usr/local/php
  • Tar up the PHP installation directory on pm1-node204, then copy it to pm2-node205 and extract it into the matching directory:
[root@pm1-node204 ~]# cd /usr/local/php/
[root@pm1-node204 php]# tar zcvf php-pm1-node204.tar.gz ./*

[root@pm2-node205 ~]# scp pm1-node204:/usr/local/php/php-pm1-node204.tar.gz /usr/local/php/
[root@pm2-node205 ~]# cd /usr/local/php/
[root@pm2-node205 php]# tar zxvf php-pm1-node204.tar.gz
  • Test the configuration file:
[root@pm2-node205 ~]# /usr/local/php/sbin/php-fpm -t
[13-Nov-2020 11:23:02] NOTICE: configuration file /usr/local/php/etc/php-fpm.conf test is successful
  • Register php-fpm as a SysV service:
[root@pm2-node205 ~]# scp pm1-node204:/etc/init.d/php-fpm /etc/init.d/
[root@pm2-node205 ~]# chkconfig --add php-fpm
[root@pm2-node205 ~]# chkconfig php-fpm on
[root@pm2-node205 ~]# chkconfig --list

Note: This output shows SysV services only and does not include native
      systemd services. SysV configuration data might be overridden by native
      systemd configuration.

      If you want to list systemd services use 'systemctl list-unit-files'.
      To see services enabled on particular target use
      'systemctl list-dependencies [target]'.

netconsole      0:off   1:off   2:off   3:off   4:off   5:off   6:off
network         0:off   1:off   2:on    3:on    4:on    5:on    6:off
php-fpm         0:off   1:off   2:on    3:on    4:on    5:on    6:off
  • Start php-fpm and verify:
[root@pm2-node205 ~]# service php-fpm start

[root@pm2-node205 ~]# ps -ef | grep php-fpm
root      2760     1  0 11:23 ?        00:00:00 php-fpm: master process (/usr/local/php/etc/php-fpm.conf)
nginx     2761  2760  0 11:23 ?        00:00:00 php-fpm: pool www
nginx     2762  2760  0 11:23 ?        00:00:00 php-fpm: pool www
nginx     2763  2760  0 11:23 ?        00:00:00 php-fpm: pool www
nginx     2764  2760  0 11:23 ?        00:00:00 php-fpm: pool www
nginx     2765  2760  0 11:23 ?        00:00:00 php-fpm: pool www

[root@pm2-node205 ~]# netstat -tanlp | grep php-fpm
tcp        0      0 127.0.0.1:9000          0.0.0.0:*               LISTEN      2760/php-fpm: maste 

3.3.3: Deploy Nginx on pm1-node204

  • Build and install nginx-1.18.0 into /usr/local/nginx/:
[root@pm1-node204 ~]# wget -O /usr/local/src/nginx-1.18.0.tar.gz https://nginx.org/download/nginx-1.18.0.tar.gz
[root@pm1-node204 ~]# cd /usr/local/src

[root@pm1-node204 src]# tar zxvf nginx-1.18.0.tar.gz
[root@pm1-node204 src]# cd nginx-1.18.0/

[root@pm1-node204 nginx-1.18.0]# yum install -y vim lrzsz tree screen psmisc lsof tcpdump wget ntpdate gcc gcc-c++ glibc glibc-devel pcre pcre-devel openssl openssl-devel systemd-devel net-tools iotop bc zip unzip zlib-devel bash-completion nfs-utils automake libxml2 libxml2-devel libxslt libxslt-devel perl perl-ExtUtils-Embed

[root@pm1-node204 nginx-1.18.0]# ./configure --prefix=/usr/local/nginx \
  --user=nginx \
  --group=nginx \
  --with-http_ssl_module \
  --with-http_v2_module \
  --with-http_realip_module \
  --with-http_stub_status_module \
  --with-http_gzip_static_module \
  --with-pcre \
  --with-stream \
  --with-stream_ssl_module \
  --with-stream_realip_module
[root@pm1-node204 nginx-1.18.0]# make && make install
  • Add the nginx user with UID 2000 and change ownership of the nginx installation directory to nginx:
[root@pm1-node204 ~]# useradd nginx -s /sbin/nologin -u 2000
[root@pm1-node204 ~]# chown -R nginx:nginx /usr/local/nginx/
  • Prepare the Nginx unit file:
[root@pm1-node204 ~]# vim /usr/lib/systemd/system/nginx.service
[Unit]
Description=The nginx HTTP and reverse proxy server
After=network.target remote-fs.target nss-lookup.target
[Service]
Type=forking
PIDFile=/usr/local/nginx/logs/nginx.pid
ExecStartPre=/usr/bin/rm -f /usr/local/nginx/logs/nginx.pid
ExecStartPre=/usr/local/nginx/sbin/nginx -t
ExecStart=/usr/local/nginx/sbin/nginx
ExecReload=/bin/kill -s HUP $MAINPID
KillSignal=SIGQUIT
TimeoutStopSec=5
KillMode=process
PrivateTmp=true
[Install]
WantedBy=multi-user.target
  • Create a symlink for the nginx command:
[root@pm1-node204 ~]# ln -sv /usr/local/nginx/sbin/nginx /usr/sbin/nginx
  • Prepare a PHP test page:
[root@pm1-node204 php]# vim /data/nginx/wordpress/index.php
<?php
	phpinfo();
?>
  • Configure nginx:
[root@pm1-node204 ~]# vim /usr/local/nginx/conf/nginx.conf 
worker_processes  1;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile        on;
    keepalive_timeout  65;
    server_tokens off;
    server {
        listen       80;
        server_name  wordpress.yqc.com;
        location / {
            root   /data/nginx/wordpress;
            index index.php index.html index.htm;
        }

        location ~ \.php$ {
            root /data/nginx/wordpress;
            fastcgi_pass 127.0.0.1:9000;
            fastcgi_index index.php;
            fastcgi_hide_header X-Powered-By;
            fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
            include fastcgi_params;
        }
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
        
        location ~ ^/(pm_status|ping)$ {
            include fastcgi_params;
            fastcgi_pass 127.0.0.1:9000;
            fastcgi_param PATH_TRANSLATED $document_root$fastcgi_script_name;
        }
    }
}
  • Start Nginx:
[root@pm1-node204 ~]# nginx -t
nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful

[root@pm1-node204 ~]# systemctl start nginx
[root@pm1-node204 ~]# systemctl enable nginx
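  • With nginx and php-fpm both running, the FPM health endpoints configured in www.conf can be spot-checked locally (a sketch; the Host header matches the server_name):
[root@pm1-node204 ~]# curl -H "Host: wordpress.yqc.com" http://127.0.0.1/ping
pong
[root@pm1-node204 ~]# curl -H "Host: wordpress.yqc.com" http://127.0.0.1/pm_status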

3.3.4: Deploy Nginx on pm2-node205

  • Create the matching directory and the nginx user on pm2-node205:
[root@pm2-node205 ~]# mkdir /usr/local/nginx
[root@pm2-node205 ~]# useradd nginx -s /sbin/nologin -u 2000
[root@pm2-node205 ~]# chown -R nginx:nginx /usr/local/nginx/
  • Tar up the Nginx installation directory on pm1-node204, then copy it to pm2-node205 and extract it into the matching directory:
[root@pm1-node204 ~]# cd /usr/local/nginx/
[root@pm1-node204 nginx]# tar zcvf nginx-pm1-node204.tar.gz ./*

[root@pm2-node205 ~]# scp pm1-node204:/usr/local/nginx/nginx-pm1-node204.tar.gz /usr/local/nginx/
[root@pm2-node205 ~]# cd /usr/local/nginx/
[root@pm2-node205 nginx]# tar zxvf nginx-pm1-node204.tar.gz
  • Prepare the Nginx systemd unit file:
[root@pm2-node205 ~]# vim /usr/lib/systemd/system/nginx.service
[Unit]
Description=The nginx HTTP and reverse proxy server
After=network.target remote-fs.target nss-lookup.target
[Service]
Type=forking
PIDFile=/usr/local/nginx/logs/nginx.pid
ExecStartPre=/usr/bin/rm -f /usr/local/nginx/logs/nginx.pid
ExecStartPre=/usr/local/nginx/sbin/nginx -t
ExecStart=/usr/local/nginx/sbin/nginx
ExecReload=/bin/kill -s HUP $MAINPID
KillSignal=SIGQUIT
TimeoutStopSec=5
KillMode=process
PrivateTmp=true
[Install]
WantedBy=multi-user.target
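  • Reload systemd to pick up the unit file:
[root@pm2-node205 ~]# systemctl daemon-reload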
  • Create a symlink for the nginx command:
[root@pm2-node205 ~]# ln -sv /usr/local/nginx/sbin/nginx /usr/sbin/nginx
  • Start Nginx:
[root@pm2-node205 ~]# systemctl start nginx
[root@pm2-node205 ~]# systemctl enable nginx
  • Verify the status of the Nginx backend servers on the HAProxy stats page:

(Screenshot: HAProxy stats page showing the Nginx backend status.)

3.4: Deploy WordPress

3.4.1: Install WordPress

  • Extract the WordPress archive:
[root@pm1-node204 ~]# cd /data/nginx/wordpress/
[root@pm1-node204 wordpress]# mv index.php /tmp
[root@pm1-node204 wordpress]# tar zxvf wordpress-5.0.3-zh_CN.tar.gz
[root@pm1-node204 wordpress]# mv wordpress/* ./
[root@pm1-node204 wordpress]# rmdir wordpress/
[root@pm1-node204 wordpress]# mv wordpress-5.0.3-zh_CN.tar.gz /tmp
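  • WordPress needs write access to its document root (for uploads and plugin/theme installs); assuming php-fpm runs as the nginx user created earlier, set the ownership accordingly:
[root@pm1-node204 wordpress]# chown -R nginx:nginx /data/nginx/wordpress/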
  • Edit the WordPress configuration file:
[root@pm1-node204 wordpress]# cp wp-config-sample.php wp-config.php
[root@pm1-node204 wordpress]# vim wp-config.php
define('DB_NAME', 'wordpress');
define('DB_USER', 'wordpress');
define('DB_PASSWORD', '123456');
define('DB_HOST', '172.16.1.200');
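  • Before running the installer, you can verify database connectivity through the VIP at 172.16.1.200 (assuming a MySQL/MariaDB client is installed on the node):
[root@pm1-node204 ~]# mysql -h 172.16.1.200 -u wordpress -p123456 -e 'SHOW DATABASES;'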
  • Add a wordpress.yqc.com entry to the PC's hosts file:
192.168.1.200 wordpress.yqc.com
  • Verify access via the domain (http://wordpress.yqc.com):

(Screenshot: browser access to http://wordpress.yqc.com.)

3.4.2: Initialize WordPress

  • Fill in the relevant information and click "Install WordPress":

(Screenshot: WordPress installation form.)

  • Log in to the WordPress admin page:

(Screenshots: WordPress admin pages.)

  • The user-facing page (http://wordpress.yqc.com):

(Screenshot: WordPress front page.)

3.5: Save KVM Virtual Machine Snapshots

[root@pm1 ~]# virsh snapshot-create-as pm1-node201 --name "pm1-node201-HAProxy" --description "WordPress Based On HAProxy Load Balancing"
[root@pm1 ~]# virsh snapshot-create-as pm1-node204 --name "pm1-node204-HAProxy" --description "WordPress Based On HAProxy Load Balancing" 
[root@pm2 ~]# virsh snapshot-create-as pm2-node202 --name "pm2-node202-HAProxy" --description "WordPress Based On HAProxy Load Balancing"
[root@pm2 ~]# virsh snapshot-create-as pm2-node203 --name "pm2-node203-HAProxy" --description "WordPress Based On HAProxy Load Balancing"
[root@pm2 ~]# virsh snapshot-create-as pm2-node205 --name "pm2-node205-HAProxy" --description "WordPress Based On HAProxy Load Balancing"  
Domain snapshot pm2-node205-HAProxy created
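  • The snapshots can be listed per domain to confirm they were saved, for example:
[root@pm2 ~]# virsh snapshot-list pm2-node205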

4: Building a Highly Available WordPress Site with LVS

4.1: Preparation

4.1.1: Modify the HAProxy Configuration

  • Modify HAProxy so that it proxies only the MySQL database:
]# vim /etc/haproxy/haproxy.cfg
global
        maxconn 100000
        user haproxy
        group haproxy
        daemon
        nbproc 1
        pidfile /run/haproxy.pid
        log 127.0.0.1 local3 info
        chroot /usr/local/haproxy
        stats socket /var/lib/haproxy/haproxy.socket mode 600 level admin

defaults
        option redispatch
        option abortonclose
        option http-keep-alive
        option forwardfor
        maxconn 100000
        mode http
        timeout connect 10s
        timeout client 20s
        timeout server 30s
        timeout check 5s

listen stats
        bind :9999
        stats enable
        #stats hide-version
        stats uri /haproxy-status
        stats realm HAProxy\ Stats\ Page
        stats auth haadmin:123456
        stats auth admin:123456
        stats refresh 30s
        stats admin if TRUE

listen mysql
        bind 172.16.1.200:3306
        mode tcp
        log global
        balance source
        server 172.16.1.203 172.16.1.203:3306 check inter 3000 fall 3 rise 5
  • Reload HAProxy:
]# systemctl reload haproxy
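  • As a quick check, confirm that HAProxy is now listening only on the stats port (:9999) and the MySQL VIP (172.16.1.200:3306):
]# ss -tnlp | grep haproxy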

4.1.2: Add External NICs to the Web Servers

In LVS-DR mode, the real servers respond to clients directly, so they must be able to reach the external network.

  • Add an external NIC to pm1-node204:

(Screenshot: adding an external NIC to pm1-node204.)

  • Configure the external IP address on pm1-node204:
[root@pm1-node204 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth1
BOOTPROTO="none"
NAME="eth1"
DEVICE="eth1"
ONBOOT="yes"
IPADDR="192.168.1.204"
PREFIX="24"
GATEWAY="192.168.1.1"
DNS1="192.168.1.254"
  • Add an external NIC to pm2-node205:

(Screenshot: adding an external NIC to pm2-node205.)

  • Configure the external IP address on pm2-node205:
[root@pm2-node205 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth1
BOOTPROTO="none"
NAME="eth1"
DEVICE="eth1"
ONBOOT="yes"
IPADDR="192.168.1.205"
PREFIX="24"
GATEWAY="192.168.1.1"
DNS1="192.168.1.254"
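  • After writing the ifcfg files, bring the new interface up on each node and confirm the address, for example on pm2-node205:
[root@pm2-node205 ~]# ifup eth1
[root@pm2-node205 ~]# ifconfig eth1 | grep 'inet '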

4.2: Configure LVS Load Balancing

4.2.1: Configure Keepalived on pm1-node201

  • Add the virtual_server configuration:
[root@pm1-node201 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
   notification_email {
      root@pm1-node201.yqc.com
   }
   notification_email_from root@pm1-node201.yqc.com
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id pm1-node201.yqc.com
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VIP1 {
        state MASTER
        interface eth0
        virtual_router_id 1
        priority 100
        advert_int 2
        unicast_src_ip 192.168.1.201
        unicast_peer {
                192.168.1.202
        }
        authentication {
                auth_type PASS
                auth_pass 123456
        }
        virtual_ipaddress {
                192.168.1.200/24 dev eth0 label eth0:0
        }
}

vrrp_instance VIP2 {
        state BACKUP
        interface eth1
        virtual_router_id 2
        priority 80
        advert_int 2
        unicast_src_ip 172.16.1.201
        unicast_peer {
                172.16.1.202
        }
        authentication {
                auth_type PASS
                auth_pass 123456
        }
        virtual_ipaddress {
                172.16.1.200/24 dev eth1 label eth1:0
        }
}

virtual_server 192.168.1.200 80 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        persistent_timeout 20
        protocol TCP
        real_server 192.168.1.204 80 {
                weight 1
                TCP_CHECK {
                        connect_timeout 5
                        nb_get_retry 3
                        delay_before_retry 3
                        connect_port 80
                }
        }
        real_server 192.168.1.205 80 {
                weight 1
                TCP_CHECK {
                        connect_timeout 5
                        nb_get_retry 3
                        delay_before_retry 3
                        connect_port 80
                }
        }
}
  • Restart Keepalived:
[root@pm1-node201 ~]# systemctl restart keepalived
  • Verify the VIP:
[root@pm1-node201 ~]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.201  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 240e:324:79e:f400:5054:ff:fe1f:99c7  prefixlen 64  scopeid 0x0<global>
        inet6 fe80::5054:ff:fe1f:99c7  prefixlen 64  scopeid 0x20<link>
        ether 52:54:00:1f:99:c7  txqueuelen 1000  (Ethernet)
        RX packets 1281  bytes 173452 (169.3 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 1038  bytes 90807 (88.6 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

eth0:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.200  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 52:54:00:1f:99:c7  txqueuelen 1000  (Ethernet)

eth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.16.1.201  netmask 255.255.255.0  broadcast 172.16.1.255
        inet6 fe80::5054:ff:fef2:3384  prefixlen 64  scopeid 0x20<link>
        ether 52:54:00:f2:33:84  txqueuelen 1000  (Ethernet)
        RX packets 325  bytes 25121 (24.5 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 260  bytes 17084 (16.6 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
  • Verify the IPVS rules:
[root@pm1-node201 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.1.200:80 wrr
  -> 192.168.1.204:80             Route   1      0          0         
  -> 192.168.1.205:80             Route   1      0          0         
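  • ipvsadm can also show per-real-server packet and byte counters, which helps confirm later that requests are actually being scheduled:
[root@pm1-node201 ~]# ipvsadm -Ln --stats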

4.2.2: Configure Keepalived on pm2-node202

  • Add the virtual_server configuration:
[root@pm2-node202 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
   notification_email {
      root@pm2-node202.yqc.com
   }
   notification_email_from root@pm2-node202.yqc.com
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id pm2-node202.yqc.com
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VIP1 {
        state BACKUP
        interface eth0
        virtual_router_id 1
        priority 80
        advert_int 2
        unicast_src_ip 192.168.1.202
        unicast_peer {
                192.168.1.201
        }
        authentication {
                auth_type PASS
                auth_pass 123456
        }
        virtual_ipaddress {
                192.168.1.200/24 dev eth0 label eth0:0
        }
}

vrrp_instance VIP2 {
        state MASTER
        interface eth1
        virtual_router_id 2
        priority 100
        advert_int 2
        unicast_src_ip 172.16.1.202
        unicast_peer {
                172.16.1.201
        }
        authentication {
                auth_type PASS
                auth_pass 123456
        }
        virtual_ipaddress {
                172.16.1.200/24 dev eth1 label eth1:0
        }
}

virtual_server 192.168.1.200 80 {
        delay_loop 6
        lb_algo wrr
        lb_kind DR
        persistent_timeout 20
        protocol TCP
        real_server 192.168.1.204 80 {
                weight 1
                TCP_CHECK {
                        connect_timeout 5
                        nb_get_retry 3
                        delay_before_retry 3
                        connect_port 80
                }
        }
        real_server 192.168.1.205 80 {
                weight 1
                TCP_CHECK {
                        connect_timeout 5
                        nb_get_retry 3
                        delay_before_retry 3
                        connect_port 80
                }
        }
}
  • Restart Keepalived:
[root@pm2-node202 ~]# systemctl restart keepalived
  • Verify the IPVS rules:
[root@pm2-node202 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.1.200:80 wrr
  -> 192.168.1.204:80             Route   1      0          0         
  -> 192.168.1.205:80             Route   1      0          0         

4.3: Configure the Real Servers (RS)

4.3.1: Create the RS Configuration Script

The commands the script will run:
  • Set the ARP ignore and announce levels so that the real servers neither answer ARP requests for the VIP nor advertise it:
echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore 
echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore    
echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce   
echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce    
  • Configure the VIP on the loopback interface:
ifconfig lo:0 192.168.1.200 netmask 255.255.255.255 broadcast 192.168.1.200 up
  • Add a route entry so that packets destined for the VIP are sent out via the lo:0 interface:
route add -host 192.168.1.200 dev lo:0
Prepare the script file on the real servers
  • Create the script on pm1-node204:
[root@pm1-node204 ~]# vim lvs_rs.sh
#!/bin/bash
vip=192.168.1.200

case $1 in
start)
	echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
	echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
	echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
	echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
	ifconfig lo:0 $vip netmask 255.255.255.255 broadcast $vip up
	route add -host $vip dev lo:0
	;;
stop)
	echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
	echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
	echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
	echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
	ifconfig lo:0 down
	;;
*)
	echo "Usage: $0 (start|stop)"
	exit 1
	;;
esac

[root@pm1-node204 ~]# chmod +x lvs_rs.sh
  • Copy the script to pm2-node205:
[root@pm2-node205 ~]# scp pm1-node204:/root/lvs_rs.sh /root

4.3.2: Run the RS Configuration Script on pm1-node204

  • Run the script:
[root@pm1-node204 ~]# /root/lvs_rs.sh start
  • Verify the result:
[root@pm1-node204 ~]# ifconfig lo:0
lo:0: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 192.168.1.200  netmask 255.255.255.255
        loop  txqueuelen 0  (Local Loopback)

[root@pm1-node204 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.1.1     0.0.0.0         UG    0      0        0 eth1
169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
169.254.0.0     0.0.0.0         255.255.0.0     U     1003   0        0 eth1
172.16.1.0      0.0.0.0         255.255.255.0   U     0      0        0 eth0
192.168.1.0     0.0.0.0         255.255.255.0   U     0      0        0 eth1
192.168.1.200   0.0.0.0         255.255.255.255 UH    0      0        0 lo
  • Configure the script to run at boot:
[root@pm1-node204 ~]# vim /etc/rc.d/rc.local
/bin/bash /root/lvs_rs.sh start
[root@pm1-node204 ~]# chmod +x /etc/rc.d/rc.local

4.3.3: Run the RS Configuration Script on pm2-node205

  • Run the script:
[root@pm2-node205 ~]# /root/lvs_rs.sh start
  • Verify the result:
[root@pm2-node205 ~]# ifconfig lo:0
lo:0: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 192.168.1.200  netmask 255.255.255.255
        loop  txqueuelen 0  (Local Loopback)

[root@pm2-node205 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.1.1     0.0.0.0         UG    0      0        0 eth1
169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
169.254.0.0     0.0.0.0         255.255.0.0     U     1003   0        0 eth1
172.16.1.0      0.0.0.0         255.255.255.0   U     0      0        0 eth0
192.168.1.0     0.0.0.0         255.255.255.0   U     0      0        0 eth1
192.168.1.200   0.0.0.0         255.255.255.255 UH    0      0        0 lo
  • Configure the script to run at boot:
[root@pm2-node205 ~]# vim /etc/rc.d/rc.local
/bin/bash /root/lvs_rs.sh start
[root@pm2-node205 ~]# chmod +x /etc/rc.d/rc.local

4.4: Verify WordPress Access

(Screenshot: WordPress accessed through the LVS VIP.)
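  • The same can be verified from the command line on a client with the hosts entry in place; the reply arrives via the VIP 192.168.1.200 from one of the real servers:
]# curl -sI http://wordpress.yqc.com/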
