1. LVS Load Balancing
Environment
Hostname | IP | VIP | Role |
vm1 | 192.168.52.130 | 192.168.52.100 | Director (VS) |
vm2 | 192.168.52.131 | 192.168.52.100 | Real server (RS) |
vm3 | 192.168.52.132 | 192.168.52.100 | Real server (RS) |
vm4 | 192.168.52.133 | - | Test client |
Director (VS) configuration
[root@vm1 ~]# yum install -y ipvsadm
[root@vm1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
Add the VIP
[root@vm1 ~]# ip a a 192.168.52.100/24 dev ens160
Add the LVS rules
[root@vm1 ~]# ipvsadm -A -t 192.168.52.100:80 -s rr
[root@vm1 ~]# ipvsadm -a -t 192.168.52.100:80 -r 192.168.52.131:80 -g
[root@vm1 ~]# ipvsadm -a -t 192.168.52.100:80 -r 192.168.52.132:80 -g
[root@vm1 ~]# ipvsadm -ln
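Based on the ipvsadm output format shown later in this document, the table should now list the virtual service and both real servers, roughly:
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.52.100:80 rr
-> 192.168.52.131:80 Route 1 0 0
-> 192.168.52.132:80 Route 1 0 0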
Real server (RS) configuration
vm2:
Install the service and prepare the environment
[root@vm2 ~]# dnf install -y httpd
[root@vm2 ~]# systemctl start httpd
[root@vm2 ~]# echo vm2 > /var/www/html/index.html
[root@vm2 ~]# systemctl enable httpd
The RS also needs the VIP
[root@vm2 ~]# ip a a 192.168.52.100/24 dev ens160
[root@vm2 ~]# ip a
Disable ARP for the VIP (in DR mode the RS carries the VIP, so it must not answer ARP requests for it; otherwise clients could bypass the director)
[root@vm2 ~]# dnf install arptables
[root@vm2 ~]# arptables -A INPUT -d 192.168.52.100 -j DROP
[root@vm2 ~]# arptables -A OUTPUT -s 192.168.52.100 -j mangle --mangle-ip-s 192.168.52.131
[root@vm2 ~]# arptables -nL
[root@vm2 ~]# arptables-save > /etc/sysconfig/arptables
[root@vm2 ~]# systemctl enable arptables
vm3:
[root@vm3 ~]# dnf install -y httpd
[root@vm3 ~]# systemctl start httpd
[root@vm3 ~]# echo vm3 > /var/www/html/index.html
[root@vm3 ~]# systemctl enable httpd
The RS also needs the VIP
[root@vm3 ~]# ip a a 192.168.52.100/24 dev ens160
[root@vm3 ~]# ip a
Disable ARP for the VIP (same as on vm2)
[root@vm3 ~]# dnf install arptables
[root@vm3 ~]# arptables -A INPUT -d 192.168.52.100 -j DROP
[root@vm3 ~]# arptables -A OUTPUT -s 192.168.52.100 -j mangle --mangle-ip-s 192.168.52.132
[root@vm3 ~]# arptables -nL
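As on vm2, the rules can be persisted across reboots:
[root@vm3 ~]# arptables-save > /etc/sysconfig/arptables
[root@vm3 ~]# systemctl enable arptables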
Test
[root@vm4 ~]# for i in {1..10}; do curl 192.168.52.100; done
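With round-robin scheduling, the ten responses should alternate between the two index pages, for example:
vm2
vm3
vm2
vm3
...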
Check the director's connection counters
[root@vm1 ~]# ipvsadm -ln
2. Keepalived High Availability
Master configuration
vm1:
Clean up the resources left over from the previous section
[root@vm1 ~]# ip a d 192.168.52.100/24 dev ens160
[root@vm1 ~]# ipvsadm -C
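The configuration below belongs in /etc/keepalived/keepalived.conf on vm1. Assuming keepalived is not yet installed there, install it and open the file first (the backup node vm4 is set up the same way later):
[root@vm1 ~]# dnf install -y keepalived
[root@vm1 ~]# vim /etc/keepalived/keepalived.conf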
! Configuration File for keepalived

global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER
    interface ens160
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.52.100
    }
}

virtual_server 192.168.52.100 80 {
    delay_loop 3
    lb_algo rr
    lb_kind DR
    #persistence_timeout 50
    protocol TCP

    real_server 192.168.52.131 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            delay_before_retry 3
        }
    }

    real_server 192.168.52.132 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            delay_before_retry 3
        }
    }
}
Check that the resources are ready
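A quick way to verify, assuming keepalived has been started on vm1 (it creates both the VIP and the ipvsadm rules from the configuration above):
[root@vm1 ~]# systemctl enable --now keepalived
[root@vm1 ~]# ip a
[root@vm1 ~]# ipvsadm -ln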
Test keepalived's health checks on the backend RS
[root@vm2 ~]# systemctl stop httpd
The failed RS is automatically removed from the scheduling table
[root@vm1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.52.100:80 rr
-> 192.168.52.132:80 Route 1 0 0
Start the service again
[root@vm2 ~]# systemctl start httpd
The recovered RS is automatically added back to the scheduling table
[root@vm1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.52.100:80 rr
-> 192.168.52.131:80 Route 1 0 0
-> 192.168.52.132:80 Route 1 0 0
Backup configuration
vm4:
Install the high-availability software
[root@vm4 ~]# dnf install -y keepalived
Copy the configuration file over from vm1
[root@vm1 ~]# scp /etc/keepalived/keepalived.conf vm4:/etc/keepalived/
[root@vm4 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens160
    virtual_router_id 51
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.52.100
    }
}

virtual_server 192.168.52.100 80 {
    delay_loop 3
    lb_algo rr
    lb_kind DR
    #persistence_timeout 50
    protocol TCP

    real_server 192.168.52.131 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            delay_before_retry 3
        }
    }

    real_server 192.168.52.132 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            delay_before_retry 3
        }
    }
}
[root@vm4 ~]# systemctl enable --now keepalived
Test
Stop the keepalived service on vm1
[root@vm1 ~]# systemctl stop keepalived.service
Check that the resources automatically migrate to vm4
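On vm4 the VIP should now be present, and (assuming ipvsadm is also installed on vm4) the LVS table as well:
[root@vm4 ~]# ip a
[root@vm4 ~]# ipvsadm -ln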
3. NGINX Layer-7 Load Balancing
Deploying nginx
Install build dependencies
[root@vm1 ~]# dnf install -y gcc pcre-devel openssl-devel
Build and install nginx
[root@vm1 ~]# tar zxf nginx-1.24.0.tar.gz
[root@vm1 ~]# cd nginx-1.24.0/
Disable debug symbols
[root@vm1 nginx-1.24.0]# vim auto/cc/gcc
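In auto/cc/gcc, comment out the line that appends the -g debug flag so the resulting binary is smaller; after the edit the relevant part of the file looks roughly like this:
# debug
#CFLAGS="$CFLAGS -g"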
[root@vm1 nginx-1.24.0]# ./configure --with-http_ssl_module --with-http_stub_status_module
[root@vm1 nginx-1.24.0]# make
[root@vm1 nginx-1.24.0]# make install
Start the service
[root@vm1 ~]# nginx
Reload the nginx service
[root@vm1 ~]# nginx -s reload
Stop the nginx service
[root@vm1 ~]# nginx -s stop
Configure a systemd unit
[root@vm1 ~]# vim /lib/systemd/system/nginx.service
[Unit]
Description=The NGINX HTTP and reverse proxy server
After=syslog.target network-online.target remote-fs.target nss-lookup.target
Wants=network-online.target
[Service]
Type=forking
PIDFile=/usr/local/nginx/logs/nginx.pid
ExecStartPre=/usr/local/nginx/sbin/nginx -t
ExecStart=/usr/local/nginx/sbin/nginx
ExecReload=/usr/local/nginx/sbin/nginx -s reload
ExecStop=/bin/kill -s QUIT $MAINPID
PrivateTmp=true
[Install]
WantedBy=multi-user.target
[root@vm1 ~]# systemctl daemon-reload
[root@vm1 ~]# systemctl enable --now nginx
Load balancing
[root@vm1 ~]# vim /usr/local/nginx/conf/nginx.conf
http {
    upstream backend {
        server 192.168.52.131 weight=2;
        server 192.168.52.132;
        server 127.0.0.1 backup;
    }

    # Virtual host: requests for this domain are reverse proxied and load balanced
    server {
        listen 80;
        server_name www.hjl.org;

        location / {
            proxy_pass http://backend;
        }
    }
}
[root@vm1 ~]# systemctl reload nginx
Add name resolution for the domain on the test host
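For example, assuming the test host should resolve www.hjl.org to vm1's address from the environment table (or to the VIP once keepalived/pacemaker manages it), a hosts entry is enough:
[root@vm4 ~]# echo "192.168.52.130 www.hjl.org" >> /etc/hosts
[root@vm4 ~]# curl www.hjl.org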
Change the user that nginx worker processes run as
[root@vm1 ~]# useradd -M -d /usr/local/nginx/ -s /sbin/nologin nginx
[root@vm1 ~]# vim /usr/local/nginx/conf/nginx.conf
user nginx;
[root@vm1 ~]# nginx -s reload
[root@vm1 ~]# ps axu|grep nginx
Binding nginx worker processes to CPU cores
Recommended setting: the number of worker processes should match the number of CPU cores. Each worker_cpu_affinity bitmask pins one worker: 01 pins the first worker to CPU 0 and 10 pins the second worker to CPU 1.
[root@vm1 ~]# vim /usr/local/nginx/conf/nginx.conf
worker_processes 2;
worker_cpu_affinity 01 10;
[root@vm1 ~]# nginx -s reload
[root@vm1 ~]# ps axu|grep nginx
4. Pacemaker High Availability
Set up passwordless SSH between the hosts
[root@vm1 ~]# ssh-keygen
[root@vm1 ~]# ssh-copy-id vm4
Configure the yum repository
Both vm1 and vm4 need this repository configured
[root@vm1 ~]# cat /etc/yum.repos.d/hjl.repo
[root@vm4 ~]# cat /etc/yum.repos.d/hjl.repo
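The contents of hjl.repo are not shown above. A minimal sketch, assuming a hypothetical local RHEL 8 / Rocky 8 mirror at http://192.168.52.1/rhel8 (adjust baseurl to your own media); the HighAvailability repository is the one that provides pacemaker and pcs:
# Example only - point baseurl at your own mirror or mounted ISO
[BaseOS]
name=BaseOS
baseurl=http://192.168.52.1/rhel8/BaseOS
gpgcheck=0

[AppStream]
name=AppStream
baseurl=http://192.168.52.1/rhel8/AppStream
gpgcheck=0

[HighAvailability]
name=HighAvailability
baseurl=http://192.168.52.1/rhel8/HighAvailability
gpgcheck=0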
Install the software
[root@vm1 ~]# dnf install -y pacemaker-2.1.2-4.el8 pcs psmisc
[root@vm1 ~]# ssh vm4 dnf install -y pacemaker-2.1.2-4.el8 pcs psmisc
Start the pcsd service and set the hacluster password on both nodes
[root@vm1 ~]# systemctl enable --now pcsd
[root@vm1 ~]# ssh vm4 systemctl enable --now pcsd
[root@vm1 ~]# echo westos | passwd --stdin hacluster
[root@vm1 ~]# ssh vm4 'echo westos | passwd --stdin hacluster'
(the quotes keep the pipe on vm4; without them passwd would run on the local host)
Configure corosync
[root@vm1 ~]# pcs host auth vm1 vm4
Username: hacluster
Password:
vm1: Authorized
vm4: Authorized
[root@vm1 ~]# pcs cluster setup mycluster vm1 vm4
Start the cluster
[root@vm1 ~]# pcs cluster start --all
[root@vm1 ~]# pcs cluster enable --all
Verify the cluster
[root@vm1 ~]# pcs status
Create a cluster resource
[root@vm1 ~]# pcs resource create vip ocf:heartbeat:IPaddr2 ip=192.168.52.100 op monitor interval=30s
After the VIP is manually removed on vm1, the cluster automatically detects this and restores it
[root@vm1 ~]# ip a d 192.168.52.100/24 dev ens160
[root@vm1 ~]# ip a
After a short wait, the VIP comes back
[root@vm1 ~]# ip a
Node failover (standby)
[root@vm1 ~]# pcs node standby
[root@vm1 ~]# pcs status
With vm1 in standby, the resources automatically migrate to vm4
[root@vm4 ~]# ip a
Add an nginx service resource
Before adding the resource, make sure nginx is correctly configured on both vm1 and vm4, and that it is not left running under its own systemd autostart, since the cluster will start and stop it
[root@vm1 ~]# pcs resource create nginx systemd:nginx op monitor interval=60s
[root@vm1 ~]# pcs status
Create a resource group so the resources are forced to run on the same node
[root@vm1 ~]# pcs resource group add haogroup vip nginx
[root@vm1 ~]# pcs status
5. LNMP Architecture
Nginx zero-downtime upgrade
First shut down the pacemaker-related services
[root@vm1 ~]# pcs cluster stop --all
vm4: Stopping Cluster (pacemaker)...
vm1: Stopping Cluster (pacemaker)...
vm1: Stopping Cluster (corosync)...
vm4: Stopping Cluster (corosync)...
[root@vm1 ~]# pcs cluster disable --all
vm1: Cluster Disabled
vm4: Cluster Disabled
[root@vm1 ~]# curl -I localhost
[root@vm1 ~]# cd nginx-1.24.0/
[root@vm1 nginx-1.24.0]# ./configure --with-http_ssl_module --with-http_stub_status_module
[root@vm1 nginx-1.24.0]# make
Back up the original binary
[root@vm1 nginx-1.24.0]# cd /usr/local/nginx/sbin/
[root@vm1 sbin]# cp nginx nginx.old
Copy in the new binary
[root@vm1 sbin]# \cp -f /root/nginx-1.24.0/objs/nginx nginx
Get the PID of the nginx master process
[root@vm1 sbin]# ps ax|grep nginx
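The master PID can also be read from the pid file declared in the systemd unit; here 56881 is the old master's PID used in the commands below:
[root@vm1 sbin]# cat /usr/local/nginx/logs/nginx.pid
56881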
Bring up the new master process (USR2 makes the old master fork a new master running the new binary)
[root@vm1 sbin]# kill -USR2 56881
[root@vm1 sbin]# ps ax | grep nginx
At this point both nginx versions are handling requests
[root@vm1 sbin]# curl -I localhost
[root@vm1 sbin]# curl -I localhost
Retire the old version's worker processes (WINCH tells the old master to shut down its workers)
[root@vm1 sbin]# kill -WINCH 56881
[root@vm1 sbin]# ps ax | grep nginx
Now only the new version is serving requests
[root@vm1 sbin]# curl -I localhost
Version rollback
Copy back the old binary
[root@vm1 sbin]# cp -f nginx.old nginx
[root@vm1 sbin]# kill -HUP 56881
[root@vm1 sbin]# ps ax | grep nginx
Retire the new version's worker processes
[root@vm1 sbin]# kill -WINCH 56926
[root@vm1 sbin]# ps ax | grep nginx
Only the old version is serving requests now
Retire the new version's master process
[root@vm1 sbin]# kill -QUIT 56926
[root@vm1 sbin]# ps ax | grep nginx