Nginx负载均衡与高可用配置
环境说明:
各主机均已关闭防火墙与SELinux。
主机名 | IP地址 | 应用服务 | 系统 |
---|---|---|---|
LB01 | 192.168.188.128 | keepalived nginx | Centos8 |
LB02 | 192.168.188.129 | keepalived nginx | Centos8 |
RS01 | 192.168.188.137 | nginx | Centos8 |
RS02 | 192.168.188.132 | nginx | Centos8 |
需求:
LB01做主负载均衡器,LB02做备负载均衡器,VIP设为192.168.188.100。RS01与RS02做实际处理业务请求的服务器
部署RS
RS1主机配置
#安装nginx
[root@RS1 ~]# yum -y install nginx
#先将原首页文件备份,再定义新的首页文件内容
[root@RS1 ~]# cd /usr/share/nginx/html/
[root@RS1 html]# ls
404.html 50x.html index.html nginx-logo.png poweredby.png
[root@RS1 html]# mv index.html{,.bak}
[root@RS1 html]# echo 'This is RS1.' > index.html
[root@RS1 html]# ls
404.html 50x.html index.html index.html.bak nginx-logo.png poweredby.png
#启动nginx并设为开机自启
[root@RS1 html]# systemctl enable --now nginx.service
RS2主机配置
[root@RS2 ~]# dnf -y install nginx
[root@RS2 ~]# cd /usr/share/nginx/html/
[root@RS2 html]# mv index.html{,.bak}
[root@RS2 html]# echo "This is RS2." > index.html
[root@RS2 html]# ls
404.html 50x.html index.html index.html.bak nginx-logo.png poweredby.png
[root@RS2 html]# systemctl enable --now nginx.service
测试两台RS能否访问
[root@LB1 ~]# curl 192.168.188.137
This is RS1.
[root@LB1 ~]# curl 192.168.188.132
This is RS2.
部署LB
LB1主机做负载均衡
#安装nginx
[root@LB1 ~]# dnf -y install nginx
#修改配置文件前先对原文件做备份
[root@LB1 ~]# cd /etc/nginx/
[root@LB1 nginx]# cp nginx.conf nginx.conf.bak
[root@LB1 nginx]# ls
conf.d fastcgi_params.default nginx.conf uwsgi_params
default.d koi-utf nginx.conf.bak uwsgi_params.default
fastcgi.conf koi-win nginx.conf.default win-utf
fastcgi.conf.default mime.types scgi_params
fastcgi_params mime.types.default scgi_params.default
#配置负载均衡
[root@LB1 nginx]# vim nginx.conf
......
upstream webserver { #定义后端实际处理业务请求的服务器池
server 192.168.188.137; #RS1的IP
server 192.168.188.132; #RS2的IP
}
......
server {
listen 80;
server_name _;
root /usr/share/nginx/html;
......
location / {
proxy_pass http://webserver;
}
.......
[root@LB1 nginx]# systemctl enable --now nginx.service
测试负载均衡:
#因没有分配权重,默认是1:1轮询
[root@LB1 nginx]# curl 192.168.188.128
This is RS1.
[root@LB1 nginx]# curl 192.168.188.128
This is RS2.
[root@LB1 nginx]# curl 192.168.188.128
This is RS1.
[root@LB1 nginx]# curl 192.168.188.128
This is RS2.
LB2主机做负载均衡
[root@LB2 ~]# dnf -y install nginx
[root@LB2 ~]# cd /etc/nginx/
[root@LB2 nginx]# cp nginx.conf nginx.conf.bak
[root@LB2 nginx]# vim nginx.conf
......
upstream webserver { #定义后端实际处理业务请求的服务器池
server 192.168.188.137; #RS1的IP
server 192.168.188.132; #RS2的IP
}
......
server {
listen 80;
server_name _;
root /usr/share/nginx/html;
......
location / {
proxy_pass http://webserver;
}
.......
[root@LB2 nginx]# systemctl start nginx.service
测试负载均衡:
[root@LB2 nginx]# curl 192.168.188.129
This is RS1.
[root@LB2 nginx]# curl 192.168.188.129
This is RS2.
[root@LB2 nginx]# curl 192.168.188.129
This is RS1.
[root@LB2 nginx]# curl 192.168.188.129
This is RS2.
#测试完停止nginx服务
[root@LB2 nginx]# systemctl stop nginx.service
部署HA
LB1做主LB
#下载做高可用的软件
[root@LB1 ~]# dnf -y install keepalived
#生成8位数的密码
[root@LB1 ~]# strings /dev/urandom |tr -dc A-Za-z0-9 | head -c8; echo
PvF9Pslz
#配置keepalived
[root@LB1 ~]# cd /etc/keepalived/
[root@LB1 keepalived]# mv keepalived.conf{,.bak}
[root@LB1 keepalived]# vim keepalived.conf
! Configuration File for keepalived
! LB1: VRRP MASTER — owns the VIP 192.168.188.100 while healthy.
global_defs {
router_id lb01 # node identifier, shows up in logs/SNMP
}
vrrp_instance VI_1 {
state MASTER # initial role; MASTER on LB1, BACKUP on LB2
interface ens33 # NIC carrying VRRP adverts and the VIP
virtual_router_id 81 # must be identical on MASTER and BACKUP
priority 100 # higher priority wins the MASTER election
advert_int 1 # advertisement interval, seconds
authentication {
auth_type PASS
auth_pass PvF9Pslz # plain-text VRRP password (max 8 chars)
}
virtual_ipaddress {
192.168.188.100 # floating VIP
}
}
! NOTE(review): this LVS virtual_server section looks redundant in this
! setup — nginx (not IPVS) performs the load balancing, and the "real
! servers" listed below are the two load balancers themselves. Confirm intent.
virtual_server 192.168.188.100 80 {
delay_loop 6 # health-check interval, seconds
lb_algo rr # round-robin
lb_kind DR # direct routing
persistence_timeout 50
protocol TCP
real_server 192.168.188.128 80 {
weight 1
TCP_CHECK {
connect_port 80
connect_timeout 3
nb_get_retry 3 # NOTE(review): keepalived >= 2.x renamed this to 'retry' — confirm version
delay_before_retry 3
}
}
real_server 192.168.188.129 80 {
weight 1
TCP_CHECK {
connect_port 80
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
#开启keepalived并设为开机自启
[root@LB1 keepalived]# systemctl enable --now keepalived.service
#可以看到VIP已经有了
[root@LB1 keepalived]# ip a s ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:79:5f:8d brd ff:ff:ff:ff:ff:ff
inet 192.168.188.128/24 brd 192.168.188.255 scope global dynamic noprefixroute ens33
valid_lft 946sec preferred_lft 946sec
inet 192.168.188.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe79:5f8d/64 scope link noprefixroute
valid_lft forever preferred_lft forever
#使用VIP进行访问。如果访问不了又确信配置无误,则极有可能是备负载均衡器服务没停止
[root@LB1 keepalived]# curl 192.168.188.100
This is RS1.
[root@LB1 keepalived]# curl 192.168.188.100
This is RS2.
[root@LB1 keepalived]# curl 192.168.188.100
This is RS1.
[root@LB1 keepalived]# curl 192.168.188.100
This is RS2.
验证究竟是否是LB1(主)主机在做反向代理
这里有必要简述一下nginx反向代理的工作流程:反向代理服务器接收访问用户的请求后,会代理用户重新向其代理下的节点服务器发起请求,最后把数据返回给客户端使用。所以被代理的节点服务器并不知道客户端的存在,因为它所处理的全部请求都是由代理服务器发起的
#在LB2主机上用VIP进行访问
[root@LB2 nginx]# curl 192.168.188.100
This is RS1.
[root@LB2 nginx]# curl 192.168.188.100
This is RS2.
[root@LB2 nginx]# curl 192.168.188.100
This is RS1.
[root@LB2 nginx]# curl 192.168.188.100
This is RS2.
#在RS1主机上查看日志
[root@RS1 ~]# cd /var/log/nginx/
[root@RS1 nginx]# ls
access.log error.log
#可以看到访问主机的IP确实是LB1这台
[root@RS1 nginx]# tail -f access.log
192.168.188.128 - - [18/Oct/2022:10:42:57 +0800] "GET / HTTP/1.0" 200 13 "-" "curl/7.61.1" "-"
192.168.188.128 - - [18/Oct/2022:10:43:00 +0800] "GET / HTTP/1.0" 200 13 "-" "curl/7.61.1" "-"
LB2做备LB
[root@LB2 ~]# dnf -y install keepalived
[root@LB2 ~]# cd /etc/keepalived/
[root@LB2 keepalived]# mv keepalived.conf{,.bak}
#将LB1主机的keepalived配置文件直接copy过来
[root@LB2 keepalived]# scp root@192.168.188.128:/etc/keepalived/keepalived.conf ./
The authenticity of host '192.168.188.128 (192.168.188.128)' can't be established.
ECDSA key fingerprint is SHA256:0bvmuHVC3ghelbS64Z9RTF267C7bc4pcx27zoU2YM4U.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
Warning: Permanently added '192.168.188.128' (ECDSA) to the list of known hosts.
root@192.168.188.128's password:
keepalived.conf 100% 870 636.3KB/s 00:00
[root@LB2 keepalived]# ls
keepalived.conf keepalived.conf.bak
#修改配置文件。有两个地方必须修改:其一是state,设为BACKUP;其二是priority,一定要比主低。另外建议把router_id改为lb02以便区分两台主机(非必需)
! Configuration File for keepalived
! LB2: VRRP BACKUP — takes over the VIP when the MASTER stops advertising.
global_defs {
router_id lb01 # NOTE(review): still 'lb01' copied from the master — consider 'lb02' to uniquely identify this node
}
vrrp_instance VI_1 {
state BACKUP # backup role; promoted when MASTER adverts stop
interface ens33
virtual_router_id 81 # must match the MASTER's value
priority 90 # must be lower than the MASTER's 100
advert_int 1
authentication {
auth_type PASS
auth_pass PvF9Pslz # must match the MASTER's password
}
virtual_ipaddress {
192.168.188.100 # same floating VIP as the MASTER
}
}
! NOTE(review): LVS virtual_server section copied from LB1 — see the note
! there; nginx performs the balancing, so this block appears redundant.
virtual_server 192.168.188.100 80 {
delay_loop 6
lb_algo rr
lb_kind DR
persistence_timeout 50
protocol TCP
real_server 192.168.188.128 80 {
weight 1
TCP_CHECK {
connect_port 80
connect_timeout 3
nb_get_retry 3 # NOTE(review): keepalived >= 2.x renamed this to 'retry' — confirm version
delay_before_retry 3
}
}
real_server 192.168.188.129 80 {
weight 1
TCP_CHECK {
connect_port 80
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
[root@LB2 keepalived]# systemctl enable --now keepalived.service
测试主备切换
#模拟主负载均衡器出现故障
[root@LB1 keepalived]# systemctl stop nginx keepalived.service
#去到备负载均衡器上查看VIP
[root@LB2 keepalived]# ip a s ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:a9:87:2e brd ff:ff:ff:ff:ff:ff
inet 192.168.188.129/24 brd 192.168.188.255 scope global dynamic noprefixroute ens33
valid_lft 1093sec preferred_lft 1093sec
inet 192.168.188.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fea9:872e/64 scope link noprefixroute
valid_lft forever preferred_lft forever
#启动nginx进行负载均衡
[root@LB2 keepalived]# systemctl start nginx.service
[root@LB2 keepalived]# curl 192.168.188.100
This is RS1.
[root@LB2 keepalived]# curl 192.168.188.100
This is RS2.
[root@LB2 keepalived]# curl 192.168.188.100
This is RS1.
[root@LB2 keepalived]# curl 192.168.188.100
This is RS2.
#来到RS1主机上查看访问日志,可以看到此时显示源IP是LB2
[root@RS1 ~]# cd /var/log/nginx/
[root@RS1 nginx]# tail -f access.log
192.168.188.129 - - [18/Oct/2022:10:43:00 +0800] "GET / HTTP/1.0" 200 13 "-" "curl/7.61.1" "-"
192.168.188.129 - - [18/Oct/2022:10:43:01 +0800] "GET / HTTP/1.0" 200 13 "-" "curl/7.61.1" "-"
#如果你想继续做监控脚本实现半自动主备切换,那么请恢复到LB1为主负载均衡器
//LB2
[root@LB2 ~]# systemctl stop nginx.service
//LB1
[root@LB1 ~]# systemctl start nginx.service keepalived.service
[root@LB1 ~]# ip a s ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:79:5f:8d brd ff:ff:ff:ff:ff:ff
inet 192.168.188.128/24 brd 192.168.188.255 scope global dynamic noprefixroute ens33
valid_lft 1571sec preferred_lft 1571sec
inet 192.168.188.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe79:5f8d/64 scope link noprefixroute
valid_lft forever preferred_lft forever
配置监控脚本实现半自动主备切换
所谓半自动主备切换意思是,当主ka(keepalived)挂掉了,监控脚本检测到后,备ka会自动成为新的主ka。当旧主ka恢复后想要重新成为主ka时,需要系统管理员手动切换。
LB1主机配置
[root@LB1 ~]# mkdir /scripts && cd /scripts
[root@LB1 scripts]# vim check_nginx.sh
#!/bin/bash
# Health-check hook run by keepalived (vrrp_script): if nginx is no
# longer running on this node, stop keepalived so the VIP fails over
# to the backup load balancer.
# pgrep -c prints the number of matching processes (0 when none),
# replacing the fragile "ps -ef | grep -v grep" pattern.
nginx_status=$(pgrep -c nginx)
if [ "$nginx_status" -lt 1 ]; then
  systemctl stop keepalived
fi
[root@LB1 scripts]# vim notify.sh
#!/bin/bash
# keepalived notify hook: keep nginx's state in sync with the VRRP role.
#   master -> ensure nginx is running (this node now owns the VIP)
#   backup -> ensure nginx is stopped (the other node serves traffic)
case "$1" in
  master)
    # pgrep -c counts nginx processes; 0 means not running.
    if [ "$(pgrep -c nginx)" -lt 1 ]; then
      systemctl start nginx
    fi
    ;;
  backup)
    if [ "$(pgrep -c nginx)" -gt 0 ]; then
      systemctl stop nginx
    fi
    ;;
  *)
    # Only $1 (the role) is used; diagnostics go to stderr.
    echo "Usage: $0 {master|backup}" >&2
    ;;
esac
[root@LB1 scripts]# chmod +x check_nginx.sh notify.sh
[root@LB1 scripts]# ll
total 8
-rwxr-xr-x. 1 root root 146 Oct 18 11:03 check_nginx.sh
-rwxr-xr-x. 1 root root 439 Oct 18 11:04 notify.sh
#将监控脚本配置到keepalived
[root@LB1 scripts]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
! LB1 (MASTER) with nginx health tracking for semi-automatic failover.
global_defs {
router_id lb01
}
# Add this vrrp_script block: run the nginx check every second; the
# script itself stops keepalived when nginx is down, releasing the VIP.
vrrp_script nginx_check {
script "/scripts/check_nginx.sh"
interval 1
weight -20
}
vrrp_instance VI_1 {
state MASTER
interface ens33
virtual_router_id 81
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass PvF9Pslz
}
virtual_ipaddress {
192.168.188.100
}
# FIX: the original used 'track_ipaddress', which is not a keepalived
# keyword — vrrp_script blocks are referenced with 'track_script',
# otherwise the health check above is never executed.
track_script {
nginx_check
}
notify_master "/scripts/notify.sh master"
}
virtual_server 192.168.188.100 80 {
delay_loop 6
lb_algo rr
lb_kind DR
persistence_timeout 50
protocol TCP
real_server 192.168.188.128 80 {
weight 1
TCP_CHECK {
connect_port 80
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.188.129 80 {
weight 1
TCP_CHECK {
connect_port 80
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
[root@LB1 scripts]# systemctl restart keepalived.service
LB2主机配置
backup无需检测nginx是否正常,当升级为MASTER时启动nginx,当降级为BACKUP时关闭
[root@LB2 ~]# mkdir /scripts && cd /scripts
[root@LB2 scripts]# scp root@192.168.188.128:/scripts/notify.sh ./
root@192.168.188.128's password:
notify.sh 100% 439 287.0KB/s 00:00
[root@LB2 scripts]# cat notify.sh
#!/bin/bash
# keepalived notify hook: keep nginx's state in sync with the VRRP role.
#   master -> ensure nginx is running (this node now owns the VIP)
#   backup -> ensure nginx is stopped (the other node serves traffic)
case "$1" in
  master)
    # pgrep -c counts nginx processes; 0 means not running.
    if [ "$(pgrep -c nginx)" -lt 1 ]; then
      systemctl start nginx
    fi
    ;;
  backup)
    if [ "$(pgrep -c nginx)" -gt 0 ]; then
      systemctl stop nginx
    fi
    ;;
  *)
    # Only $1 (the role) is used; diagnostics go to stderr.
    echo "Usage: $0 {master|backup}" >&2
    ;;
esac
[root@LB2 scripts]# ll
total 4
-rwxr-xr-x 1 root root 439 Oct 18 11:08 notify.sh
[root@LB2 scripts]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
! LB2 (BACKUP) with notify hooks: start nginx on promotion to MASTER,
! stop it on demotion to BACKUP. No nginx health check is needed here.
global_defs {
router_id lb01 # NOTE(review): still 'lb01' copied from the master — consider 'lb02'
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
virtual_router_id 81 # must match the MASTER's value
priority 90 # must stay lower than the MASTER's 100
advert_int 1
authentication {
auth_type PASS
auth_pass PvF9Pslz
}
virtual_ipaddress {
192.168.188.100
}
notify_master "/scripts/notify.sh master" # add these two lines: run the hook on role transitions
notify_backup "/scripts/notify.sh backup"
}
! NOTE(review): LVS virtual_server section copied from LB1 — nginx does
! the balancing in this setup, so this block appears redundant.
virtual_server 192.168.188.100 80 {
delay_loop 6
lb_algo rr
lb_kind DR
persistence_timeout 50
protocol TCP
real_server 192.168.188.128 80 {
weight 1
TCP_CHECK {
connect_port 80
connect_timeout 3
nb_get_retry 3 # NOTE(review): keepalived >= 2.x renamed this to 'retry' — confirm version
delay_before_retry 3
}
}
real_server 192.168.188.129 80 {
weight 1
TCP_CHECK {
connect_port 80
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
[root@LB2 scripts]# systemctl restart keepalived.service
测试配置监控脚本是否能自动进行主备切换
#目前VIP在LB1主机上,说明此时还是主
[root@LB1 scripts]# ip a s ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:79:5f:8d brd ff:ff:ff:ff:ff:ff
inet 192.168.188.128/24 brd 192.168.188.255 scope global dynamic noprefixroute ens33
valid_lft 1757sec preferred_lft 1757sec
inet 192.168.188.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe79:5f8d/64 scope link noprefixroute
valid_lft forever preferred_lft forever
#手动停止nginx负载均衡器,模拟故障
[root@LB1 scripts]# systemctl stop nginx.service keepalived
#可以看到由于负载均衡器掉了,运行脚本停掉了keepalived。VIP也不在了
[root@LB1 scripts]# systemctl status keepalived.service
● keepalived.service - LVS and VRRP High Availability Monitor
Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor preset: disabled)
Active: inactive (dead) since Tue 2022-10-18 11:12:04 CST; 27s ago
[root@LB1 scripts]# ip a s ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:79:5f:8d brd ff:ff:ff:ff:ff:ff
inet 192.168.188.128/24 brd 192.168.188.255 scope global dynamic noprefixroute ens33
valid_lft 1661sec preferred_lft 1661sec
inet6 fe80::20c:29ff:fe79:5f8d/64 scope link noprefixroute
valid_lft forever preferred_lft forever
#此时去到LB2查看VIP,可以看到VIP在这台负载均衡器上了
[root@LB2 scripts]# ip a s ens33
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:a9:87:2e brd ff:ff:ff:ff:ff:ff
inet 192.168.188.129/24 brd 192.168.188.255 scope global dynamic noprefixroute ens33
valid_lft 1625sec preferred_lft 1625sec
inet 192.168.188.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fea9:872e/64 scope link noprefixroute
valid_lft forever preferred_lft forever
#可以看到nginx的默认80也随之启用
[root@LB2 scripts]# ss -antl
State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
LISTEN 0 128 0.0.0.0:80 0.0.0.0:*
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 [::]:22 [::]:*
#要想再次启用LB1为主,则需自行手动启动nginx与keepalived服务