nginx反向代理介绍
nginx通常被用作后端服务器的反向代理,这样就可以很方便的实现动静分离以及负载均衡,从而大大提高服务器的处理能力。
nginx实现动静分离,其实就是在反向代理的时候,如果是静态资源,就直接从nginx发布的路径去读取,而不需要从后台服务器获取了。
但是要注意,这种情况下需要保证后端跟前端的程序保持一致,可以使用Rsync做服务端自动同步或者使用NFS、MFS分布式共享存储。
Http Proxy模块,功能很多,最常用的是proxy_pass和proxy_cache
如果要使用proxy_cache,需要集成第三方的ngx_cache_purge模块,用来清除指定的URL缓存。这个集成需要在安装nginx的时候去做,如:
./configure --add-module=../ngx_cache_purge-1.0 ...
nginx通过upstream模块来实现简单的负载均衡,upstream需要定义在http段内
环境设置
主机名称 | IP地址 | 安装环境 | 操作系统 |
---|---|---|---|
Masters | 192.168.141.140 | 源码nginx,keepalived | centos8 |
DR | 192.168.141.141 | 源码nginx,keepalived | centos8 |
RS1 | 192.168.141.143 | httpd | centos8 |
RS2 | 192.168.141.144 | nginx | centos8 |
nginx配置负载均衡和反向代理
安装httpd服务
##关闭防火墙
[root@RS1 ~]# systemctl stop firewalld.service
[root@RS1 ~]# vim /etc/selinux/config
SELINUX=disabled
[root@RS1 ~]# setenforce 0
#设置开机自启
[root@RS1 ~]# systemctl enable --now httpd
Created symlink /etc/systemd/system/multi-user.target.wants/httpd.service → /usr/lib/systemd/system/httpd.service.
[root@RS1 ~]# ss -anlt
State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 *:80 *:*
LISTEN 0 128 [::]:22 [::]:*
[root@RS1 ~]#
#配置网站页面
[root@RS1 ~]# cd /var/www/html/
[root@RS1 html]# ls
[root@RS1 html]# echo 'apache' > index.html
[root@RS1 html]# ls
index.html
[root@RS1 html]#
安装nginx服务
#关闭防火墙
[root@RS2 ~]# systemctl stop firewalld.service
[root@RS2 ~]# vim /etc/selinux/config
[root@RS2 ~]# setenforce 0
#设置开机自启
[root@RS2 ~]# yum -y install nginx
[root@RS2 ~]# systemctl enable --now nginx.service
Created symlink /etc/systemd/system/multi-user.target.wants/nginx.service → /usr/lib/systemd/system/nginx.service.
[root@RS2 ~]# ss -anlt
State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
LISTEN 0 128 0.0.0.0:80 0.0.0.0:*
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 [::]:80 [::]:*
LISTEN 0 128 [::]:22 [::]:*
[root@RS2 ~]#
#配置网站页面
[root@RS2 ~]# cd /usr/share/nginx/html/
[root@RS2 html]# ls
404.html 50x.html index.html nginx-logo.png poweredby.png
[root@RS2 html]# echo 'nginx' >index.html
[root@RS2 html]#
源码安装nginx
#关闭防火墙
[root@DR ~]# systemctl stop firewalld.service
[root@DR ~]# vim /etc/selinux/config
[root@DR ~]# setenforce 0
[root@DR ~]#
[root@DR ~]# nginx -v
nginx version: nginx/1.22.0
[root@DR ~]# ss -anlt
State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
LISTEN 0 128 0.0.0.0:80 0.0.0.0:*
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 [::]:22 [::]:*
[root@DR ~]# vim /usr/local/nginx/conf/nginx.conf
#添加负载均衡
upstream webservers {
server 192.168.141.143;
server 192.168.141.144;
}
#这个方法本质还是轮询,而且由于客户端的ip可能是不断变化的,
#比如动态ip,代理,翻墙等,
#因此ip_hash并不能完全保证同一个客户端总是由同一个服务器来处理。
#定义好upstream后,需要在server段内添加如下内容:
43 #charset koi8-r;
44
45 #access_log logs/host.access.log main;
46 location / {
47 proxy_pass http://webservers;
48 }
#测试访问,会以轮询方式依次访问后端服务器
[root@DR ~]# curl http://192.168.141.141
apache
[root@DR ~]# curl http://192.168.141.141
nginx
[root@DR ~]# curl http://192.168.141.141
apache
[root@DR ~]# curl http://192.168.141.141
nginx
#设置权重,连续访问3次该网站后再访问下一个网站
[root@DR ~]# vim /usr/local/nginx/conf/nginx.conf
upstream webservers {
server 192.168.141.143 weight=3;
server 192.168.141.144;
}
[root@DR ~]# systemctl restart nginx.service
[root@DR ~]# curl http://192.168.141.141
apache
[root@DR ~]# curl http://192.168.141.141
apache
[root@DR ~]# curl http://192.168.141.141
apache
[root@DR ~]# curl http://192.168.141.141
nginx
[root@DR ~]#
#在upstream段内,定义一个服务器列表,默认的方式是轮询,
#如果要确定同一个访问者发出的请求总是由同一个后端服务器来处理,可以设置ip_hash
[root@DR ~]# vim /usr/local/nginx/conf/nginx.conf
upstream webservers {
ip_hash; #添加这个
server 192.168.141.143 weight=3;
server 192.168.141.144;
}
#测试页面
#第一次访问apache,后面永远是apache
[root@DR ~]# systemctl restart nginx.service
[root@DR ~]# curl http://192.168.141.141
apache
[root@DR ~]# curl http://192.168.141.141
apache
[root@DR ~]# curl http://192.168.141.141
apache
[root@DR ~]# curl http://192.168.141.141
apache
[root@DR ~]# curl http://192.168.141.141
apache
[root@DR ~]#
nginx高可用
两台主机配置要求一样
[root@Masters ~]# nginx -v
nginx version: nginx/1.22.0
#因为配置文件要一样,所以我选择直接传送覆盖原来的文件
[root@Masters ~]# scp 192.168.141.141:/usr/local/nginx/conf/nginx.conf /usr/local/nginx/conf/
[root@Masters ~]# cat /usr/local/nginx/conf/nginx.conf
upstream webservers {
ip_hash;
server 192.168.141.143 weight=3;
server 192.168.141.144;
}
server {
listen 80;
server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
proxy_pass http://webservers;
}
[root@Masters ~]# systemctl restart nginx.service
安装keepalived
#yum安装keepalived
[root@Masters ~]# yum -y install keepalived
[root@DR ~]# yum -y install keepalived
[root@Masters ~]# cd /etc/keepalived/
[root@Masters keepalived]# ls
keepalived.conf
[root@Masters keepalived]# mv keepalived.conf{,-bak}
[root@Masters keepalived]# vim keepalived.conf
#配置主keepalived
! Configuration File for keepalived
global_defs {
router_id lb01
}
vrrp_instance VI_1 {
state MASTER
interface ens33
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass wangqing
}
virtual_ipaddress {
192.168.141.250
}
}
virtual_server 192.168.141.250 80 {
delay_loop 6
lb_algo rr
lb_kind DR
persistence_timeout 50
protocol TCP
real_server 192.168.141.140 80 {
weight 1
TCP_CHECK {
connect_port 80
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.141.141 80 {
weight 1
TCP_CHECK {
connect_port 80
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
[root@Masters keepalived]# systemctl start keepalived.service
[root@Masters keepalived]# systemctl enable keepalived.service
Created symlink /etc/systemd/system/multi-user.target.wants/keepalived.service → /usr/lib/systemd/system/keepalived.service.
#配置从keepalived
[root@DR ~]# cd /etc/keepalived/
[root@DR keepalived]# ls
keepalived.conf
[root@DR keepalived]# mv keepalived.conf{,-bak}
[root@DR keepalived]# ls
keepalived.conf-bak
[root@DR keepalived]# vim keepalived.conf
#传送配置文件然后修改
[root@Masters keepalived]# scp keepalived.conf 192.168.141.141:/etc/keepalived
root@192.168.141.141's password:
keepalived.conf 100% 875 76.4KB/s 00:00
[root@Masters keepalived]#
! Configuration File for keepalived
global_defs {
router_id lb02
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 51
priority 90
advert_int 1
authentication {
auth_type PASS
auth_pass wangqing
}
virtual_ipaddress {
192.168.141.250
}
}
virtual_server 192.168.141.250 80 {
delay_loop 6
lb_algo rr
lb_kind DR
persistence_timeout 50
protocol TCP
real_server 192.168.141.140 80 {
weight 1
TCP_CHECK {
connect_port 80
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.141.141 80 {
weight 1
TCP_CHECK {
connect_port 80
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
[root@DR keepalived]# systemctl restart keepalived.service
查看VIP在哪里
在MASTER上查看
[root@Masters ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:37:59:5d brd ff:ff:ff:ff:ff:ff
inet 192.168.141.140/24 brd 192.168.141.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.141.250/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::e72c:7b95:803d:fcb0/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@Masters ~]#
在SLAVE上查看
这里是不显示VIP的
[root@DR ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:16:3e:43:a5:ea brd ff:ff:ff:ff:ff:ff
inet 192.168.141.141/24 brd 192.168.141.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet6 fe80::216:3eff:fe43:a5ea/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@DR ~]#
查看VIP转移是否有效
#暂停主keepalived
[root@Masters ~]# systemctl stop keepalived.service
[root@Masters ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:37:59:5d brd ff:ff:ff:ff:ff:ff
inet 192.168.141.140/24 brd 192.168.141.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet6 fe80::e72c:7b95:803d:fcb0/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@Masters ~]#
#查看DR的IP
[root@DR ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:16:3e:43:a5:ea brd ff:ff:ff:ff:ff:ff
inet 192.168.141.141/24 brd 192.168.141.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 192.168.141.250/32 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::216:3eff:fe43:a5ea/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@DR ~]#
#暂停nginx然后测试访问
[root@Masters ~]# systemctl stop nginx.service
[root@Masters ~]# ss -anlt
State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 [::]:22 [::]:*
[root@Masters ~]#
#因为刚才DR设置了ip_hash,所以访问结果都是apache
[root@Masters ~]# curl http://192.168.141.250
apache
[root@Masters ~]# curl http://192.168.141.250
apache
[root@Masters ~]# curl http://192.168.141.250
apache
[root@Masters ~]# curl http://192.168.141.250
apache
#修改DR的nginx配置后测试
[root@DR ~]# vim /usr/local/nginx/conf/nginx.conf
upstream webservers {
#ip_hash;
server 192.168.141.143 weight=3;
server 192.168.141.144;
}
[root@DR ~]# systemctl restart nginx.service
#如之前设置的权重,连续访问三次apache后切换到下一台服务器nginx
[root@Masters ~]# curl http://192.168.141.250
apache
[root@Masters ~]# curl http://192.168.141.250
apache
[root@Masters ~]# curl http://192.168.141.250
apache
[root@Masters ~]# curl http://192.168.141.250
nginx