多服务器负载均衡的策略有多种,其中为目前大多数大型网站所采用的就是nginx做前端代理,后端链接多台jsp,php,cgi等网站服务器;
近来一个项目可能会存在较大访问量(预期日均用户量50万,最大并发数5万),项目开发采用的语言是java,有一个网站管理后台(系统管理员用的,访问量不大),另外有50多个接口向外提供ios,android客户端访问,预设服务器运行环境是tomcat,为了适应大规模并发,需要在tomcat前端增加代理服务器,来做负载均衡,于是就开始采用nginx作为前端代理服务器。
测试时,部署了3台tomcat服务器,1台nginx服务器,其中一台tomcat服务器与nginx在同一机器上;三台机器的操作系统分别为centos6.2(nginx+tomcat)、redhat enterprise 6.0(tomcat+mysql)、window xp(tomcat);
一开始按照官方推荐配置,配置了upstream模块,采用webbench测试并发一个动态的jsp(参数校验,数据库访问),3万个并发被平均地分配至三个tomcat服务器中处理,失败率是0;但当采用真实环境测试时问题来了:登录时总是提示验证码输入不正确,看下三台服务器控制台输出,原来生成验证码和验证验证码的tomcat经常不一致,而又没有配置session共享策略,所以必然会造成无法登录,这个问题通过设置nginx转发登录模块至同一台服务器解决;另外一个非常棘手的问题,那就是客户端文件上传问题(客户端接口中有两个文件上传接口),文件上传一开始也是动态地均衡到每台服务器,但是会发现可能上传的时候使用的是a服务器,下载的时候使用的是b服务器,这就经常地导致客户端无法正常地下载文件。
关于文件上传问题我想详细描述下:
文件的上传只能由tomcat服务器做处理,包括创建缩略图,写入路径至数据库,所以无法采用nginx自带的文件上传模块;
文件需要能被接口直接访问,路径需要写入数据库
考虑到的几种解决方案;
1.采用负载均衡策略,文件上传动态地分配至tomcat服务器处理,数据库中文件路径填写nginx服务器上的文件路径,然后每台服务器定时和nginx服务器做文件同步,最终文件下载时使用nginx直接处理;
2.采用负载均衡策略,文件上传动态的分配至tomcat服务器处理,数据库中文件路径填写tomcat服务器文件路径,并加上特殊前缀(唯一对应此台tomcat服务器);文件下载时,nginx配置一个前缀转发规则,不同的前缀转发至具体tomcat服务器;
3.采用负载均衡策略,文件上传动态地分配至tomcat服务器处理,数据库中文件路径填写tomcat服务器上的文件路径,然后每台tomcat服务器定时互相文件同步,最终文件下载时使用nginx动态分配至tomcat直接处理;
4.文件的上传指定nginx服务器上的tomcat处理,下载使用nginx处理;
优劣暂时就不写了,理解下,后期再给出吧;最终采用的策略是4(最简单)
另外附上nginx配置文件:
# NOTE(review): running worker processes as root is a security risk —
# prefer a dedicated unprivileged user (e.g. "nginx" or "www-data").
user root;
worker_processes 2;

error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
pid logs/nginx.pid;

events {
    # NOTE(review): 4048 is an odd limit — 4096 was probably intended;
    # confirm before changing, the value as written is still valid.
    worker_connections 4048;
}

http {
    include      mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log logs/access.log main;

    sendfile          on;
    tcp_nopush        on;
    #keepalive_timeout 0;
    keepalive_timeout 65;
    #gzip on;

    # Default host: only serves the stock 50x error page.
    server {
        listen      80;
        server_name localhost;
        #charset koi8-r;
        access_log logs/host.access.log main;

        # redirect server error pages to the static page /50x.html
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root html;
        }
    }

    # "Sticky" pool: requests that must always land on the same Tomcat
    # (captcha + login, file upload/download) are sent here.
    # NOTE(review): the upstream name deliberately shadows the IP
    # 192.168.1.245 — proxy_pass resolves upstream names before DNS, so
    # "http://192.168.1.245" below targets this pool, and the backends
    # receive "192.168.1.245" as the default Host header.  Renaming the
    # upstream would change that Host value; keep the name as-is.
    upstream 192.168.1.245 {
        # ip_hash;
        server 127.0.0.1:8083;
        # server 192.168.1.250:8083 weight=4;
        # server 192.168.1.106:8082 weight=3;
    }

    # Balanced pool for everything that is safe to distribute.
    upstream 192.168.1.245_nofile {
        # ip_hash;
        server 127.0.0.1:8083   weight=5;
        server 192.168.1.250:8083 weight=4;
        server 192.168.1.106:8082 weight=3;
    }

    server {
        listen      80;
        server_name 192.168.1.245;
        access_log  logs/big.server.access.log main;

        # FIX (security): deny Tomcat's protected directory.  Tomcat
        # refuses to serve WEB-INF itself, but nginx has no such rule
        # and the static-file location below serves straight from the
        # Tomcat docroot.  Must appear BEFORE the other regex locations,
        # since regex locations are matched in order of appearance.
        location ~ ^/WEB-INF/ {
            deny all;
        }

        # Everything else is load-balanced across the pool.
        location / {
            proxy_pass http://192.168.1.245_nofile;
        }

        # Dynamic pages stay on one backend (no shared session store).
        location ~ \.(jsp|php) {
            proxy_pass http://192.168.1.245;
        }

        # FIX: was "location ~ /fckeditor/~" — the trailing "~" made the
        # regex require a literal tilde after the directory, so the rule
        # never matched ordinary /fckeditor/ URLs.
        location ~ ^/fckeditor/ {
            proxy_pass http://192.168.1.245;
        }

        # Upload and captcha endpoints pinned to the local Tomcat so a
        # file (or captcha session) is created and read on one machine.
        location /user/info/updateIcon.action {
            proxy_pass http://192.168.1.245;
        }
        location /photo/photoUpload.action {
            proxy_pass http://192.168.1.245;
        }
        location /createCode {
            proxy_pass http://192.168.1.245;
        }
        location ~ ^/admin/ {
            proxy_pass http://192.168.1.245;
        }

        # Static assets served directly from the local Tomcat docroot.
        # FIX: added the space between the "~" modifier and the pattern.
        location ~ \.(htm|html|gif|jpg|jpeg|png|ico|rar|css|js|zip|txt|flv|swf|doc|ppt|xls) {
            root /home/ky/apache-tomcat-6.0.33/webapps/ROOT/;
        }
    }
}
# Plain-text copy of the configuration above, reflowed onto one line per
# directive (the original rendering collapsed it onto two lines) and
# carrying the same fixes: WEB-INF denied, fckeditor regex corrected,
# missing space after the "~" modifier restored.
user root;
worker_processes 2;
error_log logs/error.log;
pid logs/nginx.pid;
events {
    worker_connections 4048;
}
http {
    include mime.types;
    default_type application/octet-stream;
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log logs/access.log main;
    sendfile on;
    tcp_nopush on;
    keepalive_timeout 65;
    server {
        listen 80;
        server_name localhost;
        access_log logs/host.access.log main;
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root html;
        }
    }
    # Sticky pool (name intentionally shadows the IP — see note above).
    upstream 192.168.1.245 {
        server 127.0.0.1:8083;
    }
    # Balanced pool.
    upstream 192.168.1.245_nofile {
        server 127.0.0.1:8083 weight=5;
        server 192.168.1.250:8083 weight=4;
        server 192.168.1.106:8082 weight=3;
    }
    server {
        listen 80;
        server_name 192.168.1.245;
        access_log logs/big.server.access.log main;
        # Deny Tomcat's protected directory before any other regex location.
        location ~ ^/WEB-INF/ {
            deny all;
        }
        location / {
            proxy_pass http://192.168.1.245_nofile;
        }
        location ~ \.(jsp|php) {
            proxy_pass http://192.168.1.245;
        }
        # FIX: stray trailing "~" removed from the original regex.
        location ~ ^/fckeditor/ {
            proxy_pass http://192.168.1.245;
        }
        location /user/info/updateIcon.action {
            proxy_pass http://192.168.1.245;
        }
        location /photo/photoUpload.action {
            proxy_pass http://192.168.1.245;
        }
        location /createCode {
            proxy_pass http://192.168.1.245;
        }
        location ~ ^/admin/ {
            proxy_pass http://192.168.1.245;
        }
        location ~ \.(htm|html|gif|jpg|jpeg|png|ico|rar|css|js|zip|txt|flv|swf|doc|ppt|xls) {
            root /home/ky/apache-tomcat-6.0.33/webapps/ROOT/;
        }
    }
}
其实个人感觉还有一个问题,那就是数据库缓存问题,服务器采用ssh三层框架开发的,hibernate缓存无法共享,可能会导致数据库操作的不一致,下周测试并找到解决方案,再贴出
2012-06-24
____________________________
接上页:
1.这个配置中有个漏洞,那就是没有配置哪些目录是不允许直接访问的,在传统tomcat作为服务器的时候,tomcat本身的机制就禁止直接访问WEB-INF下的内容,但是在nginx中,由于配置了部分内容直接从nginx转发出去,这就导致了WEB-INF目录实际上可能会被暴露出去,一旦暴露了,那么系统架构,源代码,数据库配置文件,系统配置文件等内容将一并泄露,这对于商业项目来讲会是致命的安全隐患,再次提醒自己以及相关人士,一定要配置不允许访问的目录
新版配置在上一版配置中增加了三行:
# Deny direct access to WEB-INF.  Tomcat never serves this directory
# itself, but nginx has no such built-in rule, so without this block the
# application's source, web.xml and database credentials are one URL
# away.  Place it BEFORE any other regex location in the server block:
# regex locations are matched in their order of appearance, first match
# wins.
location ~ ^/WEB-INF/ {
    deny all;
}
2.上页中提到hibernate缓存问题调研:如果配置了hibernate的cache*的话确实会造成nginx返回结果不同步的问题,在此建议如果采用nginx则不要再继续配置hibernate的缓存(其实就内网来讲,网速的影响可以忽略,需要担心的是数据库服务器的负载能否跟上)
当nginx解决了前端的负载均衡的时候,接下来的问题就是如何解决mysql数据的负载(如果配置了100台主机共用一个mysql)以及nginx的负载瓶颈(3万并发)
待续
2012-07-16
————————————————————————
接上页;
这几天在真实环境中使用了nginx,项目需求是这样的:
内网有两台tomcat,但对外只能以一个端口的形式出现,并且运行的服务是网站
所谓网站,那么一定会校验Referer,并且jsp网页中都已经使用了base这个参数,页面输出到浏览器后的所有请求的开头将会以这个base参数作为http的开头,由于base的值是通过java代码自动生成的,生成规则是依据请求来源去获取对应的server以及端口,那么按照上文的配置,将会使得jsp执行完成后,base的值中出现8081等没有开放的端口,这会导致网站的css,js,img等内容无法加载,网页显示出问题,而且后续的网页跳转均出现错误;
经仔细查询nginx的文档,发现了一处关键配置,那就是proxy_set_header Host;
它的作用是对代理的服务,设置http请求的header,使得通过代理转发的url和用户真实的访问nginx的参数一致
例如,这里假如我设置的是proxy_set_header Host $host:8082;那么tomcat在接收到通过nginx转发的http请求后,会认为url来源是$host:8082,进一步推进的结果就是在jsp中的base参数的值将被设置为$host:8082;那么jsp页面加载完成后,后续的访问将以$host:8082为url前缀,也就是说访问时首先访问的还是nginx,然后再转发至tomcat,从而使得整个网页的访问变得更加流畅
proxy_set_header其实还可以设置其他的参数,这里没有用到,暂时就不做更多的探究
一份完整的配置代码贴出来:
# NOTE(review): running workers as root is a security risk — prefer a
# dedicated unprivileged user.
user root;
worker_processes 2;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include      mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    sendfile          on;
    tcp_nopush        on;
    #keepalive_timeout 0;
    keepalive_timeout 65;
    #gzip on;

    # Default host: catches requests addressed to the raw LAN IP.
    server {
        listen      80;
        server_name 192.168.1.104;
    }

    # NOTE(review): tb and EdianCompile currently point at the same
    # backend; two upstreams are kept so either app can be moved to its
    # own machine later without touching the locations below.
    upstream tb {
        server 127.0.0.1:8082;
    }
    upstream EdianCompile {
        server 127.0.0.1:8082;
    }
    upstream heihei {
        server 127.0.0.1:8081;
    }
    upstream advertise {
        server 59.174.130.202:8088;
    }

    server {
        listen      80;
        server_name app.emapp.cn;
        access_log  /var/log/nginx/big.server.access.log main;

        # Forward the original Host so Tomcat-generated absolute URLs
        # (the JSP <base> value) point back at nginx instead of the
        # backend's internal port.  Declared once at server level: every
        # location below sets no proxy_set_header of its own, so all of
        # them inherit these three headers unchanged.
        proxy_set_header Host $host;
        # Added so the backends can see/log the real client address.
        proxy_set_header X-Real-IP       $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

        # FIX (security): never expose Tomcat's WEB-INF directory — see
        # the incident note earlier in this write-up.  Must precede the
        # other regex locations (regex locations match in order).
        location ~ ^/WEB-INF/ {
            deny all;
        }

        # Per-application routing; unmatched paths fall through to the
        # "heihei" Tomcat via the prefix location /.
        location / {
            proxy_pass http://heihei;
        }
        location ~ ^/tb/ {
            proxy_pass http://tb;
        }
        location ~ ^/EdianCompile/ {
            proxy_pass http://EdianCompile;
        }
        location ~ ^/phonecharge/ {
            proxy_pass http://heihei;
        }
        location ~ ^/yhqserver/ {
            proxy_pass http://heihei;
        }
        location ~ ^/eshopapp/ {
            proxy_pass http://heihei;
        }
        location ~ ^/Struts2/ {
            proxy_pass http://heihei;
        }
        location ~ ^/advertise/ {
            proxy_pass http://advertise;
        }
    }
}
# NOTE(review): workers run as root — prefer an unprivileged user.
user root;
worker_processes 2;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
# Default host: catches requests addressed to the raw LAN IP.
server {
listen 80;
server_name 192.168.1.104;
}
# NOTE(review): tb and EdianCompile point at the same backend; kept as
# separate upstreams so either app can be relocated independently.
upstream tb{
server 127.0.0.1:8082;
}
upstream EdianCompile{
server 127.0.0.1:8082;
}
upstream heihei{
server 127.0.0.1:8081;
}
upstream advertise{
server 59.174.130.202:8088;
}
server {
listen 80;
server_name app.emapp.cn;
access_log /var/log/nginx/big.server.access.log main;
# Each location forwards the original Host so Tomcat-generated absolute
# URLs (the JSP <base> value) point back at nginx, not the backend port.
# Unmatched paths fall through to "heihei" via this prefix location.
location / {
proxy_pass http://heihei;
proxy_set_header Host $host;
}
location ~ ^/tb/ {
proxy_pass http://tb;
proxy_set_header Host $host;
}
location ~ ^/EdianCompile/ {
proxy_pass http://EdianCompile;
proxy_set_header Host $host;
}
location ~ ^/phonecharge/ {
proxy_pass http://heihei;
proxy_set_header Host $host;
}
location ~ ^/yhqserver/ {
proxy_pass http://heihei;
proxy_set_header Host $host;
}
location ~ ^/eshopapp/ {
proxy_pass http://heihei;
proxy_set_header Host $host;
}
location ~ ^/Struts2/ {
proxy_pass http://heihei;
proxy_set_header Host $host;
}
location ~ ^/advertise/ {
proxy_pass http://advertise;
proxy_set_header Host $host;
}
}
}