一、nginx配置
配置 nginx.conf 文件
# Run worker processes as this user/group.
# NOTE(review): original read "user rootroot;" — presumably "user root root;"
# (user and group both root). Running workers as root is discouraged;
# consider a dedicated unprivileged user.
user root root;

# Number of worker processes; usually set equal to the number of CPU cores.
# A common guideline is 1–2x the machine's total CPU core count.
worker_processes 4;

# worker_cpu_affinity must be used together with worker_processes:
# one CPU bitmask per worker, binding each of the 4 workers to its own core.
worker_cpu_affinity 0001 0010 0100 1000;

# File that stores the master process PID.
pid /usr/local/nginx/nginx.pid;

# Maximum number of open file descriptors per worker process.
worker_rlimit_nofile 52400;
events
{
    # Use the epoll event mechanism (Linux).
    use epoll;

    # Maximum simultaneous connections per worker process.
    # Total concurrency = worker_processes * worker_connections
    # (max_clients = worker_processes * worker_connections).
    # With reverse proxying, a common rule of thumb is
    # max_clients = worker_processes * worker_connections / 4
    # (the divide-by-4 is an empirical value).
    # Under these settings the server can handle up to
    # 4 * 8000 = 32000 concurrent connections.
    #
    # worker_connections is bounded by physical memory: concurrency is
    # I/O-bound, so max_clients must stay below the system's maximum
    # number of open files, which scales with RAM (roughly 100k open
    # files per 1 GB of memory). For example, on a 360 MB VPS:
    #   $ cat /proc/sys/fs/file-max
    #   34336
    # 32000 < 34336, so the total stays within what the OS can handle.
    #
    # Therefore size worker_connections from worker_processes and the
    # system's open-file limit so the total stays below that limit —
    # in practice, tune it to the host's CPUs and memory. Actual
    # capacity may be lower, since other processes also consume
    # system resources. Raise the shell limit if needed:
    #   ulimit -SHn 65535
    worker_connections 8000;
}
http
{
    include mime.types;
    default_type application/octet-stream;
    # Let nginx serve its own error pages for FastCGI errors.
    fastcgi_intercept_errors on;
    charset utf-8;
    server_names_hash_bucket_size 128;
    client_header_buffer_size 4k;
    large_client_header_buffers 4 32k;
    # Maximum allowed size of a client request body (uploads).
    client_max_body_size 300m;
    sendfile on;
    tcp_nopush on;
    keepalive_timeout 60;
    tcp_nodelay on;
    client_body_buffer_size 512k;

    # Upstream (proxy) timeouts, buffers, and cache.
    proxy_connect_timeout 5;
    proxy_read_timeout 60;
    proxy_send_timeout 5;
    proxy_buffer_size 16k;
    proxy_buffers 4 64k;
    proxy_busy_buffers_size 128k;
    proxy_temp_file_write_size 128k;
    proxy_temp_path /usr/local/nginx/temp;
    proxy_cache_path /usr/local/nginx/cache levels=1:2 keys_zone=cache_one:200m inactive=1d max_size=30g;

    # gzip compression for text-like responses.
    gzip on;
    gzip_min_length 1k;
    gzip_buffers 4 16k;
    gzip_http_version 1.1;
    gzip_comp_level 2;
    gzip_types text/plain application/x-javascript text/css application/xml;
    gzip_vary on;

    ### 2012-12-19: changed nginx log format
    log_format main '$http_x_forwarded_for - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" $request_time $remote_addr';
upstreamresinServer {
#配置resin的代理端口与srun_id;适合resin3之后版本加入
server ip:端口 weight=1 srun_id=a;
server ip:端口weight=1 srun_id=b;
server ip:端口weight=1 srun_id=c;
jvm_route$cookie_JSESSIONID|sessionid;
#ip_hash;
}
server {
#访问nginx对外端口
listen 9094;
server_name ip;
index login.jsp;
root /opt/resin-pro-3.0.27/webapps/xfzzhjw;
#最后匹配规则
location/
{
proxy_next_upstream http_502 http_504 errortimeout invalid_header;
proxy_set_headerHost $host:9094;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For$proxy_add_x_forwarded_for;
proxy_pass http://resinServer;
expires 3d;
}
#动态页面由各个节点处理
location~ .*\.(jsp|do)?$
{
proxy_redirect off;
proxy_set_header Host $host:9094;
proxy_set_header X-Real-IP$remote_addr;
proxy_set_header X-Forwarded-For$proxy_add_x_forwarded_for;
proxy_pass http://resinServer;
}
#静态转发由nginx缓存
location~ .*\.(html|htm|gif|jpg|jpeg|bmp|png|ico|txt|js|css|jpg)$
{
root/opt/resin-pro-3.0.27/webapps/xfzzhjw/image;
expires 3d;
}
#加入resin 加入状态值
#反馈状态值
location /status {
stub_status on;
access_log off;
}
access_log /usr/local/nginx/logs/access.log main;
error_log /usr/local/nginx/logs/error.log crit;
}
}
二、配置resin
<web-app-default>之后加入,完成resin状态值监控
<!-- Expose Resin's built-in status servlet (read-only) at /resin-status. -->
<servlet-mapping servlet-class='com.caucho.servlets.ResinStatusServlet'>
  <url-pattern>/resin-status</url-pattern>
  <init enable="read"/>
</servlet-mapping>
<!-- HTTP listeners: one per Resin instance; server-id selects the instance. -->
<http server-id="e1" port="80"/>
<http server-id="e" port="18803"/>
<http server-id="f" port="18804"/>

<cluster>
  <!-- srun ports used for clustering and session-sticky load balancing.
       NOTE(review): the nginx upstream uses srun_id=a/b/c while the
       server-ids here are e1/e/f — these must agree; confirm and align. -->
  <srun server-id="e1" port="28805"/>
  <srun server-id="e" port="28803"/>
  <srun server-id="f" port="28804"/>
</cluster>

<!-- Keep processing a request even if the client disconnects. -->
<ignore-client-disconnect>true</ignore-client-disconnect>
<!-- Session cookie name used for sticky routing (matches jvm_route in nginx). -->
<session-cookie>JSESSIONID</session-cookie>