宝塔nginx配置SpringBoot服务集群代理
1、需求:
现有一个springboot服务需要部署成集群,通过nginx负载均衡进行访问,其中这个springboot服务内置了MQTT服务、HTTP服务、TCP服务。
MQTT服务开放了1889端口
HTTP服务开放了8891端口
TCP服务开放了8893端口
RS485 TCP服务开放了8894端口
该服务在宝塔部署启动后开启的端口
2、nginx代理部署后的集群服务
宝塔nginx的nginx.conf位置
/www/server/nginx/conf/nginx.conf
nginx.conf原始内容:
# BT-panel (aota/BT) stock nginx main configuration, quoted as-is.
user www www;
worker_processes auto;
# 'crit' keeps the error log almost silent; lower the level when debugging.
error_log /www/wwwlogs/nginx_error.log crit;
pid /www/server/nginx/logs/nginx.pid;
worker_rlimit_nofile 51200;
# Layer-4 (TCP/UDP) proxy context. Extra stream server/upstream blocks are
# pulled in from the panel's tcp/ vhost directory -- this is where the
# MQTT/TCP cluster proxy file from step (b) will be loaded.
stream {
log_format tcp_format '$time_local|$remote_addr|$protocol|$status|$bytes_sent|$bytes_received|$session_time|$upstream_addr|$upstream_bytes_sent|$upstream_bytes_received|$upstream_connect_time';
access_log /www/wwwlogs/tcp-access.log tcp_format;
error_log /www/wwwlogs/tcp-error.log;
include /www/server/panel/vhost/nginx/tcp/*.conf;
}
events
{
use epoll;
worker_connections 51200;
multi_accept on;
}
http
{
include mime.types;
#include luawaf.conf;
include proxy.conf;
default_type application/octet-stream;
server_names_hash_bucket_size 512;
client_header_buffer_size 32k;
large_client_header_buffers 4 32k;
client_max_body_size 50m;
sendfile on;
tcp_nopush on;
keepalive_timeout 60;
tcp_nodelay on;
fastcgi_connect_timeout 300;
fastcgi_send_timeout 300;
fastcgi_read_timeout 300;
fastcgi_buffer_size 64k;
fastcgi_buffers 4 64k;
fastcgi_busy_buffers_size 128k;
fastcgi_temp_file_write_size 256k;
fastcgi_intercept_errors on;
# Response compression defaults (individual vhosts may override).
gzip on;
gzip_min_length 1k;
gzip_buffers 4 16k;
gzip_http_version 1.1;
gzip_comp_level 2;
gzip_types text/plain application/javascript application/x-javascript text/javascript text/css application/xml;
gzip_vary on;
gzip_proxied expired no-cache no-store private auth;
gzip_disable "MSIE [1-6]\.";
# Shared-memory zones for per-IP / per-server connection limiting.
limit_conn_zone $binary_remote_addr zone=perip:10m;
limit_conn_zone $server_name zone=perserver:10m;
server_tokens off;
access_log off;
# Built-in phpMyAdmin vhost shipped by the panel (port 888).
server
{
listen 888;
server_name phpmyadmin;
index index.html index.htm index.php;
root /www/server/phpmyadmin;
#error_page 404 /404.html;
include enable-php.conf;
location ~ .*\.(gif|jpg|jpeg|png|bmp|swf)$
{
expires 30d;
}
location ~ .*\.(js|css)?$
{
expires 12h;
}
# Deny access to dotfiles (.git, .env, ...).
location ~ /\.
{
deny all;
}
access_log /www/wwwlogs/access.log;
}
# HTTP vhost confs live here -- this is where the cluster proxy file
# from step (a) will be loaded.
include /www/server/panel/vhost/nginx/*.conf;
}
文件中的 include /www/server/panel/vhost/nginx/*.conf; 和 include /www/server/panel/vhost/nginx/tcp/*.conf;
这两行表明了nginx加载.conf文件的位置。我们要配置集群代理,只需要
在/www/server/panel/vhost/nginx/目录下配置HTTP(七层)代理的.conf文件,并在其tcp子目录下配置MQTT/TCP(四层)代理的.conf文件,即可完成需求。
a、修改java_veiplinks-standalone.conf
# Upstream pool for the SpringBoot IoT backend HTTP API.
# Default round-robin load balancing across the five cluster nodes.
# NOTE(review): hosts .26/.27/.28 expose 8844 while .107/.104 expose 8848 --
# confirm the per-host service ports are intentional and not a typo.
upstream iotserver {
    server 192.168.0.26:8844;
    server 192.168.0.27:8844;
    server 192.168.0.28:8844;
    server 192.168.0.107:8848;
    server 192.168.0.104:8848;
}

# Single-host upstream so every uploaded file lands on the same server
# (uploads must not be scattered across the cluster).
upstream fileserver {
    # server 192.168.0.26:8844; # uncomment to pin uploads to this host instead
    server 192.168.0.107:8848;
}

server {
    listen 80;
    # FIX: the original line lacked the trailing ';', which is a hard
    # configuration syntax error -- `nginx -t` fails and nginx won't reload.
    server_name iot_server;

    # gzip config (overrides the http-level defaults for this vhost)
    gzip on;
    gzip_min_length 1k;
    gzip_comp_level 9;
    gzip_types text/plain text/css text/javascript application/json application/javascript application/x-javascript application/xml;
    gzip_vary on;
    gzip_disable "MSIE [1-6]\.";

    # Front-end SPA build output served as static files.
    root /home/wwwroot/iotlinks-client/dist;
    # include /etc/nginx/mime.types;

    location / {
        index index.html;
    }

    # File uploads -> dedicated file server (see fileserver upstream).
    location ^~/upload/ {
        proxy_pass http://fileserver;
        proxy_set_header Host $host:$server_port;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    # Static file downloads, rewritten to the backend's /file/static path.
    # Long send/read timeouts and a 100m body cap for large files.
    location ^~/veiplinks/file/static {
        proxy_pass http://fileserver/file/static;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $host:$server_port;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_send_timeout 30m;
        proxy_read_timeout 30m;
        client_max_body_size 100m;
    }

    # All remaining /veiplinks/ API traffic, load-balanced across the
    # cluster. The trailing '/' on proxy_pass strips the /veiplinks/
    # prefix before forwarding. Upgrade/Connection headers plus disabled
    # buffering/caching keep WebSocket and streaming responses working.
    location ^~/veiplinks/ {
        proxy_pass http://iotserver/;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $host:$server_port;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        # Fail over to the next upstream quickly if a node is down.
        proxy_connect_timeout 1;
        proxy_buffering off;
        chunked_transfer_encoding off;
        proxy_cache off;
        proxy_send_timeout 30m;
        proxy_read_timeout 30m;
        client_max_body_size 100m;
    }
}
b、在/www/server/panel/vhost/nginx/tcp/目录下创建文件ngx_stream_proxy.conf,
并编辑为如下内容:
# Layer-4 (stream) cluster proxy. Loaded inside the stream{} context via
# include /www/server/panel/vhost/nginx/tcp/*.conf in nginx.conf.
# A node is marked down after 3 failures and retried after 10s.
# MQTT backend pool (SpringBoot built-in MQTT broker on 1889).
upstream mqtt-cluster {
# hash $remote_addr consistent;
server 192.168.0.26:1889 max_fails=3 fail_timeout=10s;
server 192.168.0.27:1889 max_fails=3 fail_timeout=10s;
server 192.168.0.28:1889 max_fails=3 fail_timeout=10s;
server 192.168.0.107:1889 max_fails=3 fail_timeout=10s;
server 192.168.0.104:1889 max_fails=3 fail_timeout=10s;
}
# HTTP service pool (proxied at TCP level, backend port 8891).
upstream http-cluster {
# hash $remote_addr consistent;
server 192.168.0.26:8891 max_fails=3 fail_timeout=10s;
server 192.168.0.27:8891 max_fails=3 fail_timeout=10s;
server 192.168.0.28:8891 max_fails=3 fail_timeout=10s;
server 192.168.0.107:8891 max_fails=3 fail_timeout=10s;
server 192.168.0.104:8891 max_fails=3 fail_timeout=10s;
}
# Raw TCP service pool (backend port 8893).
upstream tcp-cluster {
# hash $remote_addr consistent;
server 192.168.0.26:8893 max_fails=3 fail_timeout=10s;
server 192.168.0.27:8893 max_fails=3 fail_timeout=10s;
server 192.168.0.28:8893 max_fails=3 fail_timeout=10s;
server 192.168.0.107:8893 max_fails=3 fail_timeout=10s;
server 192.168.0.104:8893 max_fails=3 fail_timeout=10s;
}
# RS485 TCP service pool (backend port 8894).
upstream rs485-tcp-cluster {
# hash $remote_addr consistent;
server 192.168.0.26:8894 max_fails=3 fail_timeout=10s;
server 192.168.0.27:8894 max_fails=3 fail_timeout=10s;
server 192.168.0.28:8894 max_fails=3 fail_timeout=10s;
server 192.168.0.107:8894 max_fails=3 fail_timeout=10s;
server 192.168.0.104:8894 max_fails=3 fail_timeout=10s;
}
# Public listen ports deliberately differ from the backend ports:
# clients connect to 1884/8841/8843/8844 on the proxy host.
# 1884 -> mqtt-cluster (backend 1889)
server {
listen 1884;
proxy_pass mqtt-cluster;
proxy_connect_timeout 30s;
proxy_timeout 30s;
}
# 8841 -> http-cluster (backend 8891)
server {
listen 8841;
proxy_pass http-cluster;
proxy_connect_timeout 30s;
proxy_timeout 30s;
}
# 8843 -> tcp-cluster (backend 8893)
server {
listen 8843;
proxy_pass tcp-cluster;
proxy_connect_timeout 30s;
proxy_timeout 30s;
}
# 8844 -> rs485-tcp-cluster (backend 8894)
server {
listen 8844;
proxy_pass rs485-tcp-cluster;
proxy_connect_timeout 30s;
proxy_timeout 30s;
}