High Availability: Reload-Free Dynamic Load Balancing with Consul + OpenResty

Please credit the source when reposting: https://blog.csdn.net/l1028386804/article/details/100596026

I. Install Nginx

1. Install the dependencies

yum -y install wget gcc-c++ ncurses ncurses-devel cmake make perl bison openssl openssl-devel gcc* libxml2 libxml2-devel curl-devel libjpeg* libpng* freetype* autoconf automake zlib* flex* libxml* libmcrypt* libtool-ltdl-devel* libaio libaio-devel  bzr libtool

2. Install OpenSSL

wget https://www.openssl.org/source/openssl-1.0.2s.tar.gz
tar -zxvf openssl-1.0.2s.tar.gz
cd /usr/local/src/openssl-1.0.2s
sudo ./config --prefix=/usr/local/openssl-1.0.2s
sudo make
sudo make install

3. Install PCRE

wget https://ftp.pcre.org/pub/pcre/pcre-8.43.tar.gz
tar -zxvf pcre-8.43.tar.gz
cd /usr/local/src/pcre-8.43
sudo ./configure --prefix=/usr/local/pcre-8.43
sudo make
sudo make install

4. Install zlib

wget https://sourceforge.net/projects/libpng/files/zlib/1.2.11/zlib-1.2.11.tar.gz
tar -zxvf zlib-1.2.11.tar.gz
cd /usr/local/src/zlib-1.2.11
sudo ./configure --prefix=/usr/local/zlib-1.2.11
sudo make
sudo make install

5. Install lua-devel

sudo yum install lua-devel -y

6. Install LuaJIT

git clone https://github.com/openresty/luajit2
cd luajit2
make && sudo make install

Its installation prefix is /usr/local, so we can update the dynamic library configuration:

sudo vim /etc/ld.so.conf

Add the following line:

/usr/local/lib

Make the dynamic libraries take effect:

sudo ldconfig

To uninstall, run:

sudo make uninstall

Custom installation (this is the method I used):
You can also choose a custom installation path; here it is set to /usr/local/luajit so that the libraries and directories are easy to locate.

make PREFIX=/usr/local/luajit
sudo make install PREFIX=/usr/local/luajit

If you used a custom installation directory, configure the system environment variables as follows:

sudo vim /etc/profile

JAVA_HOME=/usr/local/jdk1.8.0_212
CLASS_PATH=.:$JAVA_HOME/lib
CONSUL_HOME=/usr/local/consul
CONSUL_TEMPLATE_HOME=/usr/local/consul-template
LUAJIT_HOME=/usr/local/luajit
PATH=$JAVA_HOME/bin:$CONSUL_HOME/bin:$CONSUL_TEMPLATE_HOME/bin:$LUAJIT_HOME/bin:$PATH
export JAVA_HOME CONSUL_HOME CONSUL_TEMPLATE_HOME LUAJIT_HOME PATH
export LUAJIT_LIB=/usr/local/luajit/lib
export LUAJIT_INC=/usr/local/luajit/include/luajit-2.1

source /etc/profile

Next, run the following command to create a symbolic link:

sudo ln -s /usr/local/luajit/lib/libluajit-5.1.so.2 /lib64/libluajit-5.1.so.2
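
To confirm that LuaJIT is installed and visible to the dynamic linker, a quick check can be run. This is a minimal sketch: the exact binary name under /usr/local/luajit/bin depends on the LuaJIT version installed, so adjust it if it differs.

# Print the LuaJIT version (the binary may be named luajit or luajit-2.1.0-beta3)
ls /usr/local/luajit/bin/
/usr/local/luajit/bin/luajit-2.1.0-beta3 -v

# Verify that the shared library is registered with the dynamic linker
ldconfig -p | grep -i luajit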

7. Download the Nginx modules

cd /usr/local/src
wget https://github.com/openresty/lua-nginx-module/archive/v0.10.13.tar.gz
tar -zxvf v0.10.13.tar.gz
wget https://github.com/simplresty/ngx_devel_kit/archive/v0.3.1.tar.gz
tar -zxvf v0.3.1.tar.gz

8. Install Nginx (as the root user)

wget http://nginx.org/download/nginx-1.17.2.tar.gz
tar -zxvf nginx-1.17.2.tar.gz
cd /usr/local/src/nginx-1.17.2
./configure --prefix=/usr/local/nginx-1.17.2 --with-ld-opt=-Wl,-rpath,/usr/local/luajit/lib --with-openssl=/usr/local/src/openssl-1.0.2s --with-pcre=/usr/local/src/pcre-8.43 --with-zlib=/usr/local/src/zlib-1.2.11 --with-http_realip_module --with-http_stub_status_module --with-http_ssl_module --with-http_flv_module --with-http_gzip_static_module --with-cc-opt=-O3 --with-stream --add-module=/usr/local/src/ngx_devel_kit-0.3.1 --add-module=/usr/local/src/lua-nginx-module-0.10.13
make
make install
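
Before moving on, it can be worth confirming that the Lua-related modules were actually compiled into this Nginx binary. A quick sanity check, assuming the install prefix chosen above:

# nginx -V prints the configure arguments to stderr, so redirect it for grep
/usr/local/nginx-1.17.2/sbin/nginx -V 2>&1 | tr ' ' '\n' | grep -E 'lua-nginx-module|ngx_devel_kit'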

The nginx.conf file is configured as follows:

user  hadoop hadoop;
worker_processes  auto;

error_log  logs/error.log;
#error_log  logs/error.log  notice;
#error_log  logs/error.log  info;

#pid        logs/nginx.pid;


events {
	use epoll;
    worker_connections  1024;
}


http {
     include       mime.types;
     default_type application/octet-stream;
 	 client_max_body_size     16m;
     client_body_buffer_size  256k;
     proxy_connect_timeout    1200;
     proxy_read_timeout       1200;
     proxy_send_timeout       6000;
     proxy_buffer_size        32k;
     proxy_buffers            4 64k;
     proxy_busy_buffers_size 128k;
     proxy_temp_file_write_size 128k;
	 log_format common "$remote_addr,$http_ip,$http_mac,$time_local,$status,$request_length,$bytes_sent,$body_bytes_sent,$http_user_agent,$http_referer,$request_method,$request_time,$request_uri,$server_protocol,$request_body,$http_token";
 
	log_format main "$remote_addr,$http_ip,$http_mac,$time_local,$status,$request_length,$bytes_sent,$body_bytes_sent,$http_user_agent,$http_referer,$request_method,$request_time,$request_uri,$server_protocol,$request_body,$http_token";
	 
	access_log  logs/access.log  common;
	access_log syslog:server=192.168.175.100:10000,facility=local7,tag=nginx,severity=info main;
	  map $http_upgrade $connection_upgrade {
		default upgrade;
		''      close;
	 } 

    sendfile        on;
    #tcp_nopush     on;
    # Keep-alive timeout for HTTP connections
    keepalive_timeout  65;

    # gzip compression settings
    gzip  on;            # enable gzip
    gzip_min_length 1k;  # minimum file size to compress
    gzip_buffers 4 16k;  # compression buffers

    # HTTP protocol version (1.0/1.1), default 1.1; use 1.0 if the frontend is squid 2.5
    gzip_http_version 1.1;

    # Compression level: 1 is the lowest ratio but fastest, 9 the highest ratio but slowest (faster transfer, more CPU)
    gzip_comp_level 2;

    # Adds a Vary header for proxies: whether a response is compressed is decided from the client's HTTP headers, so clients that do not support compression are not served compressed responses
    gzip_vary on;

    # MIME types to compress; do not add text/html, otherwise a warning is emitted
    gzip_types text/plain text/javascript text/css application/xml application/x-javascript application/json;
	server {
        listen       80;
        server_name  192.168.175.100;

        #charset koi8-r;

        #access_log  logs/host.access.log  main;

        location / {
            root   html;
            index  index.html index.htm;
        }

        #error_page  404              /404.html;

        # redirect server error pages to the static page /50x.html
        #
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }

        # proxy the PHP scripts to Apache listening on 127.0.0.1:80
        #
        #location ~ \.php$ {
        #    proxy_pass   http://127.0.0.1;
        #}

        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
        #
        #location ~ \.php$ {
        #    root           html;
        #    fastcgi_pass   127.0.0.1:9000;
        #    fastcgi_index  index.php;
        #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
        #    include        fastcgi_params;
        #}

        # deny access to .htaccess files, if Apache's document root
        # concurs with nginx's one
        #
        #location ~ /\.ht {
        #    deny  all;
        #}
    }
}

Next, test whether the Nginx configuration file is correct, as shown below.

-bash-4.1$ sudo /usr/local/nginx-1.17.2/sbin/nginx -t -c /usr/local/nginx-1.17.2/conf/nginx.conf
nginx: the configuration file /usr/local/nginx-1.17.2/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx-1.17.2/conf/nginx.conf test is successful

II. Install Consul

For Consul installation, see the post "High Availability: HTTP Dynamic Load Balancing with Consul + Consul-template".
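
The upstreams.lua script in section V queries Consul for a service named binghe_server, so at least one backend instance must be registered under that name. As an illustration only (the instance ID binghe_server_01, the backend address 192.168.175.101, and port 8080 are placeholder assumptions, not values taken from this article), a backend can be registered through Consul's HTTP API like this:

# Register a backend instance of binghe_server with the Consul agent
curl -X PUT http://192.168.175.100:8500/v1/agent/service/register -d '{
    "ID": "binghe_server_01",
    "Name": "binghe_server",
    "Address": "192.168.175.101",
    "Port": 8080
}'

# List the registered instances through the catalog endpoint used by upstreams.lua
curl http://192.168.175.100:8500/v1/catalog/service/binghe_server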

III. Install OpenResty

1. Preparation

sudo yum install pcre-devel openssl-devel gcc curl

2. Download OpenResty

wget https://openresty.org/download/openresty-1.15.8.1.tar.gz

3. Extract OpenResty

tar -zxvf openresty-1.15.8.1.tar.gz

4. Install OpenResty

cd openresty-1.15.8.1
./configure 
make && make install

Note: the ./configure command installs to /usr/local/openresty by default, which is equivalent to running:

./configure --prefix=/usr/local/openresty

You can also specify various options, as shown below.

./configure --prefix=/usr/local/openresty\
            --with-luajit \
            --without-http_redis2_module \
            --with-http_iconv_module \
            --with-http_postgres_module

5. Configure the OpenResty environment variables
Edit the /etc/profile file, as shown below.

sudo vim /etc/profile
JAVA_HOME=/usr/local/jdk1.8.0_212
CLASS_PATH=.:$JAVA_HOME/lib
CONSUL_HOME=/usr/local/consul
CONSUL_TEMPLATE_HOME=/usr/local/consul-template
LUAJIT_HOME=/usr/local/luajit
OPENRESTY_HOME=/usr/local/openresty
PATH=$JAVA_HOME/bin:$CONSUL_HOME/bin:$CONSUL_TEMPLATE_HOME/bin:$OPENRESTY_HOME/bin:$LUAJIT_HOME/bin:$PATH
export JAVA_HOME CONSUL_HOME CONSUL_TEMPLATE_HOME LUAJIT_HOME OPENRESTY_HOME PATH
export LUAJIT_LIB=/usr/local/luajit/lib
export LUAJIT_INC=/usr/local/luajit/include/luajit-2.1

Run the following command to make the environment variables take effect.

source /etc/profile
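
With PATH updated, the OpenResty binary should now resolve directly; a quick check:

which openresty   # should point into /usr/local/openresty/bin
openresty -v      # prints the bundled nginx/OpenResty version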

IV. Install OpenResty components

1. Install LuaSocket

wget http://files.luaforge.net/releases/luasocket/luasocket/luasocket-2.0.2/luasocket-2.0.2.tar.gz
tar -zxvf luasocket-2.0.2.tar.gz
cd luasocket-2.0.2
make
make install

2. Install lua-cjson

git clone https://github.com/openresty/lua-cjson/
cd lua-cjson/
vim Makefile 
Change the following settings:
PREFIX =            /usr/local/luajit
LUA_INCLUDE_DIR ?=   $(PREFIX)/include/luajit-2.1

make
sudo make install
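
To make sure LuaJIT can load the two libraries that upstreams.lua depends on, the following quick check can help. This is a sketch: the binary name depends on your LuaJIT version, and LUA_PATH/LUA_CPATH may need to be extended if the libraries were installed outside LuaJIT's default search path.

# cjson should encode a table to JSON
/usr/local/luajit/bin/luajit-2.1.0-beta3 -e 'print(require("cjson").encode({ok = true}))'
# LuaSocket should load and return the current timestamp
/usr/local/luajit/bin/luajit-2.1.0-beta3 -e 'print(require("socket").gettime())'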

V. Modify the configuration files

1. Create a new upstreams.lua file in /usr/local/luajit/share/luajit-2.1.0-beta3 (the directory added to lua_package_path later), as shown below.

vim /usr/local/luajit/share/luajit-2.1.0-beta3/upstreams.lua

The file contents are as follows:

local http = require("socket.http")
local ltn12 = require("ltn12")
local cjson = require "cjson"
local function update_upstreams()
	local resp = {}
	-- query the Consul catalog for all instances of the binghe_server service
	http.request{
		url = "http://192.168.175.100:8500/v1/catalog/service/binghe_server", sink = ltn12.sink.table(resp)
	}
	-- the sink collects the body as a table of chunks; join them before decoding
	local service_list = cjson.decode(table.concat(resp))

	local upstreams = {{ip="127.0.0.1",port=1111}}
	for i, v in ipairs(service_list) do
		upstreams[i + 1] = {ip = v.Address, port = v.ServicePort}
	end
	ngx.shared.upstream_list:set("binghe_server", cjson.encode(upstreams))
end

local function get_upstreams()
	local upstreams_str = ngx.shared.upstream_list:get("binghe_server")
	-- return a Lua table so balancer_by_lua_block can index it directly
	return cjson.decode(upstreams_str)
end

local _M = {
	update_upstreams = update_upstreams;
	get_upstreams = get_upstreams;
}
_M._VERSION="0.1"
return _M
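
update_upstreams only reads the Address and ServicePort fields of each entry in the JSON array returned by Consul's catalog API. The raw response the script parses can be inspected directly (assuming the binghe_server service has already been registered with Consul):

curl -s http://192.168.175.100:8500/v1/catalog/service/binghe_server

If the array is empty, only the 127.0.0.1:1111 placeholder entry ends up in the shared dict.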

2. Modify the Nginx configuration file, as shown below.

vim /usr/local/nginx-1.17.2/conf/nginx.conf

The newly added content is shown below.

# This line is required
lua_package_path "/usr/local/luajit/share/luajit-2.1.0-beta3/?.lua;;";
# Shared dict that stores the upstream list
lua_shared_dict upstream_list 10m;
# Shared dict used by the health checker (referenced as shm = "healthcheck" below)
lua_shared_dict healthcheck 1m;

# Executed when the Nginx master process loads the configuration file; performs the initial load
init_by_lua_block {
	local upstreams = require "upstreams"
	upstreams.update_upstreams();
}
# Scheduled in each Nginx worker process; uses ngx.timer.at to pull the configuration periodically
init_worker_by_lua_block {
	local upstreams = require "upstreams";
	local handle = nil;
	handle = function()
		--TODO: ensure that only one worker runs the update at a time
		upstreams.update_upstreams();
		ngx.timer.at(5, handle);
	end
	ngx.timer.at(5, handle);
	-- Health check module
	local hc = require "resty.upstream.healthcheck";
	local ok, err = hc.spawn_checker{
		shm = "healthcheck",  -- defined by "lua_shared_dict"
		upstream = "binghe_server", -- defined by "upstream"
		type = "http",

		http_req = "GET /status HTTP/1.0\r\nHost: binghe_server\r\n\r\n",-- raw HTTP request for checking

		interval = 2000,  -- run the check cycle every 2 sec
		timeout = 1000,   -- 1 sec is the timeout for network operations
		fall = 3,  -- # of successive failures before turning a peer down
		rise = 2,  -- # of successive successes before turning a peer up
		valid_statuses = {200, 302},  -- a list of valid HTTP status codes
		concurrency = 10,  -- concurrency level for test requests
	};
	if not ok then
		ngx.log(ngx.ERR, "failed to spawn health checker: ", err);
		return;
	end
}

# Upstream service configured in Nginx
upstream binghe_server{
	server 0.0.0.1; # placeholder server; real peers are chosen in balancer_by_lua_block
	balancer_by_lua_block{
		local balancer = require "ngx.balancer";
		local upstreams = require "upstreams";
		local tmp_upstreams = upstreams.get_upstreams();
		-- pick a random peer from the list pulled from Consul
		local index = math.random(1, table.getn(tmp_upstreams));
		local ip_port = tmp_upstreams[index];
		ngx.log(ngx.ERR, "current peer: ", ip_port.ip, ":", ip_port.port);
		balancer.set_current_peer(ip_port.ip, ip_port.port);
	}
}
server {
    listen       81;
    server_name  192.168.175.100;
    charset utf-8;
    location / {
         proxy_pass http://binghe_server;
    }
	
	# status page for all the peers:
	location = /status {
		access_log off;
		allow 127.0.0.1;
		deny all;
		default_type text/plain;
		content_by_lua_block {
			local hc = require "resty.upstream.healthcheck"
			ngx.say("Nginx Worker PID: ", ngx.worker.pid())
			ngx.print(hc.status_page())
		}
	}
}

The modified nginx.conf file is shown below.

user  hadoop hadoop;
worker_processes  auto;

error_log  logs/error.log;
#error_log  logs/error.log  notice;
#error_log  logs/error.log  info;

#pid        logs/nginx.pid;


events {
	use epoll;
    worker_connections  1024;
}


http {
     include       mime.types;
     default_type application/octet-stream;
 	 client_max_body_size     16m;
     client_body_buffer_size  256k;
     proxy_connect_timeout    1200;
     proxy_read_timeout       1200;
     proxy_send_timeout       6000;
     proxy_buffer_size        32k;
     proxy_buffers            4 64k;
     proxy_busy_buffers_size 128k;
     proxy_temp_file_write_size 128k;
	 log_format common "$remote_addr,$http_ip,$http_mac,$time_local,$status,$request_length,$bytes_sent,$body_bytes_sent,$http_user_agent,$http_referer,$request_method,$request_time,$request_uri,$server_protocol,$request_body,$http_token";
 
	log_format main "$remote_addr,$http_ip,$http_mac,$time_local,$status,$request_length,$bytes_sent,$body_bytes_sent,$http_user_agent,$http_referer,$request_method,$request_time,$request_uri,$server_protocol,$request_body,$http_token";
	 
	access_log  logs/access.log  common;
	access_log syslog:server=192.168.175.100:10000,facility=local7,tag=nginx,severity=info main;
	  map $http_upgrade $connection_upgrade {
		default upgrade;
		''      close;
	 } 

    sendfile        on;
    #tcp_nopush     on;
    # Keep-alive timeout for HTTP connections
    keepalive_timeout  65;

    # gzip compression settings
    gzip  on;            # enable gzip
    gzip_min_length 1k;  # minimum file size to compress
    gzip_buffers 4 16k;  # compression buffers

    # HTTP protocol version (1.0/1.1), default 1.1; use 1.0 if the frontend is squid 2.5
    gzip_http_version 1.1;

    # Compression level: 1 is the lowest ratio but fastest, 9 the highest ratio but slowest (faster transfer, more CPU)
    gzip_comp_level 2;

    # Adds a Vary header for proxies: whether a response is compressed is decided from the client's HTTP headers, so clients that do not support compression are not served compressed responses
    gzip_vary on;

    # MIME types to compress; do not add text/html, otherwise a warning is emitted
    gzip_types text/plain text/javascript text/css application/xml application/x-javascript application/json;
	
	# This line is required
	lua_package_path "/usr/local/luajit/share/luajit-2.1.0-beta3/?.lua;;";
	# Shared dict that stores the upstream list
	lua_shared_dict upstream_list 10m;
	# Shared dict used by the health checker (referenced as shm = "healthcheck" below)
	lua_shared_dict healthcheck 1m;

	# Executed when the Nginx master process loads the configuration file; performs the initial load
	init_by_lua_block {
		local upstreams = require "upstreams"
		upstreams.update_upstreams();
	}
	# Scheduled in each Nginx worker process; uses ngx.timer.at to pull the configuration periodically
	init_worker_by_lua_block {
		local upstreams = require "upstreams";
		local handle = nil;
		handle = function()
			--TODO: ensure that only one worker runs the update at a time
			upstreams.update_upstreams();
			ngx.timer.at(5, handle);
		end
		ngx.timer.at(5, handle);
		-- Health check module
		local hc = require "resty.upstream.healthcheck";
		local ok, err = hc.spawn_checker{
			shm = "healthcheck",  -- defined by "lua_shared_dict"
			upstream = "binghe_server", -- defined by "upstream"
			type = "http",

			http_req = "GET /status HTTP/1.0\r\nHost: binghe_server\r\n\r\n",-- raw HTTP request for checking

			interval = 2000,  -- run the check cycle every 2 sec
			timeout = 1000,   -- 1 sec is the timeout for network operations
			fall = 3,  -- # of successive failures before turning a peer down
			rise = 2,  -- # of successive successes before turning a peer up
			valid_statuses = {200, 302},  -- a list of valid HTTP status codes
			concurrency = 10,  -- concurrency level for test requests
		};
		if not ok then
			ngx.log(ngx.ERR, "failed to spawn health checker: ", err);
			return;
		end
	}

	# Upstream service configured in Nginx
	upstream binghe_server{
		server 0.0.0.1; # placeholder server; real peers are chosen in balancer_by_lua_block
		balancer_by_lua_block{
			local balancer = require "ngx.balancer";
			local upstreams = require "upstreams";
			local tmp_upstreams = upstreams.get_upstreams();
			-- pick a random peer from the list pulled from Consul
			local index = math.random(1, table.getn(tmp_upstreams));
			local ip_port = tmp_upstreams[index];
			ngx.log(ngx.ERR, "current peer: ", ip_port.ip, ":", ip_port.port);
			balancer.set_current_peer(ip_port.ip, ip_port.port);
		}
	}
	server {
		listen       81;
		server_name  192.168.175.100;
		charset utf-8;
		location / {
			 proxy_pass http://binghe_server;
		}
		
		# status page for all the peers:
		location = /status {
			access_log off;
			allow 127.0.0.1;
			deny all;
			default_type text/plain;
			content_by_lua_block {
				local hc = require "resty.upstream.healthcheck"
				ngx.say("Nginx Worker PID: ", ngx.worker.pid())
				ngx.print(hc.status_page())
			}
		}
	}

    server {
        listen       80;
        server_name  192.168.175.100;

        #charset koi8-r;

        #access_log  logs/host.access.log  main;

        location / {
            root   html;
            index  index.html index.htm;
        }

        #error_page  404              /404.html;

        # redirect server error pages to the static page /50x.html
        #
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }

        # proxy the PHP scripts to Apache listening on 127.0.0.1:80
        #
        #location ~ \.php$ {
        #    proxy_pass   http://127.0.0.1;
        #}

        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
        #
        #location ~ \.php$ {
        #    root           html;
        #    fastcgi_pass   127.0.0.1:9000;
        #    fastcgi_index  index.php;
        #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
        #    include        fastcgi_params;
        #}

        # deny access to .htaccess files, if Apache's document root
        # concurs with nginx's one
        #
        #location ~ /\.ht {
        #    deny  all;
        #}
    }
}

Next, test whether the Nginx configuration file is correct, as shown below.

-bash-4.1$ sudo /usr/local/nginx-1.17.2/sbin/nginx -t -c /usr/local/nginx-1.17.2/conf/nginx.conf
nginx: the configuration file /usr/local/nginx-1.17.2/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx-1.17.2/conf/nginx.conf test is successful

If the configuration test instead fails with an error like the following:

nginx: the configuration file /usr/local/nginx-1.17.2/conf/nginx.conf syntax is ok
nginx: [error] init_by_lua error: /usr/local/luajit/share/luajit-2.1.0-beta3/upstreams.lua:9: bad argument #1 to 'decode' (string expected, got table)
stack traceback:
        [C]: in function 'decode'
        /usr/local/luajit/share/luajit-2.1.0-beta3/upstreams.lua:9: in function 'update_upstreams'
        init_by_lua:3: in main chunk
nginx: configuration file /usr/local/nginx-1.17.2/conf/nginx.conf test failed

it means that cjson.decode received the table of response chunks collected by the ltn12 sink instead of a string. Make sure the chunks are joined with table.concat before decoding, so that /usr/local/luajit/share/luajit-2.1.0-beta3/upstreams.lua reads as follows:

local http = require("socket.http")
local ltn12 = require("ltn12")
local cjson = require "cjson"
local function update_upstreams()
	local resp = {}
	-- query the Consul catalog for all instances of the binghe_server service
	http.request{
		url = "http://192.168.175.100:8500/v1/catalog/service/binghe_server", sink = ltn12.sink.table(resp)
	}
	-- the sink collects the body as a table of chunks; join them before decoding
	local service_list = cjson.decode(table.concat(resp))

	local upstreams = {{ip="127.0.0.1",port=1111}}
	for i, v in ipairs(service_list) do
		upstreams[i + 1] = {ip = v.Address, port = v.ServicePort}
	end
	ngx.shared.upstream_list:set("binghe_server", cjson.encode(upstreams))
end

local function get_upstreams()
	local upstreams_str = ngx.shared.upstream_list:get("binghe_server")
	-- return a Lua table so balancer_by_lua_block can index it directly
	return cjson.decode(upstreams_str)
end

local _M = {
	update_upstreams = update_upstreams;
	get_upstreams = get_upstreams;
}
_M._VERSION="0.1"
return _M

VI. Start the services

1. Start Consul

consul agent -server -bootstrap-expect 1 -data-dir /tmp/consul -bind 0.0.0.0 -client 0.0.0.0 -ui
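
Before starting Nginx, the agent can be verified with standard Consul tooling:

# Cluster members seen by the local agent
consul members
# Address of the current Raft leader (empty means no leader has been elected yet)
curl http://192.168.175.100:8500/v1/status/leader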

2. Start Nginx with OpenResty

sudo /usr/local/openresty/bin/openresty -p /usr/local/nginx-1.17.2 -c /usr/local/nginx-1.17.2/conf/nginx.conf 
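
As a final smoke test (assuming at least one backend is registered in Consul and answering on its advertised port), send a request through the dynamically balanced listener and check the health-check status page; the status page only answers from 127.0.0.1 because of the allow/deny rules above:

# Proxied through upstream binghe_server, whose peers are pulled from Consul
curl -i http://192.168.175.100:81/

# Run on the Nginx host itself because of the allow 127.0.0.1 rule
curl http://127.0.0.1:81/status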

 
