FastDFS + keepalived + nginx Cluster Setup

Installation Environment

nginx-1.17.3

FastDFS related downloads: https://github.com/happyfish100

libfastcommon-master.zip

fastdfs-master.zip

fastdfs-nginx-module_v1.16.tar.gz

ngx_cache_purge module download: http://labs.frickle.com/files/ngx_cache_purge-2.3.tar.gz

ngx_cache_purge-2.3.tar.gz (used to purge the cache of a specified URL)

keepalived

Virtual machines, CentOS 7 x64: 192.168.71.100, 192.168.71.101, 192.168.71.102, 192.168.71.103, 192.168.71.104, 192.168.71.105, 192.168.71.106, 192.168.71.107

Xshell5

Deployment Plan

[Figure: deployment architecture diagram]

Installation Steps

Step 1: Configure Load Balancing

Install nginx + keepalived on 192.168.71.100 (master) and 192.168.71.103 (backup) to load-balance 192.168.71.101 and 192.168.71.102 and to expose a single access IP to the outside.

nginx does the load balancing; keepalived keeps nginx highly available.

nginx installation

Reference: https://blog.csdn.net/wjy2460956668/article/details/108050103

nginx configuration
vim <nginx root>/conf/nginx.conf

#Modify the configuration:
upstream fastdfs_tracker {
	server 192.168.71.101 weight=1 max_fails=2 fail_timeout=30s;
	server 192.168.71.102 weight=1 max_fails=2 fail_timeout=30s;
}
server {
	listen 80;
	server_name localhost;
	location / {
		root html;
		index index.html index.htm;
	}
	error_page 500 502 503 504 /50x.html;
	location = /50x.html {
		root html;
	}
	#FastDFS proxy: requests under /group are forwarded to the tracker upstream
	location ~ /group {
		root html;
		index index.html index.htm;
		proxy_pass http://fastdfs_tracker;
	}
}
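
After editing the configuration, check the syntax and reload nginx; a minimal sketch, assuming nginx was compiled from source into /usr/local/nginx:

#Validate the configuration and reload without dropping connections
/usr/local/nginx/sbin/nginx -t
/usr/local/nginx/sbin/nginx -s reload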
keepalived installation
yum install keepalived
keepalived configuration
#Check the host's network interfaces
ip a

[Screenshot: ip a output showing the ens33 interface]

#Configure keepalived.conf
vim /etc/keepalived/keepalived.conf
global_defs {
	#String identifying this node, usually the hostname
	router_id 100
}
#keepalived runs the script periodically and adjusts the priority of the vrrp_instance based on the result.
#If the script exits with 0 and weight is greater than 0, the priority is increased accordingly.
#If the script exits non-zero and weight is less than 0, the priority is decreased accordingly.
#Otherwise the priority stays at the value configured by "priority".
vrrp_script chk_nginx {
	#Path of the script that checks nginx's status
	script "/etc/keepalived/nginx_check.sh"
	#Check interval in seconds
	interval 2
	#If the check fails, subtract 20 from the priority
	weight -20
}
#Define a virtual router; VI_1 is its identifier and can be any name
vrrp_instance VI_1 {
	#MASTER on the primary node, BACKUP on the standby node
	state MASTER
	#Network interface the virtual IP is bound to; use the interface that carries the host IP (ens33 here)
	interface ens33
	#Virtual router ID; must be identical on master and backup
	#Usually the last octet of an available IP; nodes with the same virtual_router_id form one group, and it determines the multicast MAC address
	#Any unique number from 0 to 255, used to distinguish multiple vrrpd instances
	virtual_router_id 51
	#IP address of this host
	#By default vrrpd binds to the primary IP on the interface.
	#If you want to hide vrrpd's location, use this IP as the src_addr for multicast or unicast VRRP packets.
	#(Since it is multicast, vrrpd will receive replies no matter which src_addr is used.)
	mcast_src_ip 192.168.71.100
	#Node priority, 0-254; MASTER must be higher than BACKUP
	priority 100
	#Set nopreempt on the higher-priority node so it does not take the VIP back after recovering from a failure
	#Normally VRRP preempts a lower-priority machine when a higher-priority machine comes online.
	#"nopreempt" lets the lower-priority machine keep the master role even after the higher-priority machine comes back.
	#Note: for this to work, the initial state of this entry must be BACKUP.
	nopreempt
	#Interval between VRRP advertisements; must match on both nodes, default 1s
	advert_int 1
	#Authentication settings; must match on both nodes
	authentication {
		auth_type PASS
		auth_pass 1234 ## use a real, matching password in production
	}
	#Attach the track_script block to this instance
	track_script {
		#Run the nginx health check defined above
		chk_nginx
	}
	#Virtual IP pool; must be identical on both nodes
	virtual_ipaddress {
		#The device must match the interface above (ens33, not eth0)
		192.168.71.10/24 dev ens33 label ens33:2
	}
}
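
Once both nodes are configured, start keepalived and check that the virtual IP is bound on the MASTER; a short sketch, assuming systemd on CentOS 7:

systemctl start keepalived
systemctl enable keepalived
#On the MASTER, the virtual IP 192.168.71.10 should appear on ens33 (label ens33:2)
ip addr show ens33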
Health check script

Write the nginx health check script /etc/keepalived/nginx_check.sh and reference it in the keepalived configuration.

Script requirements: if nginx is not running, try to start it; if it cannot be started, kill the local keepalived process so that keepalived binds the virtual IP to the BACKUP machine.

vim /etc/keepalived/nginx_check.sh

#Contents
#!/bin/bash
A=`ps -C nginx --no-header | wc -l`
if [ $A -eq 0 ];then
    /home/tengine-2.3.2/sbin/nginx
    sleep 2
    if [ `ps -C nginx --no-header | wc -l` -eq 0 ];then
        killall keepalived
    fi
fi

#Make the script executable
chmod +x /etc/keepalived/nginx_check.sh
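
To confirm that failover works, simulate a failure of the MASTER and watch the virtual IP move; a quick test sketch (simply killing nginx is not enough, because nginx_check.sh will restart it):

#On the MASTER: stop keepalived to simulate a node failure
systemctl stop keepalived
#On the BACKUP: the virtual IP 192.168.71.10 should now appear on ens33
ip addr show ens33
#Start keepalived on the MASTER again afterwards
systemctl start keepalived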
Step 2: Configure the Tracker

Install nginx on 192.168.71.101 and 192.168.71.102 to provide load balancing for the storage nodes.

nginx installation
nginx configuration
vim <nginx root>/conf/nginx.conf

#Modified configuration
events {
    worker_connections  1024;
}

http {
    include       mime.types;
    default_type  application/octet-stream;

    #log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
    #                  '$status $body_bytes_sent "$http_referer" '
    #                  '"$http_user_agent" "$http_x_forwarded_for"';

    #access_log  logs/access.log  main;
    #access_log  "pipe:rollback logs/access_log interval=1d baknum=7 maxsize=2G"  main;

    sendfile        on;
    #tcp_nopush     on;

    #keepalive_timeout  0;
    keepalive_timeout  65;

    #gzip  on;

    #Cache zone referenced by proxy_cache below; without it nginx refuses to start.
    #The cache directory is an example path, adjust it to your environment.
    proxy_cache_path /usr/local/nginx/cache levels=1:2 keys_zone=http-cache:100m inactive=30d max_size=10g;

    #Servers of group1
    upstream fdfs_group1 {
        server 192.168.71.104:80;
        server 192.168.71.105:80;
    }
    #Servers of group2
    upstream fdfs_group2 {
        server 192.168.71.106:80;
        server 192.168.71.107:80;
    }

    server {
        listen       80;
        server_name  localhost;

        #charset koi8-r;

        #access_log  logs/host.access.log  main;
        #access_log  "pipe:rollback logs/host.access_log interval=1d baknum=7 maxsize=2G"  main;

        location /group1/M00 {
            proxy_next_upstream http_502 http_504 error timeout invalid_header;
            proxy_cache http-cache;
            proxy_cache_valid  200 304 12h;
            proxy_cache_key $uri$is_args$args;
            proxy_pass http://fdfs_group1;
            expires 30d;
        }
        
        location /group2/M00 {
            proxy_next_upstream http_502 http_504 error timeout invalid_header;
            proxy_cache http-cache;
            proxy_cache_valid  200 304 12h;
            proxy_cache_key $uri$is_args$args;
            proxy_pass http://fdfs_group2;
            expires 30d;
        }
        
        #error_page  404              /404.html;

        # redirect server error pages to the static page /50x.html
        #
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }

        # proxy the PHP scripts to Apache listening on 127.0.0.1:80
        #
        #location ~ \.php$ {
        #    proxy_pass   http://127.0.0.1;
        #}

        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
        #
        #location ~ \.php$ {
        #    root           html;
        #    fastcgi_pass   127.0.0.1:9000;
        #    fastcgi_index  index.php;
        #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
        #    include        fastcgi_params;
        #}

        # pass the Dubbo rpc to Dubbo provider server listening on 127.0.0.1:20880
        #
        #location /dubbo {
        #    dubbo_pass_all_headers on;
        #    dubbo_pass_set args $args;
        #    dubbo_pass_set uri $uri;
        #    dubbo_pass_set method $request_method;
        #
        #    dubbo_pass org.apache.dubbo.samples.tengine.DemoService 0.0.0 tengineDubbo dubbo_backend;
        #}

        # deny access to .htaccess files, if Apache's document root
        # concurs with nginx's one
        #
        #location ~ /\.ht {
        #    deny  all;
        #}
    }

    # upstream for Dubbo rpc to Dubbo provider server listening on 127.0.0.1:20880
    #
    #upstream dubbo_backend {
    #    multi 1;
    #    server 127.0.0.1:20880;
    #}

    # another virtual host using mix of IP-, name-, and port-based configuration
    #
    #server {
    #    listen       8000;
    #    listen       somename:8080;
    #    server_name  somename  alias  another.alias;

    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}


    # HTTPS server
    #
    #server {
    #    listen       443 ssl;
    #    server_name  localhost;

    #    ssl_certificate      cert.pem;
    #    ssl_certificate_key  cert.key;

    #    ssl_session_cache    shared:SSL:1m;
    #    ssl_session_timeout  5m;

    #    ssl_ciphers  HIGH:!aNULL:!MD5;
    #    ssl_prefer_server_ciphers  on;

    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}

}
FastDFS tracker installation and configuration
#FastDFS installation reference:
https://blog.csdn.net/wjy2460956668/article/details/107910940
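
If cached files are to be purged by URL with the ngx_cache_purge module listed in the environment, the tracker-side nginx has to be compiled with it; a minimal build sketch, assuming the sources are unpacked under /usr/local/src:

tar -zxvf ngx_cache_purge-2.3.tar.gz -C /usr/local/src
cd nginx-1.17.3
./configure --prefix=/usr/local/nginx --add-module=/usr/local/src/ngx_cache_purge-2.3
make && make install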
Step 3: Configure Storage

Install nginx + FastDFS storage on 192.168.71.104, 192.168.71.105, 192.168.71.106 and 192.168.71.107.

nginx (with fastdfs-nginx-module) acts as the FastDFS client, serving the stored files over HTTP.

nginx installation
nginx configuration
events {
    worker_connections  1024;
}

http {
    include       mime.types;
    default_type  application/octet-stream;

    #log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
    #                  '$status $body_bytes_sent "$http_referer" '
    #                  '"$http_user_agent" "$http_x_forwarded_for"';

    #access_log  logs/access.log  main;
    #access_log  "pipe:rollback logs/access_log interval=1d baknum=7 maxsize=2G"  main;

    sendfile        on;
    #tcp_nopush     on;

    #keepalive_timeout  0;
    keepalive_timeout  65;

    #gzip  on;

    server {
        #Port 80 must match http.server_port=80 in /etc/fdfs/storage.conf on the storage server
        listen       80;
        server_name  localhost;

        #charset koi8-r;

        #access_log  logs/host.access.log  main;
        #access_log  "pipe:rollback logs/host.access_log interval=1d baknum=7 maxsize=2G"  main;
        
        #This is the key part of the configuration
        location ~ /group([0-9])/M00 {
            ngx_fastdfs_module;
            # add_header Content-Disposition "attachment;filename=$arg_attname";
        }
        
        #error_page  404              /404.html;

        # redirect server error pages to the static page /50x.html
        #
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }

        # proxy the PHP scripts to Apache listening on 127.0.0.1:80
        #
        #location ~ \.php$ {
        #    proxy_pass   http://127.0.0.1;
        #}

        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
        #
        #location ~ \.php$ {
        #    root           html;
        #    fastcgi_pass   127.0.0.1:9000;
        #    fastcgi_index  index.php;
        #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
        #    include        fastcgi_params;
        #}

        # pass the Dubbo rpc to Dubbo provider server listening on 127.0.0.1:20880
        #
        #location /dubbo {
        #    dubbo_pass_all_headers on;
        #    dubbo_pass_set args $args;
        #    dubbo_pass_set uri $uri;
        #    dubbo_pass_set method $request_method;
        #
        #    dubbo_pass org.apache.dubbo.samples.tengine.DemoService 0.0.0 tengineDubbo dubbo_backend;
        #}

        # deny access to .htaccess files, if Apache's document root
        # concurs with nginx's one
        #
        #location ~ /\.ht {
        #    deny  all;
        #}
    }

    # upstream for Dubbo rpc to Dubbo provider server listening on 127.0.0.1:20880
    #
    #upstream dubbo_backend {
    #    multi 1;
    #    server 127.0.0.1:20880;
    #}

    # another virtual host using mix of IP-, name-, and port-based configuration
    #
    #server {
    #    listen       8000;
    #    listen       somename:8080;
    #    server_name  somename  alias  another.alias;

    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}


    # HTTPS server
    #
    #server {
    #    listen       443 ssl;
    #    server_name  localhost;

    #    ssl_certificate      cert.pem;
    #    ssl_certificate_key  cert.key;

    #    ssl_session_cache    shared:SSL:1m;
    #    ssl_session_timeout  5m;

    #    ssl_ciphers  HIGH:!aNULL:!MD5;
    #    ssl_prefer_server_ciphers  on;

    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}

}
FastDFS storage installation and configuration
Integrating with nginx

Reference: https://blog.csdn.net/wjy2460956668/article/details/107910940
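
On the storage nodes, nginx must be compiled with fastdfs-nginx-module (after libfastcommon and FastDFS themselves are installed); a minimal build sketch, assuming the module source is unpacked under /usr/local/src:

tar -zxvf fastdfs-nginx-module_v1.16.tar.gz -C /usr/local/src
cd nginx-1.17.3
./configure --prefix=/usr/local/nginx --add-module=/usr/local/src/fastdfs-nginx-module/src
make && make install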

Modify the configuration file mod_fastdfs.conf

# connect timeout in seconds
# default value is 30s
connect_timeout=2

# network recv and send timeout in seconds
# default value is 30s
network_timeout=30

# the base path to store log files
base_path=/tmp

# if load FastDFS parameters from tracker server
# since V1.12
# default value is false
load_fdfs_parameters_from_tracker=true

# storage sync file max delay seconds
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V1.12
# default value is 86400 seconds (one day)
storage_sync_file_max_delay = 86400

# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V1.13
use_storage_id = false

# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V1.13
storage_ids_filename = storage_ids.conf

# FastDFS tracker_server can ocur more than once, and tracker_server format is
#  "host:port", host can be hostname or ip address
# valid only when load_fdfs_parameters_from_tracker is true

tracker_server=192.168.71.101:22122
tracker_server=192.168.71.102:22122

# the port of the local storage server
# the default value is 23000
storage_server_port=23000

# the group name of the local storage server

## group name
## .104 and .105 use group1
## .106 and .107 use group2
group_name=group1

# if the url / uri including the group name
# set to false when uri like /M00/00/00/xxx
# set to true when uri like ${group_name}/M00/00/00/xxx, such as group1/M00/xxx
# default value is false
url_have_group_name = true

# path(disk or mount point) count, default value is 1
# must same as storage.conf
store_path_count=1

# store_path#, based 0, if store_path0 not exists, it's value is base_path
# the paths must be exist
# must same as storage.conf
store_path0=/home/fastdfs/fastdfs-storage/store
#store_path1=/home/yuqing/fastdfs1

# standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info

# set the log filename, such as /usr/local/apache2/logs/mod_fastdfs.log
# empty for output to stderr (apache and nginx error_log file)
log_filename=

# response mode when the file not exist in the local file system
## proxy: get the content from other storage server, then send to client
## redirect: redirect to the original storage server (HTTP Header is Location)
response_mode=proxy

# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
# multi aliases split by comma. empty value means auto set by OS type
# this paramter used to get all ip address of the local host
# default values is empty
if_alias_prefix=

# use "#include" directive to include HTTP config file
# NOTE: #include is an include directive, do NOT remove the # before include
#include http.conf


# if support flv
# default value is false
# since v1.15
flv_support = true

# flv file extension name
# default value is flv
# since v1.15
flv_extension = flv


# set the group count
# set to none zero to support multi-group on this storage server
# set to 0  for single group only
# groups settings section as [group1], [group2], ..., [groupN]
# default value is 0
# since v1.14
group_count = 2

# group settings for group #1
# since v1.14
# when support multi-group on this storage server, uncomment following section
[group1]
group_name=group1
storage_server_port=23000
store_path_count=1
store_path0=/home/fastdfs/fastdfs-storage/store
#store_path1=/home/yuqing/fastdfs1

# group settings for group #2
# since v1.14
# when support multi-group, uncomment following section as neccessary
[group2]
group_name=group2
storage_server_port=23000
store_path_count=1
store_path0=/home/fastdfs/fastdfs-storage/store
#store_path1=/home/yuqing/fastdfs1
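
After editing mod_fastdfs.conf, it still has to be placed in /etc/fdfs together with http.conf and mime.types from the FastDFS source, and nginx reloaded; the whole chain can then be verified with a test upload. A sketch, assuming the sources are under /usr/local/src and that client.conf points at the two trackers (the file ID below is only a placeholder):

#Copy the module configuration and FastDFS HTTP files into /etc/fdfs
cp /usr/local/src/fastdfs-nginx-module/src/mod_fastdfs.conf /etc/fdfs/
cp /usr/local/src/fastdfs/conf/http.conf /etc/fdfs/
cp /usr/local/src/fastdfs/conf/mime.types /etc/fdfs/
/usr/local/nginx/sbin/nginx -s reload

#Upload a test file from any machine that has the fdfs client installed
fdfs_upload_file /etc/fdfs/client.conf /root/test.jpg
#The command prints a file ID such as group1/M00/00/00/xxxxxx.jpg
#The file should then be reachable through the virtual IP
curl -I http://192.168.71.10/group1/M00/00/00/xxxxxx.jpg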

Problems Encountered

1. On the load balancers, keepalived did not detect that nginx had stopped


Log message: /etc/keepalived/check_nginx.sh exited due to signal 15

Fix:

interval 5       #the monitoring script interval must be greater than advert_int (if 3s does not work, try 5s)
advert_int 1     #heartbeat advertisement interval

2. The nginx_check.sh script was not made executable

3. The tracker server needs port 22122 to be open

4. Files were not synchronized between the storage servers of a group

The storage servers need port 23000 to be open to synchronize data
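
For problems 3 and 4, the ports have to be opened on every node (or the firewall disabled in a test environment); a sketch using firewalld, the CentOS 7 default:

firewall-cmd --zone=public --add-port=22122/tcp --permanent   #tracker port
firewall-cmd --zone=public --add-port=23000/tcp --permanent   #storage sync port
firewall-cmd --zone=public --add-port=80/tcp --permanent      #nginx
firewall-cmd --reload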

5. keepalived: Unable to load ipset library - libipset.so.11: cannot open shared object file: No such file or directory

#Missing dependency package
yum install ipset-devel


6. keepalived: Failed to dynamic link an ipset function - /lib64/libipset.so: undefined symbol: ipset_session_error

No solution found yet.

