Using keepalived for nginx high availability; deploying a three-node Redis cluster

1. Using keepalived for nginx high availability
1.1 Environment architecture

(Figure: environment architecture diagram)

Six hosts need to be prepared:

2 hosts running keepalived (abbreviated KA below) + HAProxy: 10.0.0.133, 10.0.0.134

2 NGINX web servers: 10.0.0.136, 10.0.0.137

1 client: 10.0.0.138

1 DNS server: 10.0.0.135, resolving www.xinqi66.org to the VIP 10.0.0.100
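
The DNS record itself is never shown in this walkthrough; a minimal BIND zone sketch for 10.0.0.135 could look like the following (the file path, TTLs and admin mailbox are assumptions, not taken from the original setup):

; /var/named/xinqi66.org.zone (hypothetical path, assuming BIND on 10.0.0.135)
$TTL 1D
@       IN SOA  ns1.xinqi66.org. admin.xinqi66.org. ( 1 1D 1H 1W 3H )
        IN NS   ns1.xinqi66.org.
ns1     IN A    10.0.0.135
www     IN A    10.0.0.100    ; the VIP managed by keepalived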

1.2 Compile and install KA on both 133 and 134.
[root@Rocky8 ~]# yum -y install gcc curl openssl-devel libnl3-devel net-snmp-devel
[root@Rocky8 ~]# wget https://keepalived.org/software/keepalived-2.1.5.tar.gz
[root@Rocky8 ~]# tar xvf keepalived-2.1.5.tar.gz -C /usr/local/src/
[root@Rocky8 ~]# cd /usr/local/src/keepalived-2.1.5/
[root@Rocky8 keepalived-2.1.5]# ./configure --prefix=/usr/local/keepalived --disable-fwmark
[root@Rocky8 keepalived-2.1.5]# make && make install
[root@Rocky8 keepalived-2.1.5]# cd
[root@Rocky8 ~]# /usr/local/keepalived/sbin/keepalived -v
Keepalived v2.1.5 (07/13,2020)

Copyright(C) 2001-2020 Alexandre Cassen, <acassen@gmail.com>

Built with kernel headers for Linux 4.18.0
Running on Linux 4.18.0-348.el8.0.2.x86_64 #1 SMP Sun Nov 14 00:51:12 UTC 2021

configure options: --prefix=/usr/local/keepalived --disable-fwmark

Config options:  LVS VRRP VRRP_AUTH OLD_CHKSUM_COMPAT FIB_ROUTING

System options:  PIPE2 SIGNALFD INOTIFY_INIT1 VSYSLOG EPOLL_CREATE1 IPV4_DEVCONF IPV6_ADVANCED_API LIBNL3 RTA_ENCAP RTA_EXPIRES RTA_NEWDST RTA_PREF FRA_SUPPRESS_PREFIXLEN FRA_SUPPRESS_IFGROUP FRA_TUN_ID RTAX_CC_ALGO RTAX_QUICKACK RTEXT_FILTER_SKIP_STATS FRA_L3MDEV FRA_UID_RANGE RTAX_FASTOPEN_NO_COOKIE RTA_VIA FRA_OIFNAME FRA_PROTOCOL FRA_IP_PROTO FRA_SPORT_RANGE FRA_DPORT_RANGE RTA_TTL_PROPAGATE IFA_FLAGS IP_MULTICAST_ALL LWTUNNEL_ENCAP_MPLS LWTUNNEL_ENCAP_ILA NET_LINUX_IF_H_COLLISION LIBIPTC_LINUX_NET_IF_H_COLLISION LIBIPVS_NETLINK IPVS_DEST_ATTR_ADDR_FAMILY IPVS_SYNCD_ATTRIBUTES IPVS_64BIT_STATS VRRP_VMAC VRRP_IPVLAN IFLA_LINK_NETNSID CN_PROC SOCK_NONBLOCK SOCK_CLOEXEC O_PATH GLOB_BRACE INET6_ADDR_GEN_MODE VRF SCHED_RESET_ON_FORK

# Create the configuration file: a source install does not generate one by default, so the sample config from the source tree must be copied into /etc/keepalived, otherwise the service will not start.
[root@Rocky8 keepalived]# mkdir /etc/keepalived
[root@Rocky8 keepalived]# cp /usr/local/keepalived/etc/keepalived/keepalived.conf /etc/keepalived/
[root@Rocky8 keepalived]# systemctl enable --now keepalived.service 
Created symlink /etc/systemd/system/multi-user.target.wants/keepalived.service → /usr/lib/systemd/system/keepalived.service.

# Edit the main config to include sub-config files
[root@ka1 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id ka1.xinqi.com  # set to ka1 on 133 and ka2 on 134
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
   vrrp_mcast_group4 230.0.0.1
}
include /etc/keepalived/conf.d/*.conf  # path of the sub-config files
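
The conf.d directory referenced by the include does not exist after a source install and has to be created on both nodes before sub-config files can be dropped in:

[root@ka1 ~]# mkdir -p /etc/keepalived/conf.d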

[root@Rocky8 ~]# cat /etc/sysctl.conf  # enable IP forwarding
net.ipv4.ip_forward = 1
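
Two notes the original transcript skips: sysctl -p is needed to apply the change immediately, and because HAProxy below binds to the VIP 10.0.0.100:80, the BACKUP node (which does not hold the VIP) also needs net.ipv4.ip_nonlocal_bind so haproxy can start there:

[root@Rocky8 ~]# echo 'net.ipv4.ip_nonlocal_bind = 1' >> /etc/sysctl.conf
[root@Rocky8 ~]# sysctl -p
net.ipv4.ip_forward = 1
net.ipv4.ip_nonlocal_bind = 1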

# Email alert script
[root@Rocky8 ~]# cat /etc/keepalived/notify.sh
#!/bin/bash

# Mail parameters
contact='477681339@qq.com'
email_send='477681339@qq.com'
email_passwd='xxxx'  
email_smtp_server='smtp.qq.com'

# Source OS release information
. /etc/os-release

# Print an error message
msg_error() {
  echo -e "\033[1;31m$1\033[0m"
}

# Print an info message
msg_info() {
  echo -e "\033[1;32m$1\033[0m"
}

# Print a warning message
msg_warn() {
  echo -e "\033[1;33m$1\033[0m"
}

# Print a colored status line
color () {
  RES_COL=60
  MOVE_TO_COL="echo -en \\033[${RES_COL}G"
  SETCOLOR_SUCCESS="echo -en \\033[1;32m"
  SETCOLOR_FAILURE="echo -en \\033[1;31m"
  SETCOLOR_WARNING="echo -en \\033[1;33m"
  SETCOLOR_NORMAL="echo -en \E[0m"
  echo -n "$1" && $MOVE_TO_COL
  echo -n "["
  if [ "$2" = "success" -o "$2" = "0" ] ;then
    ${SETCOLOR_SUCCESS}
    echo -n $" OK "
  elif [ "$2" = "failure" -o "$2" = "1" ] ;then
    ${SETCOLOR_FAILURE}
    echo -n $"FAILED"
  else
    ${SETCOLOR_WARNING}
    echo -n $"WARNING"
  fi
  ${SETCOLOR_NORMAL}
  echo -n "]"
  echo
}

# Install the sendemail tool
install_sendemail () {
  if [[ "$ID" =~ rhel|centos|rocky ]];then
    rpm -q sendemail &> /dev/null || yum install -y sendemail
  elif [ "$ID" = 'ubuntu' ];then
    dpkg -l | grep -q sendemail || { apt update; apt install -y libio-socket-ssl-perl libnet-ssleay-perl sendemail ; }
  else
    color "不支持此操作系统,退出!" 1
    exit
  fi
}

# Send mail
send_email () {
  local email_receive="$1"
  local email_subject="$2"
  local email_message="$3"
  sendemail -f $email_send -t $email_receive -u $email_subject -m $email_message -s $email_smtp_server -o message-charset=utf-8 -o tls=yes -xu $email_send -xp $email_passwd
  [ $? -eq 0 ] && color "Mail sent successfully!" 0 || color "Mail send failed!" 1
}

# Notification function
notify() {
  if [[ "$1" =~ ^(master|backup|fault)$ ]];then
    mailsubject="$(hostname) to be $1, vip floating"
    mailbody="$(date +'%F %T'): vrrp transition, $(hostname) changed to be $1"
    send_email "$contact" "$mailsubject" "$mailbody"
  else
    echo "Usage: $(basename $0) {master|backup|fault}"
    exit 1
  fi
}

# Install sendemail
install_sendemail

# Call the notify function with the command-line argument
notify "$1"


[root@ka1 ~]# chmod a+x /etc/keepalived/notify.sh  # make the script executable
[root@ka1 ~]# vim /etc/keepalived/keepalived.conf  # add the mail alerts to the config, then restart the service and verify that alert mail is received
vrrp_instance VI_1 {
......
    virtual_ipaddress {
        10.0.0.10 dev eth0 label eth0:1
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}
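
The script can also be exercised by hand before keepalived ever calls it; the first run installs sendemail, and a green "Mail sent successfully!" line from the color() helper confirms the SMTP settings work:

[root@ka1 ~]# bash /etc/keepalived/notify.sh master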
1.3 Install HAProxy on both KA nodes and edit its configuration
# Install HAProxy on 133 and 134 with the following script
[root@Rocky8 ~]# tree
.
├── anaconda-ks.cfg
├── haproxy-2.6.15.tar.gz
├── install_haproxy.sh
└── lua-5.4.6.tar.gz

0 directories, 4 files
[root@Rocky8 ~]# vim install_haproxy.sh 
#!/bin/bash
#
#******************************************************************************
#Author:            xinqi66
#QQ:                477681339
#Date:              2023-09-08
#FileName:          install_haproxy.sh
#Description:       install haproxy for centos 7/8 & ubuntu 18.04/20.04
#Copyright (C):     2023 All rights reserved
#******************************************************************************

HAPROXY_VERSION=2.6.15
HAPROXY_FILE=haproxy-${HAPROXY_VERSION}.tar.gz
LUA_VERSION=5.4.6
LUA_FILE=lua-${LUA_VERSION}.tar.gz
HAPROXY_INSTALL_DIR=/apps/haproxy

SRC_DIR=/usr/local/src
CWD=`pwd`
CPUS=`lscpu |awk '/^CPU\(s\)/{print $2}'`
LOCAL_IP=$(hostname -I|awk '{print $1}')

STATS_AUTH_USER=admin
STATS_AUTH_PASSWORD=123456

VIP=192.168.10.100
MASTER1=192.168.10.101
MASTER2=192.168.10.102
MASTER3=192.168.10.103

. /etc/os-release

color () {
    RES_COL=60
    MOVE_TO_COL="echo -en \\033[${RES_COL}G"
    SETCOLOR_SUCCESS="echo -en \\033[1;32m"
    SETCOLOR_FAILURE="echo -en \\033[1;31m"
    SETCOLOR_WARNING="echo -en \\033[1;33m"
    SETCOLOR_NORMAL="echo -en \E[0m"
    echo -n "$1" && $MOVE_TO_COL
    echo -n "["
    if [ "$2" = "success" -o "$2" = "0" ] ;then
        ${SETCOLOR_SUCCESS}
        echo -n $"  OK  "    
    elif [ "$2" = "failure" -o "$2" = "1" ] ;then 
        ${SETCOLOR_FAILURE}
        echo -n $"FAILED"
    else
        ${SETCOLOR_WARNING}
        echo -n $"WARNING"
    fi
    ${SETCOLOR_NORMAL}
    echo -n "]"
    echo 
}


check_file (){
    if [ ! -e ${LUA_FILE} ];then
        color "缺少${LUA_FILE}文件!" 1
        exit
    elif [ ! -e ${HAPROXY_FILE} ];then
        color "缺少${HAPROXY_FILE}文件!" 1
        exit
    else
        color "相关文件已准备!" 0
    fi
}

install_haproxy(){
    if [ $ID = "centos" -o $ID = "rocky" ];then
        yum -y install gcc make gcc-c++ glibc glibc-devel pcre pcre-devel openssl openssl-devel systemd-devel libtermcap-devel ncurses-devel libevent-devel readline-devel 
    elif [ $ID = "ubuntu" ];then
        apt update 
        apt -y install gcc make openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev  libreadline-dev libsystemd-dev 
    else
        color "不支持此操作系统!" 1
    fi
    [ $? -eq 0 ] ||  { color 'Failed to install dependency packages, exiting!' 1; exit; }
    tar xf ${LUA_FILE} -C ${SRC_DIR}
    LUA_DIR=${LUA_FILE%.tar*}
    cd ${SRC_DIR}/${LUA_DIR}
    make all test
    cd ${CWD}
    tar xf ${HAPROXY_FILE} -C ${SRC_DIR}
    HAPROXY_DIR=${HAPROXY_FILE%.tar*}
    cd ${SRC_DIR}/${HAPROXY_DIR}
    make -j ${CPUS} ARCH=x86_64 TARGET=linux-glibc USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 USE_SYSTEMD=1 USE_CPU_AFFINITY=1 USE_LUA=1 LUA_INC=${SRC_DIR}/${LUA_DIR}/src/ LUA_LIB=${SRC_DIR}/${LUA_DIR}/src/ PREFIX=${HAPROXY_INSTALL_DIR}
    make install PREFIX=${HAPROXY_INSTALL_DIR}
    [ $? -eq 0 ] && color "HAPROXY编译安装成功" 0 ||  { color "HAPROXY编译安装失败,退出!" 1;exit; }
    
    [ -L /usr/sbin/haproxy ] || ln -s ${HAPROXY_INSTALL_DIR}/sbin/haproxy /usr/sbin/ &> /dev/null
    [ -d /etc/haproxy ] || mkdir /etc/haproxy &> /dev/null  
    [ -d /var/lib/haproxy/ ] || mkdir -p /var/lib/haproxy/ &> /dev/null
    cat > /etc/haproxy/haproxy.cfg <<-EOF
global
maxconn 100000
stats socket /var/lib/haproxy/haproxy.sock mode 600 level admin
uid 99
gid 99
daemon

pidfile /var/lib/haproxy/haproxy.pid
log 127.0.0.1 local3 info

defaults
option http-keep-alive
option forwardfor
maxconn 100000
mode http
timeout connect 300000ms
timeout client 300000ms
timeout server 300000ms

listen stats
    mode http
    bind 0.0.0.0:9999
    stats enable
    log global
    stats uri /haproxy-status
    stats auth ${STATS_AUTH_USER}:${STATS_AUTH_PASSWORD}

#listen kubernetes-6443
#    bind ${VIP}:6443
#    mode tcp
#    log global
#    server ${MASTER1} ${MASTER1}:6443 check inter 3000 fall 2 rise 5
#    server ${MASTER2} ${MASTER2}:6443 check inter 3000 fall 2 rise 5
#    server ${MASTER3} ${MASTER3}:6443 check inter 3000 fall 2 rise 5

EOF
    #echo "PATH=${HAPROXY_INSTALL_DIR}/sbin:${PATH}" > /etc/profile.d/haproxy.sh
	groupadd -g 99 haproxy
	useradd -u 99 -g haproxy -d /var/lib/haproxy -M -r -s /sbin/nologin haproxy
	cat > /lib/systemd/system/haproxy.service <<-EOF
[Unit]
Description=HAProxy Load Balancer
After=syslog.target network.target

[Service]
ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q
ExecStart=/usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /var/lib/haproxy/haproxy.pid
# \$MAINPID is escaped so the literal string reaches the unit file (this heredoc is unquoted)
ExecReload=/bin/kill -USR2 \$MAINPID

[Install]
WantedBy=multi-user.target
EOF
    systemctl daemon-reload
    systemctl enable --now haproxy 
    systemctl is-active haproxy &> /dev/null && color 'HAProxy installation complete!' 0 ||  { color 'HAProxy failed to start, exiting!' 1; exit; }
    echo "-------------------------------------------------------------------"
    echo -e "请访问链接: \E[32;1mhttp://${LOCAL_IP}:9999/haproxy-status\E[0m"
    echo -e "用户和密码: \E[32;1m${STATS_AUTH_USER}/${STATS_AUTH_PASSWORD}\E[0m" 
}

main(){
    check_file
    install_haproxy
}

main


[root@Rocky8 ~]# bash install_haproxy.sh  # run the script
[root@ka1 ~]# cat /etc/haproxy/haproxy.cfg  # then append the site to the config
global
......

listen xinqi_http  # listen on port 80 and add the two backend web servers
bind 10.0.0.100:80
server web1 10.0.0.137:80 check
server web2 10.0.0.136:80 check
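
After editing, the config can be validated and the service reloaded (a routine step not shown in the original transcript):

[root@ka1 ~]# /usr/sbin/haproxy -c -f /etc/haproxy/haproxy.cfg
Configuration file is valid
[root@ka1 ~]# systemctl reload haproxy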


1.4 Create an HAProxy health-check script on both 133 and 134. Edit the keepalived configuration to define the VIP and register the check script with keepalived.
[root@ka1 ~]# cat /etc/keepalived/check_haproxy.sh
#!/bin/bash
# killall -0 sends no signal; it only tests whether a haproxy process exists.
# If none does, attempt a restart; a non-zero exit marks this check as failed.
/usr/bin/killall -0 haproxy || systemctl restart haproxy

[root@ka1 ~]# vim /etc/keepalived/conf.d/xinqi.conf  # 10.0.0.133 ka1 configuration
vrrp_script check_haproxy {  # define a VRRP script named check_haproxy
    script "/etc/keepalived/check_haproxy.sh"  # script path
    interval 1  # run the script every second
    weight -30  # subtract 30 from the priority while the check is failing
    fall 3  # 3 consecutive failures mark the check as failed
    rise 2  # 2 consecutive successes mark it healthy again
}

vrrp_instance xinqi.org {  # define VRRP instance xinqi.org
    state MASTER  # this node starts as MASTER
    interface eth0  # use interface eth0
    virtual_router_id 66  # virtual router ID 66 (must match on both nodes)
    priority 100  # priority 100
    advert_int 1  # advertisement interval of 1 second
    authentication {  # authentication settings
        auth_type PASS  # password authentication
        auth_pass 123456  # password 123456
    }
    virtual_ipaddress {  # virtual IP addresses
        10.0.0.100/24 dev eth0 label eth0:1  # the VIP, bound to eth0 with a label
    }
    notify_master "/etc/keepalived/notify.sh master"  # run on transition to MASTER
    notify_backup "/etc/keepalived/notify.sh backup"  # run on transition to BACKUP
    notify_fault "/etc/keepalived/notify.sh fault"  # run on FAULT
    track_script {  # track the health-check script
        check_haproxy  # the script defined above
    }
}
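
With these numbers, three consecutive check failures on ka1 drop its effective priority to 100 - 30 = 70, below ka2's 80, so the VIP floats to ka2. Whether a node currently holds the VIP can be checked like this (output along these lines):

[root@ka1 ~]# ip addr show dev eth0 | grep 10.0.0.100
    inet 10.0.0.100/24 scope global secondary eth0:1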

[root@ka2 ~]# cat /etc/keepalived/conf.d/xinqi.conf  # 10.0.0.134 ka2 configuration
vrrp_script check_haproxy { 
    script "/etc/keepalived/check_haproxy.sh"
    interval 1
    weight -30
    fall 3
    rise 2
}

vrrp_instance xinqi.org {
    state BACKUP  # this node is BACKUP
    interface eth0
    virtual_router_id 66
    priority 80   # lower priority of 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        10.0.0.100/24 dev eth0 label eth0:1
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
    track_script {
        check_haproxy
    }
}

1.5 Set up the two NGINX web sites on 136 and 137.
[root@Rocky8 ~]# yum -y install nginx
[root@Rocky8 ~]# cat /etc/nginx/conf.d/www.xinqi66.org.conf 
server {
    listen       80;
    server_name  www.xinqi66.org;
    location / {
        root  /data/nginx/www/html;
    }
}
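The packaged nginx does not create this web root; it must exist on each server before writing the test page (a prep step implied but not shown):

[root@Rocky8 ~]# mkdir -p /data/nginx/www/html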
[root@Rocky8 ~]# cat /data/nginx/www/html/index.html 
<h1> www.xinqi66.org 136 </h1>  # on 10.0.0.137 write 137 instead, so the two servers can be told apart during verification
[root@Rocky8 ~]# systemctl enable --now nginx.service
1.6 Restart KA and HAProxy on 133 and 134, then test: stop one KA node and verify HA still works; stop one web server and verify on the client that traffic goes to the other web server.
[root@ka1 ~]# systemctl restart keepalived.service haproxy.service

# Access www.xinqi66.org from the client; DNS resolution of www.xinqi66.org to 10.0.0.100 was configured beforehand
[root@Rocky8 ~]# curl www.xinqi66.org
<h1> www.xinqi66.org 136 </h1>
[root@Rocky8 ~]# curl www.xinqi66.org
<h1> www.xinqi66.org 137 </h1>
[root@Rocky8 ~]# curl www.xinqi66.org
<h1> www.xinqi66.org 136 </h1>
[root@Rocky8 ~]# curl www.xinqi66.org
<h1> www.xinqi66.org 137 </h1>

# Stop ka1 and verify the VIP fails over
[root@Rocky8 ~]# curl www.xinqi66.org
<h1> www.xinqi66.org 136 </h1>
[root@Rocky8 ~]# curl www.xinqi66.org
<h1> www.xinqi66.org 137 </h1>
[root@Rocky8 ~]# curl www.xinqi66.org
<h1> www.xinqi66.org 136 </h1>
[root@Rocky8 ~]# curl www.xinqi66.org
<h1> www.xinqi66.org 137 </h1>

# Stop web1 (136) and verify
[root@Rocky8 ~]# curl www.xinqi66.org
<h1> www.xinqi66.org 137 </h1>
[root@Rocky8 ~]# curl www.xinqi66.org
<h1> www.xinqi66.org 137 </h1>
[root@Rocky8 ~]# curl www.xinqi66.org
<h1> www.xinqi66.org 137 </h1>
[root@Rocky8 ~]# curl www.xinqi66.org
<h1> www.xinqi66.org 137 </h1>
2. Build a Redis cluster with three master nodes.
2.1 Redis Cluster

Redis Cluster uses a decentralized architecture: every node holds data plus the full cluster state, and every node connects to every other node.
The cluster here consists of six Redis instances: three masters and three slaves. When a master fails, Redis Cluster elects its slave as the new master and continues to serve requests, which provides the high availability.
Note:
1. If a master and its slave go down at the same time, the cluster can no longer provide service.
2. If the number of surviving master nodes falls below half of the total, the whole cluster stops providing service.

(Figure: Redis Cluster principle architecture)

(Figure: deployment architecture)

# Every Redis node must run the same Redis version with the same password and comparable hardware, and every Redis server must start out empty. Prepare six hosts: 10.0.0.132, 10.0.0.133, 10.0.0.134, 10.0.0.135, 10.0.0.136, 10.0.0.137

# Edit the config file on every Redis node
[root@redis-node1 ~]# sed -i.bak -e '/masterauth/a masterauth 123456' -e '/# cluster-enabled yes/a cluster-enabled yes' -e '/# cluster-config-file nodes-6379.conf/a cluster-config-file nodes-6379.conf' -e '/cluster-require-full-coverage yes/c cluster-require-full-coverage no' /apps/redis/etc/redis.conf
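
After the sed run, /apps/redis/etc/redis.conf should carry these effective lines (a sketch of the intended result):

masterauth 123456
cluster-enabled yes
cluster-config-file nodes-6379.conf
cluster-require-full-coverage no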

[root@redis-node1 ~]# systemctl restart redis

# Create the cluster; the redis-cli option --cluster-replicas 1 assigns one slave to each master
[root@redis-node1 ~]# redis-cli -a 123456 --cluster create 10.0.0.132:6379 10.0.0.133:6379 10.0.0.134:6379 10.0.0.135:6379 10.0.0.136:6379 10.0.0.137:6379 --cluster-replicas 1

Adding replica 10.0.0.136:6379 to 10.0.0.132:6379
Adding replica 10.0.0.137:6379 to 10.0.0.133:6379
Adding replica 10.0.0.135:6379 to 10.0.0.134:6379

# Verify cluster state
[root@localhost ~]# redis-cli -a 123456 CLUSTER INFO
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6      # number of known nodes
cluster_size:3             # three masters (shards)
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:467
cluster_stats_messages_pong_sent:476

[root@localhost ~]# redis-cli -c -h 10.0.0.132 -a 123456 --no-auth-warning  # connect in cluster mode
# Write a key to verify slot redirection
10.0.0.132:6379> set linux love
-> Redirected to slot [12299] located at 10.0.0.134:6379
OK
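
The redirect is deterministic: the key's slot is CRC16(key) mod 16384, and slot 12299 sits in 10.0.0.134's range 10923-16383. The slot for any key can be checked ahead of time:

10.0.0.132:6379> CLUSTER KEYSLOT linux
(integer) 12299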

# Shut down node2 (10.0.0.133) to verify failover
[root@Rocky8 log]# redis-cli -a 12345
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
Warning: AUTH failed
127.0.0.1:6379> shutdown
(error) NOAUTH Authentication required.
127.0.0.1:6379> auth 123456
OK
127.0.0.1:6379> shutdown
not connected> exit
[root@Rocky8 log]# ss -ntl
State         Recv-Q        Send-Q               Local Address:Port               Peer Address:Port       Process        
LISTEN        0             128                        0.0.0.0:22                      0.0.0.0:*                         
LISTEN        0             128                           [::]:22                         [::]:*                         
[root@Rocky8 log]# redis-cli -a 123456 --cluster info 10.0.0.132:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
Could not connect to Redis at 10.0.0.133:6379: Connection refused
10.0.0.132:6379 (d4c3d819...) -> 3331 keys | 5461 slots | 1 slaves.
10.0.0.137:6379 (6041e516...) -> 3340 keys | 5462 slots | 0 slaves. # 137 has been promoted to master
10.0.0.134:6379 (c39ec114...) -> 3330 keys | 5461 slots | 1 slaves.
[OK] 10001 keys in 3 masters.
0.61 keys per slot on average.

# Restart node2; it automatically rejoins as a slave
[root@Rocky8 data]# systemctl restart redis
# Inspect the automatically generated cluster state file
[root@Rocky8 data]# cat /apps/redis/data/nodes-6379.conf 
d4c3d8193b89fa8eb6c71467833c43d096b1490f 10.0.0.132:6379@16379 master - 0 1690248230198 1 connected 0-5460
6792543de315c7e4a0fe2f2c0809695d77ce345f 10.0.0.135:6379@16379 slave c39ec114c57640888507ce0577cc702458ce8c2d 0 1690248230198 3 connected
c39ec114c57640888507ce0577cc702458ce8c2d 10.0.0.134:6379@16379 master - 0 1690248230198 3 connected 10923-16383
6041e5163e49bb5851cc9e2f13fc154486d01a49 10.0.0.137:6379@16379 master - 0 1690248230199 7 connected 5461-10922
476df290972e404d6a9db92970db854dc35f775f 10.0.0.136:6379@16379 slave d4c3d8193b89fa8eb6c71467833c43d096b1490f 0 1690248230198 1 connected
e02f94e221e5980c57653e9ca40fc278d06831e2 10.0.0.133:6379@16379 myself,slave 6041e5163e49bb5851cc9e2f13fc154486d01a49 0 1690248230193 7 connected
vars currentEpoch 7 lastVoteEpoch 0

2.2 Master/slave fault-tolerant failover complete!
Advantages of this failover model:

Fault tolerance of consistent hashing:
Suppose Node C goes down: objects A, B and D are unaffected, and only object C is relocated to Node D. In general, in consistent hashing, when one server becomes unavailable the only affected data is what lies between that server and the previous server on the ring (the first server met walking counter-clockwise); nothing else is touched. Put simply: when C dies, only the data between B and C is affected, and it moves onto D for storage. (Strictly speaking, Redis Cluster maps keys onto 16384 fixed hash slots rather than using classic consistent hashing, but the fault-isolation property is analogous: only the failed master's slots are affected.)

(Figure: consistent hashing ring with objects A-D and Nodes A-D)

2.3 Add Redis nodes, growing the cluster to 4 masters and 4 slaves.
# Prepare two more hosts and install Redis on them with the install script

# Edit the config file
sed -i.bak -e '/masterauth/a masterauth 123456' -e '/# cluster-enabled yes/a cluster-enabled yes' -e '/# cluster-config-file nodes-6379.conf/a cluster-config-file nodes-6379.conf' -e '/cluster-require-full-coverage yes/c cluster-require-full-coverage no' /apps/redis/etc/redis.conf

# Join a new host to the cluster; the second address can be any existing cluster node. This form works on Redis 5 and later.
[root@redis-node1 ~]# redis-cli -a 123456 --cluster add-node <new-node>:6379 <any-existing-cluster-node>:6379

[root@Rocky8 ~]# redis-cli -a 123456 --cluster add-node 10.0.0.138:6379 10.0.0.132:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
>>> Adding node 10.0.0.138:6379 to cluster 10.0.0.132:6379
>>> Performing Cluster Check (using node 10.0.0.132:6379)
M: d4c3d8193b89fa8eb6c71467833c43d096b1490f 10.0.0.132:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: 6041e5163e49bb5851cc9e2f13fc154486d01a49 10.0.0.137:6379
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
M: c39ec114c57640888507ce0577cc702458ce8c2d 10.0.0.134:6379
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: 6792543de315c7e4a0fe2f2c0809695d77ce345f 10.0.0.135:6379
   slots: (0 slots) slave
   replicates c39ec114c57640888507ce0577cc702458ce8c2d
S: 476df290972e404d6a9db92970db854dc35f775f 10.0.0.136:6379
   slots: (0 slots) slave
   replicates d4c3d8193b89fa8eb6c71467833c43d096b1490f
S: e02f94e221e5980c57653e9ca40fc278d06831e2 10.0.0.133:6379
   slots: (0 slots) slave
   replicates 6041e5163e49bb5851cc9e2f13fc154486d01a49
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 10.0.0.138:6379 to make it join the cluster.
[OK] New node added correctly.

# Added successfully: the new node joins as a master, with no slots yet
[root@Rocky8 ~]# redis-cli -a 123456 --cluster info 10.0.0.132:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
10.0.0.132:6379 (d4c3d819...) -> 3331 keys | 5461 slots | 1 slaves.
10.0.0.137:6379 (6041e516...) -> 3340 keys | 5462 slots | 1 slaves.
10.0.0.134:6379 (c39ec114...) -> 3330 keys | 5461 slots | 1 slaves.
10.0.0.138:6379 (c8a59583...) -> 0 keys | 0 slots | 0 slaves.
[OK] 10001 keys in 4 masters.
0.61 keys per slot on average

# Reshard slots to the new node
redis-cli -a 123456 --cluster reshard <any-existing-cluster-node>:6379

How many slots do you want to move (from 1 to 16384)? 4096  # slots to move = 16384 / number of masters
What is the receiving node ID? c8a59583eb2d9d1c0bf3bc8080964543e6afe395  # the new master's ID
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1: all  # 'all' pulls slots evenly from every existing node; when removing a host from the cluster, name that host here instead to move all of its slots onto other nodes
......
Do you want to proceed with the proposed reshard plan (yes/no)? yes  # confirm the plan
......
Moving slot 12280 from 10.0.0.134:6379 to 10.0.0.138:6379: .
Moving slot 12281 from 10.0.0.134:6379 to 10.0.0.138:6379: .
Moving slot 12282 from 10.0.0.134:6379 to 10.0.0.138:6379:
Moving slot 12283 from 10.0.0.134:6379 to 10.0.0.138:6379: ..
Moving slot 12284 from 10.0.0.134:6379 to 10.0.0.138:6379:
Moving slot 12285 from 10.0.0.134:6379 to 10.0.0.138:6379: .
Moving slot 12286 from 10.0.0.134:6379 to 10.0.0.138:6379:
Moving slot 12287 from 10.0.0.134:6379 to 10.0.0.138:6379: ..
[root@redis-node1 ~]#

# Confirm the slot assignment succeeded
[root@Rocky8 ~]# redis-cli -a 123456 --cluster check 10.0.0.132:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
10.0.0.132:6379 (d4c3d819...) -> 2511 keys | 4096 slots | 1 slaves.
10.0.0.137:6379 (6041e516...) -> 2515 keys | 4096 slots | 1 slaves.
10.0.0.134:6379 (c39ec114...) -> 2501 keys | 4096 slots | 1 slaves.
10.0.0.138:6379 (c8a59583...) -> 2474 keys | 4096 slots | 0 slaves. # slots assigned; no slave yet
[OK] 10001 keys in 4 masters.
0.61 keys per slot on average.
>>> Performing Cluster Check (using node 10.0.0.132:6379)
M: d4c3d8193b89fa8eb6c71467833c43d096b1490f 10.0.0.132:6379
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
M: 6041e5163e49bb5851cc9e2f13fc154486d01a49 10.0.0.137:6379
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
M: c39ec114c57640888507ce0577cc702458ce8c2d 10.0.0.134:6379
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 6792543de315c7e4a0fe2f2c0809695d77ce345f 10.0.0.135:6379
   slots: (0 slots) slave
   replicates c39ec114c57640888507ce0577cc702458ce8c2d
S: 476df290972e404d6a9db92970db854dc35f775f 10.0.0.136:6379
   slots: (0 slots) slave
   replicates d4c3d8193b89fa8eb6c71467833c43d096b1490f
M: c8a59583eb2d9d1c0bf3bc8080964543e6afe395 10.0.0.138:6379
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master  # the new master's slot ranges
S: e02f94e221e5980c57653e9ca40fc278d06831e2 10.0.0.133:6379
   slots: (0 slots) slave
   replicates 6041e5163e49bb5851cc9e2f13fc154486d01a49
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

# Attach a slave to the newly added master for high availability
redis-cli -a 123456 --cluster add-node 10.0.0.139:6379 <any-existing-cluster-node>:6379 --cluster-slave --cluster-master-id c8a59583eb2d9d1c0bf3bc8080964543e6afe395

[root@Rocky8 ~]# redis-cli -a 123456 --cluster add-node 10.0.0.139:6379 10.0.0.132:6379 --cluster-slave --cluster-master-id c8a59583eb2d9d1c0bf3bc8080964543e6afe395
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
>>> Adding node 10.0.0.139:6379 to cluster 10.0.0.132:6379
>>> Performing Cluster Check (using node 10.0.0.132:6379)
M: d4c3d8193b89fa8eb6c71467833c43d096b1490f 10.0.0.132:6379
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
M: 6041e5163e49bb5851cc9e2f13fc154486d01a49 10.0.0.137:6379
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
M: c39ec114c57640888507ce0577cc702458ce8c2d 10.0.0.134:6379
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 6792543de315c7e4a0fe2f2c0809695d77ce345f 10.0.0.135:6379
   slots: (0 slots) slave
   replicates c39ec114c57640888507ce0577cc702458ce8c2d
S: 476df290972e404d6a9db92970db854dc35f775f 10.0.0.136:6379
   slots: (0 slots) slave
   replicates d4c3d8193b89fa8eb6c71467833c43d096b1490f
M: c8a59583eb2d9d1c0bf3bc8080964543e6afe395 10.0.0.138:6379
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: e02f94e221e5980c57653e9ca40fc278d06831e2 10.0.0.133:6379
   slots: (0 slots) slave
   replicates 6041e5163e49bb5851cc9e2f13fc154486d01a49
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 10.0.0.139:6379 to make it join the cluster.
Waiting for the cluster to join

>>> Configure node as replica of 10.0.0.138:6379.
[OK] New node added correctly.

# Verify
[root@Rocky8 ~]# redis-cli -a 123456 --cluster check 10.0.0.132:6379
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
10.0.0.132:6379 (d4c3d819...) -> 2511 keys | 4096 slots | 1 slaves.
10.0.0.137:6379 (6041e516...) -> 2515 keys | 4096 slots | 1 slaves.
10.0.0.134:6379 (c39ec114...) -> 2501 keys | 4096 slots | 1 slaves.
10.0.0.138:6379 (c8a59583...) -> 2474 keys | 4096 slots | 1 slaves.
[OK] 10001 keys in 4 masters.
0.61 keys per slot on average.
>>> Performing Cluster Check (using node 10.0.0.132:6379)
M: d4c3d8193b89fa8eb6c71467833c43d096b1490f 10.0.0.132:6379
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
S: fae7372057f0604e99dc256e30d7a69a5e48d4c2 10.0.0.139:6379
   slots: (0 slots) slave
   replicates c8a59583eb2d9d1c0bf3bc8080964543e6afe395
M: 6041e5163e49bb5851cc9e2f13fc154486d01a49 10.0.0.137:6379
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
M: c39ec114c57640888507ce0577cc702458ce8c2d 10.0.0.134:6379
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 6792543de315c7e4a0fe2f2c0809695d77ce345f 10.0.0.135:6379
   slots: (0 slots) slave
   replicates c39ec114c57640888507ce0577cc702458ce8c2d
S: 476df290972e404d6a9db92970db854dc35f775f 10.0.0.136:6379
   slots: (0 slots) slave
   replicates d4c3d8193b89fa8eb6c71467833c43d096b1490f
M: c8a59583eb2d9d1c0bf3bc8080964543e6afe395 10.0.0.138:6379
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
   1 additional replica(s)
S: e02f94e221e5980c57653e9ca40fc278d06831e2 10.0.0.133:6379
   slots: (0 slots) slave
   replicates 6041e5163e49bb5851cc9e2f13fc154486d01a49
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
# To remove a node from the cluster:
redis-cli -a 123456 --cluster del-node <any-cluster-node-IP>:6379 cb028b83f9dc463d732f6e76ca6bbcd469d948a7
# cb028b83f9dc463d732f6e76ca6bbcd469d948a7 is the ID of the node to delete. Note: on 6.2.5 the deleted node keeps listening instead of shutting down, which does not match the documentation.
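
del-node refuses to remove a master that still owns slots, so a master has to be emptied first. A sketch using redis-cli's non-interactive reshard flags (the IDs below are placeholders):

redis-cli -a 123456 --cluster reshard <any-cluster-node>:6379 \
  --cluster-from <id-of-node-to-remove> --cluster-to <surviving-master-id> \
  --cluster-slots 4096 --cluster-yes
redis-cli -a 123456 --cluster del-node <any-cluster-node>:6379 <id-of-node-to-remove>
systemctl stop redis   # on the removed host, since the process keeps listening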


