Docker (Redis Cluster)


I. Building a 3-master, 3-slave Redis cluster

A Redis cluster requires at least three master nodes.

1. Prepare the data-volume mount paths for the six Redis nodes

[root@localhost redis]# ll
drwxr-xr-x. 3 systemd-coredump root 92 Nov 13 17:09 redis-node-1
drwxr-xr-x. 3 systemd-coredump root 36 Nov 13 17:09 redis-node-2
drwxr-xr-x. 3 systemd-coredump root 36 Nov 13 17:10 redis-node-3
drwxr-xr-x. 3 systemd-coredump root 36 Nov 13 17:10 redis-node-4
drwxr-xr-x. 3 systemd-coredump root 36 Nov 13 17:11 redis-node-5
drwxr-xr-x. 3 systemd-coredump root 36 Nov 13 17:12 redis-node-6
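
These six mount directories can be created in one step; a minimal sketch (the article only shows the resulting listing):

mkdir -p /redis/redis-node-{1..6}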

2. Create the external configuration file mounted by each of the six Redis nodes

[root@localhost redis-node-1]# pwd
/redis/redis-node-1
[root@localhost redis-node-1]# ll
-rw-r--r--. 1 root             root  63285 Nov 13 17:02 redis.conf

The redis.conf for node 1 is shown below; in each node's copy, change the port to match that node (6379-6384). A sketch for generating the other copies follows the listing.

[root@localhost redis-node-1]# vim redis.conf
protected-mode no
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir ./
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendonly no
appendfilename "appendonly.aof"
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
notify-keyspace-events Ex
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
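
The other five config files differ only in the port (and pidfile). A minimal sketch for generating them from the node-1 template above; the loop and the 6378+i arithmetic are illustrative, not taken from the original article:

# copy the node-1 template and patch port/pidfile for nodes 2-6 (ports 6380-6384)
for i in 2 3 4 5 6; do
  port=$((6378 + i))
  cp /redis/redis-node-1/redis.conf /redis/redis-node-$i/redis.conf
  sed -i "s/^port 6379/port $port/" /redis/redis-node-$i/redis.conf
  sed -i "s|^pidfile .*|pidfile /var/run/redis_${port}.pid|" /redis/redis-node-$i/redis.conf
done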

3. Create the six Redis containers

[root@localhost tomcatData]# docker run -d --name redis-node-1 --net host --privileged=true -v /redis/redis-node-1:/data redis --cluster-enabled yes --appendonly yes --port 6379
7f597842ac768d5d25124f560a1fd07488b29c44b2d7bf35560701ad52157aa6
[root@localhost tomcatData]# docker run -d --name redis-node-2 --net host --privileged=true -v /redis/redis-node-2:/data redis --cluster-enabled yes --appendonly yes --port 6380
72dbd4eef385a6dd521b8167619542b7b75d049dc306ef33c4478537f5ee8153
[root@localhost tomcatData]# docker run -d --name redis-node-3 --net host --privileged=true -v /redis/redis-node-3:/data redis --cluster-enabled yes --appendonly yes --port 6381
b385ca96ea0d29ad7d5dda0f1ab69d2d359e6009587d83500f2c586ef656b319
[root@localhost tomcatData]# docker run -d --name redis-node-4 --net host --privileged=true -v /redis/redis-node-4:/data redis --cluster-enabled yes --appendonly yes --port 6382
808b5e0c9ddb7736e0855644b917da05ad0ec307a4b1eb39b967ccb3edff1ed5
[root@localhost tomcatData]# docker run -d --name redis-node-5 --net host --privileged=true -v /redis/redis-node-5:/data redis --cluster-enabled yes --appendonly yes --port 6383
9a7156a41bc4abe6edb0f45354dc1aa1a812e2c8a667ea53036f66690317ba51
[root@localhost tomcatData]# docker run -d --name redis-node-6 --net host --privileged=true -v /redis/redis-node-6:/data redis --cluster-enabled yes --appendonly yes --port 6384
8016db0a503c2563fe06433d347191c84388d39f7d4cefec069e1e2bfe8ca07f

4. Check the running containers

[root@localhost tomcatData]# docker ps
CONTAINER ID   IMAGE     COMMAND                  CREATED              STATUS              PORTS     NAMES
8016db0a503c   redis     "docker-entrypoint.s…"   3 seconds ago        Up 3 seconds                  redis-node-6
9a7156a41bc4   redis     "docker-entrypoint.s…"   15 seconds ago       Up 14 seconds                 redis-node-5
808b5e0c9ddb   redis     "docker-entrypoint.s…"   27 seconds ago       Up 26 seconds                 redis-node-4
b385ca96ea0d   redis     "docker-entrypoint.s…"   40 seconds ago       Up 39 seconds                 redis-node-3
72dbd4eef385   redis     "docker-entrypoint.s…"   53 seconds ago       Up 53 seconds                 redis-node-2
7f597842ac76   redis     "docker-entrypoint.s…"   About a minute ago   Up About a minute             redis-node-1

5. Enter node 1 and build the cluster relationship among the six nodes

Command format (--cluster-replicas 1 means one slave is created for each master):

redis-cli --cluster create 192.168.109.141:6379 192.168.109.141:6380 192.168.109.141:6381 192.168.109.141:6382 192.168.109.141:6383 192.168.109.141:6384 --cluster-replicas 1
[root@localhost redis]# docker exec -it 7f597842ac76 /bin/bash
root@localhost:/data# redis-cli --cluster create 192.168.109.141:6379 192.168.109.141:6380 192.168.109.141:6381 192.168.109.141:6382 192.168.109.141:6383 192.168.109.141:6384 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 192.168.109.141:6383 to 192.168.109.141:6379
Adding replica 192.168.109.141:6384 to 192.168.109.141:6380
Adding replica 192.168.109.141:6382 to 192.168.109.141:6381
>>> Trying to optimize slaves allocation for anti-affinity
[WARNING] Some slaves are in the same host as their master
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[0-5460] (5461 slots) master
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[5461-10922] (5462 slots) master
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[10923-16383] (5461 slots) master
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
.
>>> Performing Cluster Check (using node 192.168.109.141:6379)
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#

6. Check the master/slave relationships

Command format: redis-cli --cluster check <this node's IP>:<this node's port>

root@localhost:/data# redis-cli --cluster check 192.168.109.141:6379
192.168.109.141:6379 (ac91d148...) -> 0 keys | 5461 slots | 1 slaves.
192.168.109.141:6381 (1d822dd5...) -> 0 keys | 5461 slots | 1 slaves.
192.168.109.141:6380 (5ed4b671...) -> 0 keys | 5462 slots | 1 slaves.
[OK] 0 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.109.141:6379)
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

7. Read/write test

Add the -c flag so redis-cli follows cluster redirects; without it, keys that hash to another node fail with a MOVED error.

root@localhost:/data# redis-cli -p 6379
127.0.0.1:6379> get name
(error) MOVED 5798 192.168.109.141:6380
127.0.0.1:6379> set name zhangsan
(error) MOVED 5798 192.168.109.141:6380
127.0.0.1:6379>
root@localhost:/data# redis-cli -p 6379 -c
127.0.0.1:6379> get name
-> Redirected to slot [5798] located at 192.168.109.141:6380
"zhangsan"
192.168.109.141:6380>

8. Reading data from a slave node

Symptom: a slave node in a Redis Cluster successfully replicates its master's slot data, but GET on the slave fails; the error says the key can only be read from the corresponding master.
Reason: by default, Redis Cluster slaves do not share read traffic; they exist only for backup and failover. A read sent to a slave is redirected to its master.
Fix: issue the READONLY command before reading. READONLY tells the Redis Cluster slave that this client is willing to read possibly stale data and is not interested in writes.
Note: READONLY only lasts for the current connection; after reconnecting you must issue it again. (A sketch against this article's cluster follows the transcript below.)

root@localhost:/data# redis-cli -p 6381
127.0.0.1:6381> get age
(error) MOVED 741 192.168.109.144:6385
127.0.0.1:6381> readonly
OK
127.0.0.1:6381> get age
"50"
127.0.0.1:6381>
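
Note that the transcript above was captured on a different cluster (in the cluster built here, 6381 is a master). To reproduce the test on this article's cluster, connect to one of the replicas instead, e.g. 6382, which replicates the 0-5460 slot range from 6379. A sketch, assuming a key age=50 was written beforehand (the slot number 741 for age comes from the MOVED error above):

redis-cli -p 6382    # connect to the replica
get age              # without READONLY: answered with MOVED 741 192.168.109.141:6379
readonly             # mark this connection as read-only capable; returns OK
get age              # now answered locally by the replica: "50"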

II. Fault tolerance: failover and recovery

1. Scenario: a master node in the cluster goes down and is later restored

1) Stop the node-1 master

docker stop [container ID]

2) Check the cluster state (its slave is promoted to master)

cluster nodes

3) Restore the original master

docker start [container ID]

4) Check the cluster state again (node 1 now rejoins as a slave)

cluster nodes

2. Scenario: a master fails and then recovers; verify that master and replica data remain consistent
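
A minimal sketch for checking scenarios 1 and 2, reusing the container names from this article; the test key age (slot 741, owned by the 6379/6382 shard) is illustrative:

docker exec redis-node-2 redis-cli -p 6380 -c set age 50    # -c follows the redirect to the 6379 master
docker stop redis-node-1                                    # take the 6379 master down; wait a few seconds for failover
docker exec redis-node-2 redis-cli -p 6380 cluster nodes    # 6382 should now be listed as a master
docker exec redis-node-2 redis-cli -p 6380 -c get age       # the promoted replica still serves the data
docker start redis-node-1                                   # the old master comes back ...
docker exec redis-node-2 redis-cli -p 6380 cluster nodes    # ... and rejoins as a slave of 6382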

3. Scenario: both a master and its slave go down; observe read/write behavior (with the default cluster-require-full-coverage yes, the cluster rejects requests with a CLUSTERDOWN error until that shard recovers)

III. Scaling out: adding a master/slave pair

1. Create two more containers with cluster mode enabled

[root@localhost redis]# docker run -d --name redis-node-7 --net host --privileged=true -v /redis/redis-node-7:/data redis --cluster-enabled yes --appendonly yes --port 6385
3ee891534940154a539d38717475f4c358c4e48a80c9cef85ca14e4d5410c721
[root@localhost redis]# docker run -d --name redis-node-8 --net host --privileged=true -v /redis/redis-node-8:/data redis --cluster-enabled yes --appendonly yes --port 6386
0561fc04757e90da47c850d198e8eade0925c93099b1214468bf31783cd81c67
[root@localhost redis]# docker ps
CONTAINER ID   IMAGE     COMMAND                  CREATED          STATUS          PORTS     NAMES
0561fc04757e   redis     "docker-entrypoint.s…"   6 seconds ago    Up 2 seconds              redis-node-8
3ee891534940   redis     "docker-entrypoint.s…"   20 seconds ago   Up 19 seconds             redis-node-7
8016db0a503c   redis     "docker-entrypoint.s…"   42 minutes ago   Up 42 minutes             redis-node-6
9a7156a41bc4   redis     "docker-entrypoint.s…"   42 minutes ago   Up 42 minutes             redis-node-5
808b5e0c9ddb   redis     "docker-entrypoint.s…"   42 minutes ago   Up 42 minutes             redis-node-4
b385ca96ea0d   redis     "docker-entrypoint.s…"   42 minutes ago   Up 42 minutes             redis-node-3
72dbd4eef385   redis     "docker-entrypoint.s…"   42 minutes ago   Up 42 minutes             redis-node-2
7f597842ac76   redis     "docker-entrypoint.s…"   43 minutes ago   Up 43 minutes             redis-node-1
[root@localhost redis]#

2. Enter the node-7 container and add it to the cluster as a master

Command format: redis-cli --cluster add-node <your IP>:6385 <your IP>:6379
6385 is the new node that will join as a master.
6379 is an existing cluster node acting as the introducer: by contacting 6379, the new 6385 node finds the cluster and joins it.

[root@localhost redis]# docker exec -it 3ee891534940 /bin/bash
root@localhost:/data# redis-cli --cluster add-node 192.168.109.141:6385 192.168.109.141:6379
>>> Adding node 192.168.109.141:6385 to cluster 192.168.109.141:6379
>>> Performing Cluster Check (using node 192.168.109.141:6379)
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 192.168.109.141:6385 to make it join the cluster.
[OK] New node added correctly.

The last line, "New node added correctly.", confirms that the new node has joined the cluster.

3. Check the cluster

The new node has not been assigned any hash slots yet.

root@localhost:/data# redis-cli --cluster check 192.168.109.141:6385
192.168.109.141:6385 (d26431f5...) -> 0 keys | 0 slots | 0 slaves.
192.168.109.141:6381 (1d822dd5...) -> 0 keys | 5461 slots | 1 slaves.
192.168.109.141:6379 (ac91d148...) -> 0 keys | 5461 slots | 1 slaves.
192.168.109.141:6380 (5ed4b671...) -> 1 keys | 5462 slots | 1 slaves.
[OK] 1 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.109.141:6385)
M: d26431f565b881bcb84c4827d8496c51df7683bd 192.168.109.141:6385
   slots: (0 slots) master
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

4. Assign slots to the new master

Reshard the hash slots.
Command format: redis-cli --cluster reshard <IP>:<port>
redis-cli --cluster reshard 192.168.109.141:6379

root@localhost:/data# redis-cli --cluster reshard 192.168.109.141:6379
>>> Performing Cluster Check (using node 192.168.109.141:6379)
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
M: d26431f565b881bcb84c4827d8496c51df7683bd 192.168.109.141:6385
   slots: (0 slots) master
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

16384 slots divided by the number of masters (4) = 4096 slots for the new node.

How many slots do you want to move (from 1 to 16384)? 4096

Enter the ID of the newly added master node:

What is the receiving node ID? d26431f565b881bcb84c4827d8496c51df7683bd

Enter the IDs of the source masters that will give up slots:
'all' means every existing master contributes;
if you list individual IDs instead, finish the list with 'done'.

Please enter all the source node IDs.
  Type 'all' to use all the nodes as source nodes for the hash slots.
  Type 'done' once you entered all the source nodes IDs.
Source node #1: all

Ready to move 4096 slots.
  Source nodes:
    M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
       slots:[0-5460] (5461 slots) master
       1 additional replica(s)
    M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
       slots:[10923-16383] (5461 slots) master
       1 additional replica(s)
    M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
       slots:[5461-10922] (5462 slots) master
       1 additional replica(s)
  Destination node:
    M: d26431f565b881bcb84c4827d8496c51df7683bd 192.168.109.141:6385
       slots: (0 slots) master
  Resharding plan:
    Moving slot 5461 from 5ed4b6716402e263b10efeb19c86e91caeee7623
    Moving slot 5462 from 5ed4b6716402e263b10efeb19c86e91caeee7623
    Moving slot 5463 from 5ed4b6716402e263b10efeb19c86e91caeee7623
    Moving slot 5464 from 5ed4b6716402e263b10efeb19c86e91caeee7623
    Moving slot 5465 from 5ed4b6716402e263b10efeb19c86e91caeee7623
    Moving slot 5466 from 5ed4b6716402e263b10efeb19c86e91caeee7623
    Moving slot 5467 from 5ed4b6716402e263b10efeb19c86e91caeee7623
    Moving slot 5468 from 5ed4b6716402e263b10efeb19c86e91caeee7623

When asked whether to proceed with the proposed reshard plan, answer yes:

Do you want to proceed with the proposed reshard plan (yes/no)? yes

5. Check the cluster state

Why does the new node 6385 hold three separate slot ranges while the old masters keep contiguous ones? A full reallocation would be too expensive, so each of the three old masters (6379/6380/6381) simply hands over roughly a third of the 4096 slots (about 1365 each) to the new node 6385.

root@localhost:/data# redis-cli --cluster check 192.168.109.141:6385
192.168.109.141:6385 (d26431f5...) -> 1 keys | 4096 slots | 0 slaves.
192.168.109.141:6381 (1d822dd5...) -> 0 keys | 4096 slots | 1 slaves.
192.168.109.141:6379 (ac91d148...) -> 0 keys | 4096 slots | 1 slaves.
192.168.109.141:6380 (5ed4b671...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 1 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.109.141:6385)
M: d26431f565b881bcb84c4827d8496c51df7683bd 192.168.109.141:6385
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#

6. Attach a slave to the new master

Command format: redis-cli --cluster add-node <ip>:<new slave port> <ip>:<new master port> --cluster-slave --cluster-master-id <new master's node ID>

redis-cli --cluster add-node 192.168.109.141:6386 192.168.109.141:6385 --cluster-slave --cluster-master-id d26431f565b881bcb84c4827d8496c51df7683bd

root@localhost:/data# redis-cli --cluster add-node 192.168.109.141:6386 192.168.109.141:6385 --cluster-slave --cluster-master-id d26431f565b881bcb84c4827d8496c51df7683bd
>>> Adding node 192.168.109.141:6386 to cluster 192.168.109.141:6385
>>> Performing Cluster Check (using node 192.168.109.141:6385)
M: d26431f565b881bcb84c4827d8496c51df7683bd 192.168.109.141:6385
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 192.168.109.141:6386 to make it join the cluster.
Waiting for the cluster to join

>>> Configure node as replica of 192.168.109.141:6385.
[OK] New node added correctly.

7. Check the cluster state

redis-cli --cluster check 192.168.109.141:6385

root@localhost:/data# redis-cli --cluster check 192.168.109.141:6385
192.168.109.141:6385 (d26431f5...) -> 1 keys | 4096 slots | 1 slaves.
192.168.109.141:6381 (1d822dd5...) -> 0 keys | 4096 slots | 1 slaves.
192.168.109.141:6379 (ac91d148...) -> 0 keys | 4096 slots | 1 slaves.
192.168.109.141:6380 (5ed4b671...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 1 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.109.141:6385)
M: d26431f565b881bcb84c4827d8496c51df7683bd 192.168.109.141:6385
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
   1 additional replica(s)
S: e1653a89863a31a8f60c0e7562e9297b8492e490 192.168.109.141:6386
   slots: (0 slots) slave
   replicates d26431f565b881bcb84c4827d8496c51df7683bd
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#

IV. Scaling in: removing the master/slave pair

1. Check the cluster state

root@localhost:/data# redis-cli --cluster check 192.168.109.141:6385
192.168.109.141:6385 (d26431f5...) -> 1 keys | 4096 slots | 1 slaves.
192.168.109.141:6381 (1d822dd5...) -> 0 keys | 4096 slots | 1 slaves.
192.168.109.141:6379 (ac91d148...) -> 0 keys | 4096 slots | 1 slaves.
192.168.109.141:6380 (5ed4b671...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 1 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.109.141:6385)
M: d26431f565b881bcb84c4827d8496c51df7683bd 192.168.109.141:6385
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
   1 additional replica(s)
S: e1653a89863a31a8f60c0e7562e9297b8492e490 192.168.109.141:6386
   slots: (0 slots) slave
   replicates d26431f565b881bcb84c4827d8496c51df7683bd
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#

2. Remove the 6386 slave node from the cluster

Command format: redis-cli --cluster del-node <ip>:<slave port> <6386 slave's node ID>

 root@localhost:/data# redis-cli --cluster del-node 192.168.109.141:6386 e1653a89863a31a8f60c0e7562e9297b8492e490
>>> Removing node e1653a89863a31a8f60c0e7562e9297b8492e490 from cluster 192.168.109.141:6386
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.
root@localhost:/data#

Check the cluster again: node 6386 is gone.

root@localhost:/data# redis-cli --cluster check 192.168.109.141:6385
192.168.109.141:6385 (d26431f5...) -> 1 keys | 4096 slots | 0 slaves.
192.168.109.141:6381 (1d822dd5...) -> 0 keys | 4096 slots | 1 slaves.
192.168.109.141:6379 (ac91d148...) -> 0 keys | 4096 slots | 1 slaves.
192.168.109.141:6380 (5ed4b671...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 1 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.109.141:6385)
M: d26431f565b881bcb84c4827d8496c51df7683bd 192.168.109.141:6385
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#

3. Move the slots off the 6385 master and redistribute them to the other masters

1) Move 2,096 slots to 6379

root@localhost:/data# redis-cli --cluster reshard 192.168.109.141:6380
>>> Performing Cluster Check (using node 192.168.109.141:6380)
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
M: d26431f565b881bcb84c4827d8496c51df7683bd 192.168.109.141:6385
   slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[1365-5460] (4096 slots) master
   1 additional replica(s)
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

How many slots to move:

How many slots do you want to move (from 1 to 16384)? 2096

The ID of the master that will receive the slots; here I use the cluster ID of node 6379:

What is the receiving node ID? ac91d14851cf4f35a376a453b452c06a9d065069

The master to take the slots from; here I use the cluster ID of node 6385:

Please enter all the source node IDs.
  Type 'all' to use all the nodes as source nodes for the hash slots.
  Type 'done' once you entered all the source nodes IDs.
Source node #1: d26431f565b881bcb84c4827d8496c51df7683bd
Source node #2: done

Confirm the proposed plan: yes

    Moving slot 6189 from d26431f565b881bcb84c4827d8496c51df7683bd
    Moving slot 6190 from d26431f565b881bcb84c4827d8496c51df7683bd
    Moving slot 6191 from d26431f565b881bcb84c4827d8496c51df7683bd
Do you want to proceed with the proposed reshard plan (yes/no)? yes
Moving slot 0 from 192.168.109.141:6385 to 192.168.109.141:6379:
Moving slot 1 from 192.168.109.141:6385 to 192.168.109.141:6379:
Moving slot 2 from 192.168.109.141:6385 to 192.168.109.141:6379:

Check the cluster state:
6379 now has 2,096 more slots (6,192 in total).

root@localhost:/data# redis-cli --cluster check 192.168.109.141:6385
192.168.109.141:6385 (d26431f5...) -> 0 keys | 2000 slots | 0 slaves.
192.168.109.141:6381 (1d822dd5...) -> 0 keys | 4096 slots | 1 slaves.
192.168.109.141:6379 (ac91d148...) -> 1 keys | 6192 slots | 1 slaves.
192.168.109.141:6380 (5ed4b671...) -> 0 keys | 4096 slots | 1 slaves.
[OK] 1 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.109.141:6385)
M: d26431f565b881bcb84c4827d8496c51df7683bd 192.168.109.141:6385
   slots:[6192-6826],[10923-12287] (2000 slots) master
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[0-6191] (6192 slots) master
   1 additional replica(s)
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[6827-10922] (4096 slots) master
   1 additional replica(s)
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

2) Move the remaining 2,000 slots to 6380

Same procedure as above; a sketch of the prompts is shown below.
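
The dialogue is the same as in step 1), just with 6380 as the receiver; a sketch of the answers, using the node IDs shown in the check output above:

redis-cli --cluster reshard 192.168.109.141:6380
How many slots do you want to move (from 1 to 16384)? 2000
What is the receiving node ID? 5ed4b6716402e263b10efeb19c86e91caeee7623
Source node #1: d26431f565b881bcb84c4827d8496c51df7683bd
Source node #2: done
Do you want to proceed with the proposed reshard plan (yes/no)? yes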

4. Check the cluster

The 6385 node no longer holds any slots.

root@localhost:/data# redis-cli --cluster check 192.168.109.141:6385
192.168.109.141:6385 (d26431f5...) -> 0 keys | 0 slots | 0 slaves.
192.168.109.141:6381 (1d822dd5...) -> 0 keys | 4096 slots | 1 slaves.
192.168.109.141:6379 (ac91d148...) -> 1 keys | 6192 slots | 1 slaves.
192.168.109.141:6380 (5ed4b671...) -> 0 keys | 6096 slots | 1 slaves.
[OK] 1 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.109.141:6385)
M: d26431f565b881bcb84c4827d8496c51df7683bd 192.168.109.141:6385
   slots: (0 slots) master
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[0-6191] (6192 slots) master
   1 additional replica(s)
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[6192-12287] (6096 slots) master
   1 additional replica(s)
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#

5. Remove the 6385 master from the cluster

Command format: redis-cli --cluster del-node <6385's ip>:<port> <6385's node ID>

root@localhost:/data# redis-cli --cluster del-node 192.168.109.141:6385 d26431f565b881bcb84c4827d8496c51df7683bd
>>> Removing node d26431f565b881bcb84c4827d8496c51df7683bd from cluster 192.168.109.141:6385
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.

6. Check the cluster state

root@localhost:/data# redis-cli --cluster check 192.168.109.141:6381
192.168.109.141:6381 (1d822dd5...) -> 0 keys | 4096 slots | 1 slaves.
192.168.109.141:6379 (ac91d148...) -> 1 keys | 6192 slots | 1 slaves.
192.168.109.141:6380 (5ed4b671...) -> 0 keys | 6096 slots | 1 slaves.
[OK] 1 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.109.141:6381)
M: 1d822dd5b1886b84b6697543471f5916cdaa3665 192.168.109.141:6381
   slots:[12288-16383] (4096 slots) master
   1 additional replica(s)
S: 933b378ec43d9d016997ff93f16a812723657d15 192.168.109.141:6382
   slots: (0 slots) slave
   replicates ac91d14851cf4f35a376a453b452c06a9d065069
M: ac91d14851cf4f35a376a453b452c06a9d065069 192.168.109.141:6379
   slots:[0-6191] (6192 slots) master
   1 additional replica(s)
S: d43e6129151a03404212826cb48d20eb591c4d8a 192.168.109.141:6383
   slots: (0 slots) slave
   replicates 5ed4b6716402e263b10efeb19c86e91caeee7623
M: 5ed4b6716402e263b10efeb19c86e91caeee7623 192.168.109.141:6380
   slots:[6192-12287] (6096 slots) master
   1 additional replica(s)
S: 98318770a6f56c9a9b3f00b4797d367ea955cc09 192.168.109.141:6384
   slots: (0 slots) slave
   replicates 1d822dd5b1886b84b6697543471f5916cdaa3665
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.