一、解决 Ruby 环境(redis-trib.rb 依赖 Ruby 运行环境)
https://blog.csdn.net/liuhaoy/article/details/104293482
二、修改配置文件
去掉下面两行的注释(每个节点都要修改)
[root@youyou2 ~]# vi /apps/redis/etc/redis.conf
cluster-enabled yes
cluster-config-file nodes-6379.conf
重启后查看端口,会有 6379 和 16379 两个端口(16379 是集群总线端口,固定为服务端口加 10000)
三、创建集群
如果集群需要密码,请将下面文件中的密码修改成自己集群的密码;集群中各 Redis 节点的密码必须设置一致,或者都不设置密码
[root@youyou1 ~]# cd /usr/src/redis-4.0.14/src/
[root@youyou1 src]# cat /usr/local/lib/ruby/gems/2.5.0/gems/redis-4.1.3/lib/redis/client.rb | grep 123456
:password => "123456",
[root@youyou1 src]#
[root@youyou1 src]# ./redis-trib.rb create --replicas 1 192.168.181.200:6379 192.168.181.201:6379 192.168.181.202:6379 192.168.181.203:6379 192.168.181.204:6379 192.168.181.205:6379
>>> Creating cluster
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
192.168.181.200:6379
192.168.181.201:6379
192.168.181.202:6379
Adding replica 192.168.181.204:6379 to 192.168.181.200:6379
Adding replica 192.168.181.205:6379 to 192.168.181.201:6379
Adding replica 192.168.181.203:6379 to 192.168.181.202:6379
M: 9d7fe846ea69d93323206359feb8f235c9ec096a 192.168.181.200:6379
slots:0-5460 (5461 slots) master
M: fa436230ab395d53a31ae8fc6dd6eea63dd1a2fa 192.168.181.201:6379
slots:5461-10922 (5462 slots) master
M: 71461df9cb04a2f24430935cdd802a492fe4620a 192.168.181.202:6379
slots:10923-16383 (5461 slots) master
S: 4bdc58c65d5aae267119ec57c377963f3cc843fe 192.168.181.203:6379
replicates 71461df9cb04a2f24430935cdd802a492fe4620a
S: 0a3ce29afcc77915040e280ff3ff0c3764ab4079 192.168.181.204:6379
replicates 9d7fe846ea69d93323206359feb8f235c9ec096a
S: 0703a4e443a75adf8d5c531f2922b7e531d86e5e 192.168.181.205:6379
replicates fa436230ab395d53a31ae8fc6dd6eea63dd1a2fa
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join....
>>> Performing Cluster Check (using node 192.168.181.200:6379)
M: 9d7fe846ea69d93323206359feb8f235c9ec096a 192.168.181.200:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
M: 71461df9cb04a2f24430935cdd802a492fe4620a 192.168.181.202:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 4bdc58c65d5aae267119ec57c377963f3cc843fe 192.168.181.203:6379
slots: (0 slots) slave
replicates 71461df9cb04a2f24430935cdd802a492fe4620a
S: 0703a4e443a75adf8d5c531f2922b7e531d86e5e 192.168.181.205:6379
slots: (0 slots) slave
replicates fa436230ab395d53a31ae8fc6dd6eea63dd1a2fa
M: fa436230ab395d53a31ae8fc6dd6eea63dd1a2fa 192.168.181.201:6379
slots:5461-10922 (5462 slots) master
1 additional replica(s)
S: 0a3ce29afcc77915040e280ff3ff0c3764ab4079 192.168.181.204:6379
slots: (0 slots) slave
replicates 9d7fe846ea69d93323206359feb8f235c9ec096a
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
[root@youyou1 src]#