Docker研学-部署Redis集群(三主三从)

学习自b站狂神说Docker: https://www.bilibili.com/video/BV1og4y1q7M4/?spm_id_from=333.337.search-card.all.click

部署Redis集群(三主三从)

(此处原文为集群架构示意图:三主三从)
6 个节点,三主三从,每台主机都有自己的从机。主机可用时,从机只作为备份;若主机挂了,从机就会替代主机。我们启动 6 个容器进行测试;由于逐个启动太慢,编写 shell 脚本批量启动,同时要为这个集群创建专用网卡。

# 创建redis网卡
[root@localhost ~]# docker network create redis --subnet 172.38.0.0/16
# 查看网卡信息
[root@localhost ~]# docker network ls
[root@localhost ~]# docker network inspect redis

# 创建6个redis服务,通过脚本创建6个redis配置(redis需要写配置文件)
# 通过for循环6次
[root@localhost ~]# for port in $(seq 1 6);\
do \
# 创建redis的配置文件(mydata/redis目录下)
mkdir -p /mydata/redis/node-${port}/conf
# 创建redis的conf配置文件
touch /mydata/redis/node-${port}/conf/redis.conf
# 配置文件内写具体配置
cat << EOF >/mydata/redis/node-${port}/conf/redis.conf
# 配置端口
port 6379
bind 0.0.0.0
# 开启集群
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
# 连接具体的ip
cluster-announce-ip 172.38.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
# 将redis的RDB方式切换为AOF方式 生成appendonly.aof文件
appendonly yes
EOF
done

# 粘贴用
# Generate the per-node Redis cluster config files (paste-ready version).
# Creates /mydata/redis/node-{1..6}/conf/redis.conf; '>' truncates the file,
# so re-running the loop is safe and never duplicates directives.
for port in {1..6}; do
  mkdir -p "/mydata/redis/node-${port}/conf"
  # Each node listens on 6379 inside its container; the announce-ip is the
  # fixed address the container will get on the 'redis' Docker network.
  cat >"/mydata/redis/node-${port}/conf/redis.conf" <<EOF
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.38.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
done

# 查询对应目录
[root@localhost mydata]# cd /mydata
[root@localhost mydata]# ls
redis
[root@localhost redis]# ls
node-1  node-2  node-3  node-4  node-5  node-6
[root@localhost redis]# cd node-1
[root@localhost node-1]# ls
conf
[root@localhost node-1]# cd conf
[root@localhost conf]# ls
redis.conf
[root@localhost conf]# cat redis.conf
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.38.0.11
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes

# 逐个启动:外部端口 6371 映射容器内部端口 6379(容器内部端口不变,外部端口逐个递增);将容器连接到名为 redis 的网络,并指定容器 IP 为 172.38.0.11;末尾的 redis-server /etc/redis/redis.conf 表示以该配置文件启动 Redis 服务器
# Start cluster node 1: host ports 6371/16371 map to container ports
# 6379/16379 (client + cluster bus); the host data and config directories
# are bind-mounted, and the container joins the user-defined 'redis'
# network with the fixed IP 172.38.0.11 that matches cluster-announce-ip
# in its redis.conf. The trailing command starts Redis with that config.
docker run -p 6371:6379 -p 16371:16379 --name redis-1 \
-v /mydata/redis/node-1/data:/data \
-v /mydata/redis/node-1/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.11 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf

# 查看创建的容器
[root@localhost conf]# docker ps
CONTAINER ID   IMAGE     COMMAND   CREATED    STATUS   PORTS  NAMES
cd6ffaff0946   redis:5.0.9-alpine3.11   "docker-entrypoint.s…"   9 seconds ago   Up 8 seconds   0.0.0.0:6371->6379/tcp, :::6371->6379/tcp, 0.0.0.0:16371->16379/tcp, :::16371->16379/tcp   redis-1

# 创建另外5个容器
# Create the remaining five containers. Only four things change per node:
# the host ports (637X / 1637X), the container name (redis-X), the mounted
# node directory (node-X) and the announced cluster IP (172.38.0.1X).
docker run -p 6372:6379 -p 16372:16379 --name redis-2 \
-v /mydata/redis/node-2/data:/data \
-v /mydata/redis/node-2/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.12 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
# ... repeat the same pattern for redis-3, redis-4 and redis-5 ...
docker run -p 6376:6379 -p 16376:16379 --name redis-6 \
-v /mydata/redis/node-6/data:/data \
-v /mydata/redis/node-6/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.38.0.16 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf

# 或者通过脚本启动
# Script alternative: launch all six containers in one loop. The original
# snippet used ${port} without defining it; the surrounding for-loop is
# required for the variable to be in scope.
for port in {1..6}; do
  docker run -p "637${port}:6379" -p "1637${port}:16379" --name "redis-${port}" \
  -v "/mydata/redis/node-${port}/data:/data" \
  -v "/mydata/redis/node-${port}/conf/redis.conf:/etc/redis/redis.conf" \
  -d --net redis --ip "172.38.0.1${port}" redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf
done

# 查看是否6个容器都启动了
[root@localhost conf]# docker ps

Docker 的轻量级特性使得它可以在一台虚拟机上启动 6 个 Redis 容器

docker logs -f 容器ID 可以打印错误日志 方便你找到错误原因

创建集群

# 进入容器redis中没有/bin/bash命令,有/bin/sh命令
[root@localhost conf]# docker exec -it redis-1 /bin/sh
# 查看AOF配置和节点配置
/data # ls
appendonly.aof  nodes.conf

# redis链接client 通过集群方式 集群的切片 容器内部的端口都是6379 输入yes
/data # redis-cli --cluster create 172.38.0.11:6379 172.38.0.12:6379 172.38.0.13:6379 172.38.0.14:6379 172.38.0.15:6379 172.38.0.16:6379 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 172.38.0.15:6379 to 172.38.0.11:6379
Adding replica 172.38.0.16:6379 to 172.38.0.12:6379
Adding replica 172.38.0.14:6379 to 172.38.0.13:6379
M: 38b1ee8fbd071ce0fe6379675548cc0d9f3b909e 172.38.0.11:6379
   slots:[0-5460] (5461 slots) master
M: 1b4b25e192ad251fa83b627c19953d5910314786 172.38.0.12:6379
   slots:[5461-10922] (5462 slots) master
M: 9b90851fc95e774658d19f86373b31dde647eaa8 172.38.0.13:6379
   slots:[10923-16383] (5461 slots) master
S: f0ef5156c43b3bec0fdc892a2106533078403fbc 172.38.0.14:6379
   replicates 9b90851fc95e774658d19f86373b31dde647eaa8
S: f51588372d280a67ddcb7987efddb54a737071bc 172.38.0.15:6379
   replicates 38b1ee8fbd071ce0fe6379675548cc0d9f3b909e
S: 0dc753743c85cb7595482957208c76c333d0120a 172.38.0.16:6379
   replicates 1b4b25e192ad251fa83b627c19953d5910314786
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
...
>>> Performing Cluster Check (using node 172.38.0.11:6379)
M: 38b1ee8fbd071ce0fe6379675548cc0d9f3b909e 172.38.0.11:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: 0dc753743c85cb7595482957208c76c333d0120a 172.38.0.16:6379
   slots: (0 slots) slave
   replicates 1b4b25e192ad251fa83b627c19953d5910314786
M: 1b4b25e192ad251fa83b627c19953d5910314786 172.38.0.12:6379
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
M: 9b90851fc95e774658d19f86373b31dde647eaa8 172.38.0.13:6379
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: f51588372d280a67ddcb7987efddb54a737071bc 172.38.0.15:6379
   slots: (0 slots) slave
   replicates 38b1ee8fbd071ce0fe6379675548cc0d9f3b909e
S: f0ef5156c43b3bec0fdc892a2106533078403fbc 172.38.0.14:6379
   slots: (0 slots) slave
   replicates 9b90851fc95e774658d19f86373b31dde647eaa8
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
/data # 

# 测试:redis-cli 以单机方式连接,redis-cli -c 以集群方式连接
/data # redis-cli -c
127.0.0.1:6379> 

# 查看集群信息是否正确
127.0.0.1:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3                           #集群数量为3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:357
cluster_stats_messages_pong_sent:365
cluster_stats_messages_sent:722
cluster_stats_messages_ping_received:360
cluster_stats_messages_pong_received:357
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:722

# 查看节点信息是否成功连接 三主三从
127.0.0.1:6379> cluster nodes
0dc753743c85cb7595482957208c76c333d0120a 172.38.0.16:6379@16379 slave 1b4b25e192ad251fa83b627c19953d5910314786 0 1698201288951 6 connected
38b1ee8fbd071ce0fe6379675548cc0d9f3b909e 172.38.0.11:6379@16379 myself,master - 0 1698201288000 1 connected 0-5460
1b4b25e192ad251fa83b627c19953d5910314786 172.38.0.12:6379@16379 master - 0 1698201287422 2 connected 5461-10922
9b90851fc95e774658d19f86373b31dde647eaa8 172.38.0.13:6379@16379 master - 0 1698201288000 3 connected 10923-16383
f51588372d280a67ddcb7987efddb54a737071bc 172.38.0.15:6379@16379 slave 38b1ee8fbd071ce0fe6379675548cc0d9f3b909e 0 1698201288000 5 connected
f0ef5156c43b3bec0fdc892a2106533078403fbc 172.38.0.14:6379@16379 slave 9b90851fc95e774658d19f86373b31dde647eaa8 0 1698201289556 4 connected

# set一个值 是13:6379处理的服务 这是我们的第三个节点 信息存到了第三个节点中
127.0.0.1:6379> set a b
-> Redirected to slot [15495] located at 172.38.0.13:6379
OK

# 将3号节点停机 查看从机是否替代主机
[root@localhost ~]# docker stop redis-3

# 在集群中获取a对应的值 在14:6379中获取到 14是13的从机 实现高可用
127.0.0.1:6379> get a
-> Redirected to slot [15495] located at 172.38.0.14:6379
"b"

# 再次查看节点信息 13故障转移 14变为主机
172.38.0.14:6379> cluster nodes
f51588372d280a67ddcb7987efddb54a737071bc 172.38.0.15:6379@16379 slave 38b1ee8fbd071ce0fe6379675548cc0d9f3b909e 0 1698201832093 5 connected
f0ef5156c43b3bec0fdc892a2106533078403fbc 172.38.0.14:6379@16379 myself,master - 0 1698201832000 7 connected 10923-16383
1b4b25e192ad251fa83b627c19953d5910314786 172.38.0.12:6379@16379 master - 0 1698201831072 2 connected 5461-10922
9b90851fc95e774658d19f86373b31dde647eaa8 172.38.0.13:6379@16379 master,fail - 1698201619703 1698201618173 3 connected
38b1ee8fbd071ce0fe6379675548cc0d9f3b909e 172.38.0.11:6379@16379 master - 0 1698201833111 1 connected 0-5460
0dc753743c85cb7595482957208c76c333d0120a 172.38.0.16:6379@16379 slave 1b4b25e192ad251fa83b627c19953d5910314786 0 1698201833111 6 connected

redis集群搭建完成

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
以下是使用docker-compose部署Redis集群的步骤: 1.创建一个目录,例如redis-cluster,并在该目录中创建一个docker-compose.yaml文件。 2.在docker-compose.yaml文件中编写以下内容: ```yaml version: '3' services: redis-1: image: redis:6.0.9 command: redis-server /usr/local/etc/redis/redis.conf volumes: - ./redis.conf:/usr/local/etc/redis/redis.conf ports: - "6379" networks: - redis-cluster redis-2: image: redis:6.0.9 command: redis-server /usr/local/etc/redis/redis.conf volumes: - ./redis.conf:/usr/local/etc/redis/redis.conf ports: - "6380" networks: - redis-cluster redis-3: image: redis:6.0.9 command: redis-server /usr/local/etc/redis/redis.conf volumes: - ./redis.conf:/usr/local/etc/redis/redis.conf ports: - "6381" networks: - redis-cluster networks: redis-cluster: driver: bridge ``` 3.在redis-cluster目录中创建一个redis.conf文件,并将以下内容复制到文件中: ```conf bind 0.0.0.0 port 6379 cluster-enabled yes cluster-config-file nodes.conf cluster-node-timeout 5000 appendonly yes requirepass 1111 masterauth 1111 ``` 4.在终端中导航到redis-cluster目录,并运行以下命令启动Redis集群: ```shell docker-compose up -d ``` 5.使用以下命令进入redis-1容器: ```shell docker exec -it redis-cluster_redis-1_1 /bin/bash ``` 6.在redis-1容器中,使用以下命令创建Redis集群: ```shell redis-cli --cluster create 172.20.0.2:6379 172.20.0.3:6379 172.20.0.4:6379 --cluster-replicas 0 ``` 7.现在,您已经成功地使用docker-compose部署Redis集群

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

泰勒疯狂展开

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值