【摘要】 前言 在分布式情况下,我们可以考虑使用主从模式进行提高数据的读写效率,并且可以利用主从复制可以做到数据的高可用。把数据写入多个节点会提高数据的冗余备份,也更加安全。 目录 环境准备拉取redis...
- 环境准备
- 系统 CentOS 7
- 服务器IP准备
- master ip:192.190.10.13
slave ip:192.190.10.15 - 关闭防火墙
- 查看状态: # systemctl status firewalld
立即关闭: # systemctl stop firewalld
开机不启动: # systemctl disable firewalld - 查看状态: # getenforce #enforcing:拦截 permissive:提醒不拦截 disabled:禁用
- 立即关闭: # setenforce 0 #将状态改为permissive
开机不启动: # vim /etc/selinux/config # SELINUX=disabled - 拉取redis:6.2.5 镜像
-
拉取镜像:docker pull redis:6.2.5,然后在 /home/redis 目录下新建如下目录
//执行如下命令即可
master
mkdir -p /home/redis/conf
mkdir -p /home/redis/data
slave
mkdir -p /home/redis/conf
mkdir -p /home/redis/data - master 配置文件
- cat myredis.conf
-
[root@minio2 conf]# cat myredis.conf # bind 192.168.1.100 10.0.0.1 # bind 127.0.0.1 ::1 bind 0.0.0.0 # 取消保护模式 protected-mode no # 端口号 port 6379 tcp-backlog 511 # 密码 requirepass ksh@@@ timeout 0 tcp-keepalive 300 daemonize no supervised no pidfile /var/run/redis_6379.pid loglevel notice logfile "" databases 30 always-show-logo yes save 900 1 save 300 10 save 60 10000 stop-writes-on-bgsave-error yes rdbcompression yes rdbchecksum yes dbfilename dump.rdb dir ./ replica-serve-stale-data yes replica-read-only yes repl-diskless-sync no repl-disable-tcp-nodelay no replica-priority 100 lazyfree-lazy-eviction no lazyfree-lazy-expire no lazyfree-lazy-server-del no replica-lazy-flush no appendonly yes appendfilename "appendonly.aof" no-appendfsync-on-rewrite no auto-aof-rewrite-percentage 100 auto-aof-rewrite-min-size 64mb aof-load-truncated yes aof-use-rdb-preamble yes lua-time-limit 5000 slowlog-max-len 128 notify-keyspace-events "" hash-max-ziplist-entries 512 hash-max-ziplist-value 64 list-max-ziplist-size -2 list-compress-depth 0 set-max-intset-entries 512 zset-max-ziplist-entries 128 zset-max-ziplist-value 64 hll-sparse-max-bytes 3000 stream-node-max-bytes 4096 stream-node-max-entries 100 activerehashing yes hz 10 dynamic-hz yes aof-rewrite-incremental-fsync yes rdb-save-incremental-fsync yes slave-announce-ip 192.190.10.13 slave-announce-port 6379
master(主节点,以写为主)
-
docker run -d --name myredis -p 6379:6379 -v /home/redis/conf/myredis.conf:/etc/redis/redis.conf -v /home/redis/data:/data redis:6.2.5 redis-server /etc/redis/redis.conf
-
slave(从节点,以读为主) -
slave配置文件
-
cat myredis02.conf
-
[root@benji conf]# cat myredis02.conf # bind 192.168.1.100 10.0.0.1 # bind 127.0.0.1 ::1 bind 0.0.0.0 protected-mode no port 6379 tcp-backlog 511 requirepass ksh@@@ timeout 0 tcp-keepalive 300 daemonize no supervised no pidfile /var/run/redis_6379.pid loglevel notice logfile "" databases 30 always-show-logo yes save 900 1 save 300 10 save 60 10000 stop-writes-on-bgsave-error yes rdbcompression yes rdbchecksum yes dbfilename dump.rdb dir ./ replica-serve-stale-data yes replica-read-only yes repl-diskless-sync no repl-disable-tcp-nodelay no replica-priority 100 lazyfree-lazy-eviction no lazyfree-lazy-expire no lazyfree-lazy-server-del no replica-lazy-flush no appendonly yes appendfilename "appendonly.aof" no-appendfsync-on-rewrite no auto-aof-rewrite-percentage 100 auto-aof-rewrite-min-size 64mb aof-load-truncated yes aof-use-rdb-preamble yes lua-time-limit 5000 slowlog-max-len 128 notify-keyspace-events "" hash-max-ziplist-entries 512 hash-max-ziplist-value 64 list-max-ziplist-size -2 list-compress-depth 0 set-max-intset-entries 512 zset-max-ziplist-entries 128 zset-max-ziplist-value 64 hll-sparse-max-bytes 3000 stream-node-max-bytes 4096 stream-node-max-entries 100 activerehashing yes hz 10 dynamic-hz yes aof-rewrite-incremental-fsync yes rdb-save-incremental-fsync yes #replicaof 192.190.10.13 6379 slaveof 192.190.10.13 6379 masterauth ksh@@@ slave-announce-ip 192.190.10.15 slave-announce-port 6378
docker run -d --name myredis01 -p 6378:6379 -v /home/redis/conf/myredis02.conf:/etc/redis/redis.conf -v /home/redis/data:/data redis:6.2.5 redis-server /etc/redis/redis.conf
-
执行主从同步
-
master
docker exec -it myredis /bin/bash
redis-cli -h 127.0.0.1 -p 6379 -a ksh@@@
info replication -
slave
docker exec -it myredis01 /bin/bash -
测试
- 至此:
- 感谢阅读~docker部署redis主从复制(一主,一从)到此结束!