任务描述
完成 redis 主从迁移:
主节点:从 10.200.16.75 迁移到 10.200.16.10
从节点:从 10.200.16.74 迁移到 10.200.16.11
最后保证迁移后数据基本一致.
搭建 Redis 主从
编写 ansible
Note:手动给多个机器搭建 redis 主从,比较麻烦… 人生苦短,我选 ansible
初始化配置
给服务器进行目录相关的初始化.
# 1. /ansible/playbooks/redis5_init.yml
# --------
# Prepare directories and base packages before deploying a redis instance.
# 1. init the master node:
# ansible-playbook -i 10.200.16.10, playbooks/infra/redis5_init.yml -e name=soc-cache -v
#
# 2. init the replica node:
# ansible-playbook -i 10.200.16.11, playbooks/infra/redis5_init.yml -e name=soc-cache -v
# --------
- name: redis5 init
  gather_facts: false
  hosts: all
  serial: 1          # one host at a time
  roles:
    - role: redis5_init
# 2. /ansible/roles/redis5_init
# ./tasks/main.yml
# Create every directory the redis instance needs (install, data, pid,
# log, config), owned by admin with mode 0755.
- name: prepare dir
  file:
    path: "{{ item }}"
    state: directory
    mode: "0755"
    owner: admin
  loop:
    - "{{ INSTALL_DIR }}"
    - "{{ REDIS_DATA_DIR }}"
    - "{{ REDIS_PID_DIR }}"
    - "{{ REDIS_LOG_DIR }}"
    - "{{ REDIS_CONFIG_DIR }}"
# aptitude is preferred by the apt module for some operations.
- name: Install aptitude
  become: yes
  apt:
    update_cache: true
    name: aptitude
    state: latest
# unzip is required to unpack the redis distribution archive.
- name: Install required system packages
  become: yes
  apt:
    name:
      - unzip
    state: latest
    update_cache: true
# ./vars/main.yml
# Directory layout for the redis-{{name}} instance. The same paths are
# duplicated in roles/redis5_deploy/vars — keep the two files in sync.
REDIS_DATA_PATH: "/piqiu/data/redis"
REDIS_DATA_DIR: "{{REDIS_DATA_PATH}}/redis-{{name}}"
REDIS_PID_DIR: "/piqiu/data/work/redis-{{name}}"
REDIS_LOG_DIR: "/piqiu/logs/usr/redis-{{name}}"
REDIS_CONFIG_DIR: "/piqiu/dist/conf/redis-{{name}}"
version: 5.0.5
# Archive name served by the internal mirror (see REDIS_INSTALL_SRC in the
# deploy role's vars).
REDIS_DISTRIBUTION: "redis_{{version}}_linux_amd64.zip"
INSTALL_DIR: "/piqiu/dist/sys/redis-{{name}}"
节点部署
下载 redis,并进行部署(运行、日志配置…)
# 1. /ansible/playbooks/redis5_deploy.yml
# --------
# 1. deploy the master node:
# ansible-playbook -i 10.200.16.10, playbooks/infra/redis5_deploy.yml -e "name=soc-cache port=6379 REDIS_MAXMEMORY=4GB" -v
#
# 2. deploy the replica node (master_ip/master_port turn on replication):
# ansible-playbook -i 10.200.16.11, playbooks/infra/redis5_deploy.yml -e "name=soc-cache port=6379 master_ip=10.200.16.10 master_port=6379 REDIS_MAXMEMORY=4GB" -v
# --------
- name: redis5 deploy
  hosts: all
  gather_facts: false
  serial: 1          # roll out one host at a time
  roles:
    - role: redis5_deploy
# 2. /ansible/roles/redis5_deploy
# ./tasks/main.yml
# Role entry point: download/unpack redis, render the config file and the
# control script, then start the server.
- include_tasks: main_redis_install.yml
- include_tasks: main_conf_deploy.yml
- include_tasks: main_start_action.yml
# ./tasks/main_redis_install.yml
# Fetch the distribution archive from the internal mirror and unpack it
# into the install dir on the target host (copy: no == remote source).
- name: download the redis
  unarchive:
    src: "{{ REDIS_INSTALL_SRC }}"
    dest: "{{ INSTALL_DIR }}"
    copy: no
  tags:
    - download
# ./tasks/main_conf_deploy.yml
# Render the instance control script (executable) and the redis config.
- name: template redis.sh
  template:
    src: redis.sh.tpl
    dest: "{{ REDIS_SH_FILE }}"
    mode: "0744"
- name: template redis conf
  template:
    src: redis.conf.tpl
    dest: "{{ REDIS_CONFIG_FILE }}"
    mode: "0644"
# ./tasks/main_start_action.yml
# Launch the instance via the rendered control script.
# NOTE(review): not idempotent — the script exits 1 when the pid file
# already exists, so re-running the play against a running node fails.
- name: start redis
shell: "{{ REDIS_SH_FILE }} start"
# ./templates/redis.conf.tpl
# Rendered by Ansible; {{...}} placeholders come from roles/redis5_deploy/vars
# and the -e extra-vars passed on the command line.
# --- network ---
bind {{REDIS_HOST}}
port {{port}}
protected-mode no
tcp-backlog 10240
timeout 3600
tcp-keepalive 300
# --- process / logging ---
daemonize yes
supervised no
pidfile {{REDIS_PID_FILE}}
loglevel warning
logfile {{REDIS_LOG_FILE}}
# Only DB 0 exists (SELECT 1..15 will fail).
databases 1
always-show-logo no
# --- RDB snapshotting ---
# NOTE(review): no explicit `save` directives here — the snapshot schedule
# falls back to redis-server defaults; confirm that is intended.
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename {{REDIS_RDB_FILE}}
dir {{REDIS_DATA_DIR}}
# --- replication ---
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
# --- memory ---
maxmemory {{REDIS_MAXMEMORY}}
maxmemory-policy {{REDIS_MAXMEMORY_POLICY}}
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
# --- AOF (disabled; remaining aof settings only matter if it is enabled) ---
appendonly no
appendfilename {{REDIS_AOF_FILE}}
appendfsync no
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 512mb
aof-load-truncated yes
aof-use-rdb-preamble yes
# --- scripting / slowlog / events ---
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
# --- data-structure encoding thresholds ---
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
# --- clients / background tasks ---
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
# Rendered only for replicas (master_ip/master_port passed via -e).
# NOTE(review): `slaveof` is the pre-5.0 spelling; `replicaof` is the
# preferred alias in redis 5 — both behave identically.
{% if master_ip is defined and master_port is defined %}
slaveof {{ master_ip }} {{ master_port }}
{% endif %}
# ./templates/redis.sh.tpl
#!/bin/bash
# Control script for the redis-{{ port }} instance, rendered by Ansible from
# redis.sh.tpl.
#
# Usage: redis-{{ port }}.sh start|stop|restart
#
# Fixes over the previous version:
# - the dispatch loop `for i in $(seq 1 $#)` examined "$1" on every
#   iteration without shifting, re-running the same action once per
#   argument supplied;
# - template expansions are now quoted (the unquoted
#   `REDIS_HOME={{ INSTALL_DIR }}` broke when the rendered value contained
#   spaces, and made the raw template unparseable);
# - message typos fixed ("not existd", "stop redis exporter success").
REDIS_HOME="{{ INSTALL_DIR }}"
START_OPTS="{{ REDIS_CONFIG_FILE }}"
PID_FILE="{{ REDIS_PID_FILE }}"

# Start redis-server; refuses to start while a pid file exists.
start() {
  if [[ -f "${PID_FILE}" ]]; then
    echo "pid file already exist" >&2
    exit 1
  fi
  "${REDIS_HOME}/bin/redis-server" "${START_OPTS}"
  echo "start redis success"
}

# Stop the instance using the pid recorded in the pid file, then remove it.
# The pid file is removed even if the process is already gone, so a stale
# file does not block the next start.
stop() {
  local pid
  if [[ ! -f "${PID_FILE}" ]]; then
    echo "pid file not exist" >&2
    exit 1
  fi
  pid=$(cat "${PID_FILE}")
  echo "pid is ${pid}"
  # TERM (default signal): redis shuts down gracefully, persisting as
  # configured, before the pid file is cleaned up.
  kill "${pid}"
  rm -f -- "${PID_FILE}"
  echo "stop redis success"
}

# Restart: plain start when not running, otherwise stop then start.
restart() {
  if [[ ! -f "${PID_FILE}" ]]; then
    start
  else
    stop
    start
  fi
}

usage() {
  echo "Usage"
  echo "$0 [-h] [start/stop/restart]"
}

# Dispatch on the first argument only; extra arguments are ignored.
case "${1:-}" in
  start)   start ;;
  stop)    stop ;;
  restart) restart ;;
  "")      ;;  # no action requested — keep the original silent no-op
  *)       usage ;;
esac
# ./vars/main.yml
# Defaults for the redis5_deploy role. `name` and `port` (and optionally
# master_ip/master_port/REDIS_MAXMEMORY) are supplied with -e on the
# command line and override these values.
REDIS_HOST: "{{ansible_host}}"
# NOTE(review): DATA_DEVICE is not referenced by any task shown here —
# confirm it is still needed.
DATA_DEVICE: "vdb"
REDIS_PORT: "{{port}}"
REDIS_MAXMEMORY: 4GB
REDIS_MAXMEMORY_POLICY: noeviction
# Directory layout — duplicated in roles/redis5_init/vars; keep in sync.
REDIS_DATA_PATH: "/piqiu/data/redis"
REDIS_DATA_DIR: "{{REDIS_DATA_PATH}}/redis-{{name}}"
REDIS_PID_DIR: "/piqiu/data/work/redis-{{name}}"
REDIS_PID_FILE: "{{REDIS_PID_DIR}}/redis-{{port}}.pid"
REDIS_LOG_DIR: "/piqiu/logs/usr/redis-{{name}}"
REDIS_LOG_FILE: "{{REDIS_LOG_DIR}}/redis-{{port}}.log"
# NOTE(review): REDIS_PERSISTENCE is not referenced by the templates shown
# here — confirm whether it is still used.
REDIS_PERSISTENCE: true
REDIS_RDB_FILE: "{{port}}.rdb"
REDIS_AOF_FILE: "{{port}}.aof"
REDIS_CONFIG_DIR: "/piqiu/dist/conf/redis-{{name}}"
REDIS_CONFIG_FILE: "{{REDIS_CONFIG_DIR}}/redis-{{port}}.conf"
version: 5.0.5
REDIS_DISTRIBUTION: "redis_{{version}}_linux_amd64.zip"
INSTALL_DIR: "/piqiu/dist/sys/redis-{{name}}"
# Internal mirror serving the distribution archive.
REDIS_INSTALL_SRC: "http://10.200.48.3/piqiu/{{REDIS_DISTRIBUTION}}"
REDIS_SH_FILE: "{{INSTALL_DIR}}/redis-{{port}}.sh"
执行 ansible
a)部署主节点
初始化:
ansible-playbook -i 10.200.16.10, playbooks/infra/redis5_init.yml -e name=soc-cache -v
部署:
ansible-playbook -i 10.200.16.10, playbooks/infra/redis5_deploy.yml -e "name=soc-cache port=6379 REDIS_MAXMEMORY=4GB" -v
b)部署从节点
初始化:
ansible-playbook -i 10.200.16.11, playbooks/infra/redis5_init.yml -e name=soc-cache -v
部署:
ansible-playbook -i 10.200.16.11, playbooks/infra/redis5_deploy.yml -e "name=soc-cache port=6379 master_ip=10.200.16.10 master_port=6379 REDIS_MAXMEMORY=4GB" -v
c)验证
进入 redis cli,通过 info replication 查看主节点连接情况:
10.200.16.11:6379> info replication
# Replication
role:slave
master_host:10.200.16.10
master_port:6379
数据迁移
数据迁移
Note:
redis-shake 下载地址:https://github.com/tair-opensource/RedisShake
redis-shake 中文文档:https://tair-opensource.github.io/RedisShake/zh/guide/getting-started.html
a)这里我们使用 redis-shake 完成数据迁移.
# 下载
wget https://github.com/tair-opensource/RedisShake/releases/download/v4.1.1/redis-shake-linux-amd64.tar.gz
#解压
tar -xvf redis-shake-linux-amd64.tar.gz
b)修改 shake.toml 配置文件
主要配置内容如下:
- `[sync_reader] address`:从哪个 redis 实例中读.
- `[sync_reader] sync_rdb`:同步 rdb 文件中的内容,相当于全量复制(并不是把 rdb 文件发过去,而是解析并执行里面的命令).
- `[sync_reader] sync_aof`:同步 aof 文件中的内容,相当于实时复制(开启后会一直实时复制,不会停止).
- `[redis_writer] address`:写到哪个 redis 实例中去.
# shake.toml — stream data from the old master (10.200.16.75) into the new
# master (10.200.16.10). With sync_aof enabled, redis-shake keeps replicating
# live changes after the full sync until it is stopped manually.
[sync_reader]
cluster = false # set to true if source is a redis cluster
address = "10.200.16.75:6379" # when cluster is true, set address to one of the cluster node
username = "" # keep empty if not using ACL
password = "" # keep empty if no authentication is required
tls = false #
sync_rdb = true # set to false if you don't want to sync rdb
sync_aof = true # set to false if you don't want to sync aof
prefer_replica = false # set to true if you want to sync from replica node
try_diskless = false # set to true if you want to sync by socket and source repl-diskless-sync=yes
[redis_writer]
cluster = false # set to true if target is a redis cluster
sentinel = false # set to true if target is a redis sentinel
master = "" # set to master name if target is a redis sentinel
address = "10.200.16.10:6379" # when cluster is true, set address to one of the cluster node
username = "" # keep empty if not using ACL
password = "" # keep empty if no authentication is required
tls = false
off_reply = false # turn off the server reply
[advanced]
dir = "data"
ncpu = 0 # runtime.GOMAXPROCS, 0 means use runtime.NumCPU() cpu cores
pprof_port = 0 # pprof port, 0 means disable
status_port = 0 # status port, 0 means disable
# log
log_file = "shake.log"
log_level = "info" # debug, info or warn
log_interval = 5 # in seconds
# redis-shake gets key and value from rdb file, and uses RESTORE command to
# create the key in target redis. Redis RESTORE will return a "Target key name
# is busy" error when key already exists. You can use this configuration item
# to change the default behavior of restore:
# panic: redis-shake will stop when meet "Target key name is busy" error.
# rewrite: redis-shake will replace the key with new value.
# skip: redis-shake will skip restore the key when meet "Target key name is busy" error.
rdb_restore_command_behavior = "panic" # panic, rewrite or skip
# redis-shake uses pipeline to improve sending performance.
# This item limits the maximum number of commands in a pipeline.
pipeline_count_limit = 1024
# Client query buffers accumulate new commands. They are limited to a fixed
# amount by default. This amount is normally 1gb.
target_redis_client_max_querybuf_len = 1024_000_000
# In the Redis protocol, bulk requests, that are, elements representing single
# strings, are normally limited to 512 mb.
target_redis_proto_max_bulk_len = 512_000_000
# If the source is Elasticache or MemoryDB, you can set this item.
aws_psync = "" # example: aws_psync = "10.0.0.1:6379@nmfu2sl5osync,10.0.0.1:6379@xhma21xfkssync"
# destination will delete itself entire database before fetching files
# from source during full synchronization.
# This option is similar redis replicas RDB diskless load option:
# repl-diskless-load on-empty-db
empty_db_before_sync = false
[module]
# The data format for BF.LOADCHUNK is not compatible in different versions. v2.6.3 <=> 20603
target_mbbloom_version = 20603
c)执行数据同步脚本,如下命令
./redis-shake shake.toml
此时,执行完全量复制后,开始进行实时复制(等到修改完连接redis项目的配置,手动断开即可).
Nacos 修改配置
此处所有涉及到 Redis 到项目配置都是在 nacos 上的,因此先找找项目中的 NacosConfig(namespace、dataId…),然后改 nacos 上对应的 redis.host 和 redis.port 即可.
nacos 会自动完成热更新(发布系统部署,需要重新发布).