Mysql的xtrabackup备份实现
1.在数据库从节点部署,配置crontab:
#30 01 * * 1-6 /data/dbbackup/scripts/mysqlback.sh incremental 1>/dev/null 2>&1
#30 01 * * 7 /data/dbbackup/scripts/mysqlback.sh full 1>/dev/null 2>&1
2.在 /data/dbbackup 目录下载最新xtrabackup包
percona-xtrabackup-2.4.12-Linux-x86_64.libgcrypt145.tar.gz,解压到
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64
3.创建目录和数据目录,分别如下
脚本运行目录:
/data/dbbackup/
scripts--运行脚本放的地方(mysqlback.sh,config.env),脚本见5
log--运行后产生日志
lsn--存放最新LSN数据,每次备份从这里加载,备份后更新这个目录数据
数据目录:
/udisk/dbbackup
full--全量备份,脚本按7天滚动清理(见脚本clean函数)
incr--增量备份,脚本按2天滚动清理
4.备份与恢复示例
全量备份和解压
1.全量备份(480G大约1个半小时完成,压缩后大概80G):
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64/bin/innobackupex --defaults-file=/etc/my.cnf --parallel=4 --host=172.16.0.6 --port=5688 --user=root --password=Yzf270701# --databases=yzf_task --stream=xbstream --compress --extra-lsndir=/data/dbbackup/lsn /udisk/dbbackup/data 2>/data/dbbackup/log/task.log | pigz -6 -p 4 > /udisk/dbbackup/data/full_yzf_task.xbstream.gz
2. 全量解压(40G压缩文件,解压需要45分钟):
#pigz解压:40G 10min:
pigz -d -p 50 full_yzf_task.xbstream.gz
#xbstream解压 40G 10min:
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64/bin/xbstream -x < /udisk/dbbackup/data/full_yzf_task.xbstream -C /udisk/dbbackup/data
#qpress解压,40G 15min:
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64/bin/xtrabackup --parallel=4 --decompress --target-dir=/udisk/dbbackup/data/full_20181017
增量备份和解压
增量备份1:
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64/bin/innobackupex --defaults-file=/etc/my.cnf --parallel=4 --host=172.16.0.6 --port=5688 --user=root --password=Yzf270701# --databases=yzf_task --stream=xbstream --compress --incremental --incremental-basedir=/data/dbbackup/lsn --extra-lsndir=/data/dbbackup/lsn /udisk/dbbackup/data 2>/data/dbbackup/log/incre_task.log | pigz -6 -p 4 > /udisk/dbbackup/data/incre_yzf_task.xbstream.gz
增量备份1解压:
pigz -d -p 50 incre_yzf_task.xbstream.gz
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64/bin/xbstream -x < /udisk/dbbackup/data/incre_yzf_task.xbstream -C /udisk/dbbackup/data/incre_20181017/
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64/bin/xtrabackup --decompress --target-dir=/udisk/dbbackup/data/incre_20181017
增量备份2:
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64/bin/innobackupex --defaults-file=/etc/my.cnf --parallel=4 --host=172.16.0.6 --port=5688 --user=root --password=Yzf270701# --databases=yzf_task --stream=xbstream --compress --incremental --incremental-basedir=/data/dbbackup/lsn --extra-lsndir=/data/dbbackup/lsn /udisk/dbbackup/data 2>/data/dbbackup/log/incre2_task.log | pigz -6 -p 4 > /udisk/dbbackup/data/incre2_yzf_task.xbstream.gz
增量备份2解压:
pigz -d -p 50 incre2_yzf_task.xbstream.gz
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64/bin/xbstream -x < /udisk/dbbackup/data/incre2_yzf_task.xbstream -C /udisk/dbbackup/data/incre2_20181017/
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64/bin/xtrabackup --decompress --target-dir=/udisk/dbbackup/data/incre2_20181017
恢复:
把全量和增量N次的解压数据合并在一起:
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64/bin/xtrabackup --prepare --apply-log-only --target-dir=/udisk/dbbackup/data/full_20181017
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64/bin/xtrabackup --prepare --apply-log-only --target-dir=/udisk/dbbackup/data/full_20181017 --incremental-dir=/udisk/dbbackup/data/incre_20181017
#最后一步直接prepare
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64/bin/xtrabackup --prepare --target-dir=/udisk/dbbackup/data/full_20181017 --incremental-dir=/udisk/dbbackup/data/incre2_20181017
后续步骤
1.停止mysql
service mysqld stop
#移走mysql目录
mv mysql mysql_bak_test
#创建mysql目录,如果有bin log数据,请在恢复后从对应目录拷贝过来比如 cp -r mysql_bak_test/data mysql/data
mkdir -p /data/mysql
2.执行恢复:
/data/dbbackup/percona-xtrabackup-2.4.12-Linux-x86_64/bin/xtrabackup --defaults-file=/etc/my.cnf --copy-back --target-dir=/udisk/dbbackup/data/full_20181017
3.修改权限:
chown -R mysql:mysql mysql
4.启动数据库:
service mysqld start
5.脚本:
config.env:
#重点关注下面参数
HOST=10.10.180.4
PORT=5688
USER=xxx
PASSWD=xxx
DB="xxx"
PROGRAMDIR=/data/dbbackup #程序运行目录
BACKUPDIR=/udisk/dbbackup # 数据备份的主目录
SCPDIR="root@10.19.83.66:/data/backup/mysql/" #备份的目录
SCPREPORT="root@10.19.83.66:/data/sre_report/daily/" #日志存放目录
#这些基本不用改
CNF_FILE="/etc/my.cnf"
XTRABACKUP=$PROGRAMDIR/percona-xtrabackup-2.4.12-Linux-x86_64/bin/xtrabackup
INNOBACKUP=$PROGRAMDIR/percona-xtrabackup-2.4.12-Linux-x86_64/bin/innobackupex
mysqlback.sh:
#!/bin/sh
#@author: guoyouchun 2018-10-16
# Mysql backup via xtrabackup
#请安装 yum install pigz
#请安装 percona-xtrabackup-2.4.12-Linux-x86_64
#set -x
source ./config.env
TODAY=`date +%Y%m%d%H%M%S`
YESTERDAY=`date -d"yesterday" +%Y%m%d%H%M%S`
USEROPTIONS="--host=$HOST --port=$PORT --user=$USER --password=$PASSWD"
FULLBACKUPDIR=$BACKUPDIR/full # 全库备份的目录
INCRBACKUPDIR=$BACKUPDIR/incr # 增量备份的目录
LSNDIR=$PROGRAMDIR/lsn #最新lsn目录
LOGFILE=$PROGRAMDIR/log/mysql_backup_$TODAY.log
TMPFILE=$PROGRAMDIR/tmp.log
#############################################################################
# Display error message and exit
# log()
#############################################################################
log_err()
{
time=`date +%Y%m%d%H%M%S`
echo "$time:error:$1" >> $LOGFILE
exit 1
}
log_info()
{
time=`date +%Y%m%d%H%M%S`
echo "$time:info:$1" >> $LOGFILE
}
#删除30天前的历史日志
clean()
{
if [ -d $PROGRAMDIR/log/ ];then
find $PROGRAMDIR/log/ -name "mysql_backup_*.log" -mtime +30 |xargs rm -rf
fi
if [ -d $FULLBACKUPDIR ];then
find $FULLBACKUPDIR -name "FULL.*.gz" -mtime +7 |xargs rm -rf
fi
if [ -d $INCRBACKUPDIR ];then
find $INCRBACKUPDIR -name "INCR.*.gz" -mtime +2 |xargs rm -rf
fi
}
check()
{
# Check options before proceeding
if [ ! -x $XTRABACKUP ]; then
log_err "$XTRABACKUP does not exist."
fi
if [ ! -x $INNOBACKUP ]; then
log_err "$INNOBACKUP does not exist."
fi
if [ ! -d $BACKUPDIR ]; then
log_err "$BACKUPDIR does not exist."
fi
if ! `echo 'exit' | mysql -s $USEROPTIONS` ; then
log_err " Supplied mysql username or password appears to be incorrect (not copied here for security, see script)."
fi
}
fullback()
{
# Create full and incr backup directories if they not exist.
for i in $FULLBACKUPDIR $INCRBACKUPDIR $XTRABACKUP_BASE/log/
do
if [ ! -d $i ]; then
mkdir -pv $i
fi
done
check
begin_time=$(date +%s)
#开始备份,并行度4 --databases=$DB暂时不用
name=$FULLBACKUPDIR/FULL.$HOST.$DB.$TODAY.xbstream.gz
$INNOBACKUP --defaults-file=$CNF_FILE --backup --parallel=4 $USEROPTIONS --stream=xbstream --compress --extra-lsndir=$LSNDIR $FULLBACKUPDIR 2>$TMPFILE | pigz -6 -p 4 > $name
end_time=$(date +%s)
#花费时间
cost_time1=$(($end_time-$begin_time))
cur_time=`date +%Y%m%d%H%M%S`
if [ -z "`tail -1 $TMPFILE | grep 'completed OK!'`" ] ; then
log_info "Host:$HOST,Operation_time:$cur_time,Operation_objects:cost[$cost_time1 s] $name,Status:FAIL,Operation_type:backup.db,Detail_log:Backup to local Failed,check $TMPFILE"
else
log_info "Host:$HOST,Operation_time:$cur_time,Operation_objects: cost[$cost_time1 s] $name,Status:OK,Operation_type:backup.db,Detail_log:Backup to local Success"
scp $name $SCPDIR
if [ $? = 0 ]; then
cur_time=`date +%Y%m%d%H%M%S`
log_info "Host:$HOST,Operation_time:$cur_time,Operation_objects: $name,Status:OK,Operation_type:backup.upload,Detail_log:Upload to Remote $SCPDIR Success"
else
cur_time=`date +%Y%m%d%H%M%S`
log_info "Host:$HOST,Operation_time:$cur_time,Operation_objects: cost[$cost_time1 s] $name,Status:FAILED,Operation_type:backup.db,Detail_log:Upload to Remote $SCPDIR Failed"
fi
fi
#上传日志到报表系统
scp $LOGFILE $SCPREPORT
clean
}
incrback()
{
# Create full and incr backup directories if they not exist.
for i in $FULLBACKUPDIR $INCRBACKUPDIR $XTRABACKUP_BASE/log/
do
if [ ! -d $i ]; then
mkdir -pv $i
fi
done
begin_time=$(date +%s)
#开始备份,并行度4
name=$INCRBACKUPDIR/INCR.$HOST.$DB.$TODAY.xbstream.gz
$INNOBACKUP --defaults-file=$CNF_FILE --backup --parallel=4 $USEROPTIONS --stream=xbstream --compress --incremental --incremental-basedir=$LSNDIR --extra-lsndir=$LSNDIR $INCRBACKUPDIR 2>$TMPFILE |pigz -6 -p 4 > $name
end_time=$(date +%s)
#花费时间
cost_time1=$(($end_time-$begin_time))
cur_time=`date +%Y%m%d%H%M%S`
if [ -z "`tail -1 $TMPFILE | grep 'completed OK!'`" ] ; then
log_info "Host:$HOST,Operation_time:$cur_time,Operation_objects:cost[$cost_time1 s] $name,Status:FAIL,Operation_type:backup.db,Detail_log:Backup to local Failed,check $TMPFILE"
else
log_info "Host:$HOST,Operation_time:$cur_time,Operation_objects: cost[$cost_time1 s] $name,Status:OK,Operation_type:backup.db,Detail_log:Backup to local Success"
scp $name $SCPDIR
if [ $? = 0 ]; then
cur_time=`date +%Y%m%d%H%M%S`
log_info "Host:$HOST,Operation_time:$cur_time,Operation_objects: $name,Status:OK,Operation_type:backup.upload,Detail_log:Upload to Remote $SCPDIR Success"
else
cur_time=`date +%Y%m%d%H%M%S`
log_info "Host:$HOST,Operation_time:$cur_time,Operation_objects: cost[$cost_time1 s] $name,Status:FAILED,Operation_type:backup.db,Detail_log:Upload to Remote $SCPDIR Failed"
fi
fi
#上传日志到报表系统
scp $LOGFILE $SCPREPORT
clean
}
usage="Usage: `basename $0` full|incremental"
command=$1
#1全量,其他增量
case $command in
(full)
fullback
;;
(incremental)
incrback
;;
(*)
echo "Error command"
echo "$usage"
;;
esac