Why can mydumper achieve a consistent backup? -- A source code analysis

mydumper is a high-performance, multi-threaded backup and restore tool for MySQL and TiDB; its developers come from MySQL and other companies.

mydumper has the following characteristics:

Written in lightweight C (it relies on glib: #include <glib.h>)
Roughly ten times faster than mysqldump (see the reference material)
Handles both InnoDB and non-InnoDB tables (tables with or without transaction support: non-transactional tables are dumped under a table lock, while InnoDB tables are dumped inside a transaction at the REPEATABLE READ isolation level)
Multi-threaded backup
Multi-threaded restore
Licensed under the GNU General Public License v3, which means it is open source

Principle
The backup process consists of one main thread and a number of worker threads.

  • The main thread takes a global read lock on the instance being backed up with FTWRL (FLUSH TABLES WITH READ LOCK), blocking DML so that a consistent snapshot point can be established, and records the binlog position of that point.
  • It creates the worker threads (the count is set by --threads / -t), initializes the backup job queue, and pushes database metadata (schema) jobs plus the non-InnoDB and InnoDB table backup jobs onto it.
  • Each worker pops jobs from the backup job queue in order and performs the dump (each job type is handled by a dedicated function); a job carries a type, and jobs are processed in the order the job list is traversed.
  • Each worker thread opens its own connection to the instance being backed up and sets its session transaction isolation level to REPEATABLE READ, so that it reads data as of a single consistent point in time.
  • The consistent snapshot read is started while the main thread still holds the global read lock, so the data each worker sees corresponds to the same point in time as the main thread; this is what makes the backup consistent.
  • Jobs are taken from the queue in order: non-InnoDB tables are dumped first, then InnoDB tables. That way the main thread can be told to release the read lock as soon as the non-InnoDB tables are done, minimizing the impact of the backup on the application.
    (Key point)
    Why does this guarantee a consistent backup?

The main thread collects the backup metadata by running SHOW MASTER STATUS and SHOW SLAVE STATUS while it is still holding the global shared (read) lock; at that same moment the worker threads have executed START TRANSACTION with the session isolation level set to REPEATABLE READ, which pins each worker's transaction to the same point in time as the main thread and keeps them consistent with it.
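
To make that interleaving concrete, here is a minimal sketch of the protocol, not mydumper's actual code: one main connection and one worker thread, with placeholder credentials (127.0.0.1 / root / empty password) and no error handling. The essential point is that the binlog position is read and the worker's consistent snapshot is opened while the global read lock is still blocking writes.

#include <mysql.h>
#include <pthread.h>

/* worker: the same statements the real worker threads issue in process_queue() */
static void *worker(void *arg) {
  (void)arg;
  MYSQL *conn = mysql_init(NULL);
  /* placeholder credentials, adjust for your environment */
  mysql_real_connect(conn, "127.0.0.1", "root", "", NULL, 3306, NULL, 0);
  mysql_query(conn, "SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ");
  mysql_query(conn, "START TRANSACTION /*!40108 WITH CONSISTENT SNAPSHOT */");
  /* ... SELECT the assigned chunks here; the snapshot view never moves ... */
  mysql_close(conn);
  return NULL;
}

int main(void) {
  MYSQL *main_conn = mysql_init(NULL);
  mysql_real_connect(main_conn, "127.0.0.1", "root", "", NULL, 3306, NULL, 0);

  /* 1. block all writes */
  mysql_query(main_conn, "FLUSH TABLES WITH READ LOCK");

  /* 2. record the binlog position while writes are blocked */
  mysql_query(main_conn, "SHOW MASTER STATUS");
  MYSQL_RES *r = mysql_store_result(main_conn);
  if (r) mysql_free_result(r);

  /* 3. the worker opens its REPEATABLE READ snapshot before the lock is
   *    released, so its view matches the recorded binlog position */
  pthread_t t;
  pthread_create(&t, NULL, worker, NULL);
  pthread_join(&t, NULL);

  /* 4. release the lock (mydumper does this earlier: as soon as the
   *    snapshots are open and the non-InnoDB tables are dumped) */
  mysql_query(main_conn, "UNLOCK TABLES");
  mysql_close(main_conn);
  return 0;
}

Because FTWRL blocks commits between steps 2 and 3, the binlog coordinates written to the metadata file and the data seen by every worker describe exactly the same database state.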

This is a clever combination of transaction isolation levels and concurrent connections.

(For details, see the comments I added to the code below.)

Now that we know the overall backup approach and design, what does the concrete backup process look like?

From the code it is easy to see that for row-level backups the main thread is responsible for splitting the work (collecting all the jobs), while multiple worker threads carry out the actual dumping.

The main thread splits each table into multiple chunks (based on the number of rows), and each chunk becomes one backup job.

A table's data is split up as follows:


Once the chunking field is determined, mydumper first fetches that field's minimum and maximum values, then estimates the table's row count with EXPLAIN SELECT field FROM db.table, and finally splits the table into multiple chunks (generating multiple jobs) according to the configured number of rows per job.

==> This is what enables parallel dumping of different ranges within a single table.
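
A rough sketch of that splitting logic follows (a simplified illustration, not the real get_chunks_for_table(): it assumes an integer chunking column and an already-open MYSQL connection, and it skips NULL handling and non-numeric fields):

#include <mysql.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Print one WHERE clause per chunk of roughly rows_per_file rows. */
static void print_chunks(MYSQL *conn, const char *db, const char *table,
                         const char *field, unsigned long long rows_per_file) {
  char query[512];

  /* 1. min/max of the chunking column */
  snprintf(query, sizeof(query),
           "SELECT MIN(`%s`), MAX(`%s`) FROM `%s`.`%s`", field, field, db, table);
  if (mysql_query(conn, query)) return;
  MYSQL_RES *res = mysql_store_result(conn);
  MYSQL_ROW row = mysql_fetch_row(res);
  if (!row || !row[0]) { mysql_free_result(res); return; }   /* empty table */
  unsigned long long min = strtoull(row[0], NULL, 10);
  unsigned long long max = strtoull(row[1], NULL, 10);
  mysql_free_result(res);

  /* 2. estimate the row count from EXPLAIN (no full table scan needed) */
  snprintf(query, sizeof(query),
           "EXPLAIN SELECT `%s` FROM `%s`.`%s`", field, db, table);
  if (mysql_query(conn, query)) return;
  res = mysql_store_result(conn);
  MYSQL_FIELD *fields = mysql_fetch_fields(res);
  unsigned int i, rows_col = 0;
  for (i = 0; i < mysql_num_fields(res); i++)
    if (!strcmp(fields[i].name, "rows")) rows_col = i;
  row = mysql_fetch_row(res);
  unsigned long long estimated =
      (row && row[rows_col]) ? strtoull(row[rows_col], NULL, 10) : 0;
  mysql_free_result(res);

  /* 3. derive the key range covered by one job and emit the chunks */
  unsigned long long nchunks = estimated / (rows_per_file ? rows_per_file : 1) + 1;
  unsigned long long step = (max - min) / nchunks + 1;
  for (unsigned long long lo = min; lo <= max; lo += step)
    printf("`%s` >= %llu AND `%s` < %llu\n", field, lo, field, lo + step);
}

Each printed WHERE clause corresponds to one job, so with --rows (rows_per_file) set, several workers can dump disjoint ranges of the same table in parallel.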

The goal is to finish the backup quickly and to keep the time tables are locked as short as possible (non-InnoDB tables are affected the most).

In the code this is implemented as follows:

[screenshot of the relevant source code in the original post]
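
The idea can be sketched as follows (a simplified illustration using the same glib primitives; the names unlock_queue and non_innodb_jobs_left are placeholders standing in for the non_innodb_table_counter / non_innodb_done bookkeeping declared in the source below, and the actual MySQL work is stubbed out):

#include <glib.h>
#include <stdio.h>

static gint non_innodb_jobs_left = 3;      /* set by the main thread while queueing jobs */
static GAsyncQueue *unlock_queue = NULL;   /* placeholder name for the signalling queue */

/* a worker thread: dump one non-InnoDB table, then report completion */
static gpointer worker(gpointer data) {
  (void)data;
  /* ... dump one non-transactional table here, under the global read lock ... */
  if (g_atomic_int_dec_and_test(&non_innodb_jobs_left))
    g_async_queue_push(unlock_queue, GINT_TO_POINTER(1));   /* last one: wake main */
  return NULL;
}

int main(void) {
  unlock_queue = g_async_queue_new();

  GThread *t[3];
  for (int i = 0; i < 3; i++)
    t[i] = g_thread_new("worker", worker, NULL);

  /* main thread, still holding FLUSH TABLES WITH READ LOCK */
  g_async_queue_pop(unlock_queue);   /* blocks until the last non-InnoDB job is done */
  printf("non-InnoDB tables dumped -> main thread can issue UNLOCK TABLES now\n");
  /* InnoDB chunks keep dumping after this point, protected by their snapshots */

  for (int i = 0; i < 3; i++)
    g_thread_join(t[i]);
  return 0;
}

The earlier the global read lock is dropped, the shorter the window in which writes are blocked, which is why the non-InnoDB tables are scheduled first.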

Finally, here is the source code with my annotations:

/*
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.

        Authors: 	Domas Mituzas, Facebook ( domas at fb dot com )
                    Mark Leith, Oracle Corporation (mark dot leith at oracle dot com)
                    Andrew Hutchings, SkySQL (andrew at skysql dot com)
                    Max Bubenick, Percona RDBA (max dot bubenick at percona dot com)
*/

#define _LARGEFILE64_SOURCE
#define _FILE_OFFSET_BITS 64

#include <mysql.h>

#if defined MARIADB_CLIENT_VERSION_STR && !defined MYSQL_SERVER_VERSION
#define MYSQL_SERVER_VERSION MARIADB_CLIENT_VERSION_STR
#endif

#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <glib.h>
#include <stdlib.h>
#include <stdarg.h>
#include <errno.h>
#include <time.h>
#include <zlib.h>
#include <pcre.h>
#include <signal.h>
#include <glib/gstdio.h>
#include "config.h"
#ifdef WITH_BINLOG
#include "binlog.h"
#else
#include "mydumper.h"
#endif
#include "server_detect.h"
#include "connection.h"
#include "common.h"
#include "g_unix_signal.h"
#include <math.h>
#include "getPassword.h"
#include "logging.h"
#include "set_verbose.h"

char *regexstring = NULL;          // char * pointer; holds the regular expression passed via --regex

const char DIRECTORY[] = "export";
#ifdef WITH_BINLOG
const char BINLOG_DIRECTORY[] = "binlog_snapshot";
const char DAEMON_BINLOGS[] = "binlogs";
#endif

/* Some earlier versions of MySQL do not yet define MYSQL_TYPE_JSON */
#ifndef MYSQL_TYPE_JSON
#define MYSQL_TYPE_JSON 245
#endif

static GMutex *init_mutex = NULL;
//the _GMutex type from glib
//opaque data types hide their internal format or structure; in C they behave like black boxes, and few languages support them

/* Program options */
gchar *output_directory = NULL;
guint statement_size = 1000000;
guint rows_per_file = 0;
guint chunk_filesize = 0;
int longquery = 60;
int longquery_retries = 0;
int longquery_retry_interval = 60;
int build_empty_files = 0;
int skip_tz = 0;
int need_dummy_read = 0;
int need_dummy_toku_read = 0;
int compress_output = 0;
int killqueries = 0;
int detected_server = 0;
int lock_all_tables = 0;
guint snapshot_interval = 60;
gboolean daemon_mode = FALSE;
gboolean have_snapshot_cloning = FALSE;

gchar *ignore_engines = NULL;
char **ignore = NULL;

gchar *tables_list = NULL;
gchar *tidb_snapshot = NULL;
GSequence *tables_skiplist = NULL;
gchar *tables_skiplist_file = NULL;
char **tables = NULL;
GList *no_updated_tables = NULL;

#ifdef WITH_BINLOG
gboolean need_binlogs = FALSE;
gchar *binlog_directory = NULL;
gchar *daemon_binlog_directory = NULL;
#endif

gboolean no_schemas = FALSE;
gboolean no_data = FALSE;
gboolean no_locks = FALSE;
gboolean dump_triggers = FALSE;
gboolean dump_events = FALSE;
gboolean dump_routines = FALSE;
gboolean no_dump_views = FALSE;
gboolean less_locking = FALSE;
gboolean use_savepoints = FALSE;
gboolean success_on_1146 = FALSE;
gboolean no_backup_locks = FALSE;
gboolean insert_ignore = FALSE;

GList *innodb_tables = NULL;
GMutex *innodb_tables_mutex = NULL;
GList *non_innodb_table = NULL;
GMutex *non_innodb_table_mutex = NULL;
GList *table_schemas = NULL;
GMutex *table_schemas_mutex = NULL;
GList *view_schemas = NULL;
GMutex *view_schemas_mutex = NULL;
GList *schema_post = NULL;
GMutex *schema_post_mutex = NULL;
gint database_counter = 0;
gint non_innodb_table_counter = 0;
gint non_innodb_done = 0;
guint less_locking_threads = 0;
guint updated_since = 0;
guint trx_consistency_only = 0;
guint complete_insert = 0;
gchar *set_names_str=NULL;

// For daemon mode, 0 or 1
guint dump_number = 0;
guint binlog_connect_id = 0;
gboolean shutdown_triggered = FALSE;
GAsyncQueue *start_scheduled_dump;
GMainLoop *m1;
static GCond *ll_cond = NULL;
static GMutex *ll_mutex = NULL;

int errors;

static GOptionEntry entries[] = {
    {"database", 'B', 0, G_OPTION_ARG_STRING, &db, "Database to dump", NULL},
    {"tables-list", 'T', 0, G_OPTION_ARG_STRING, &tables_list,
     "Comma delimited table list to dump (does not exclude regex option)",
     NULL},
    {"omit-from-file", 'O', 0, G_OPTION_ARG_STRING, &tables_skiplist_file,
     "File containing a list of database.table entries to skip, one per line "
     "(skips before applying regex option)",
     NULL},
    {"outputdir", 'o', 0, G_OPTION_ARG_FILENAME, &output_directory,
     "Directory to output files to", NULL},
    {"statement-size", 's', 0, G_OPTION_ARG_INT, &statement_size,
     "Attempted size of INSERT statement in bytes, default 1000000", NULL},
    {"rows", 'r', 0, G_OPTION_ARG_INT, &rows_per_file,
     "Try to split tables into chunks of this many rows. This option turns off "
     "--chunk-filesize",
     NULL},
    {"chunk-filesize", 'F', 0, G_OPTION_ARG_INT, &chunk_filesize,
     "Split tables into chunks of this output file size. This value is in MB",
     NULL},
    {"compress", 'c', 0, G_OPTION_ARG_NONE, &compress_output,
     "Compress output files", NULL},
    {"build-empty-files", 'e', 0, G_OPTION_ARG_NONE, &build_empty_files,
     "Build dump files even if no data available from table", NULL},
    {"regex", 'x', 0, G_OPTION_ARG_STRING, &regexstring,
     "Regular expression for 'db.table' matching", NULL},
    {"ignore-engines", 'i', 0, G_OPTION_ARG_STRING, &ignore_engines,
     "Comma delimited list of storage engines to ignore", NULL},
    {"insert-ignore", 'N', 0, G_OPTION_ARG_NONE, &insert_ignore,
     "Dump rows with INSERT IGNORE", NULL},
    {"no-schemas", 'm', 0, G_OPTION_ARG_NONE, &no_schemas,
     "Do not dump table schemas with the data", NULL},
    {"no-data", 'd', 0, G_OPTION_ARG_NONE, &no_data, "Do not dump table data",
     NULL},
    {"triggers", 'G', 0, G_OPTION_ARG_NONE, &dump_triggers, "Dump triggers",
     NULL},
    {"events", 'E', 0, G_OPTION_ARG_NONE, &dump_events, "Dump events", NULL},
    {"routines", 'R', 0, G_OPTION_ARG_NONE, &dump_routines,
     "Dump stored procedures and functions", NULL},
    {"no-views", 'W', 0, G_OPTION_ARG_NONE, &no_dump_views, "Do not dump VIEWs",
     NULL},
    {"no-locks", 'k', 0, G_OPTION_ARG_NONE, &no_locks,
     "Do not execute the temporary shared read lock.  WARNING: This will cause "
     "inconsistent backups",
     NULL},
    {"no-backup-locks", 0, 0, G_OPTION_ARG_NONE, &no_backup_locks,
     "Do not use Percona backup locks", NULL},
    {"less-locking", 0, 0, G_OPTION_ARG_NONE, &less_locking,
     "Minimize locking time on InnoDB tables.", NULL},
    {"long-query-retries", 0, 0, G_OPTION_ARG_INT, &longquery_retries,
     "Retry checking for long queries, default 0 (do not retry)", NULL},
    {"long-query-retry-interval", 0, 0, G_OPTION_ARG_INT, &longquery_retry_interval,
     "Time to wait before retrying the long query check in seconds, default 60", NULL},
    {"long-query-guard", 'l', 0, G_OPTION_ARG_INT, &longquery,
     "Set long query timer in seconds, default 60", NULL},
    {"kill-long-queries", 'K', 0, G_OPTION_ARG_NONE, &killqueries,
     "Kill long running queries (instead of aborting)", NULL},
#ifdef WITH_BINLOG
    {"binlogs", 'b', 0, G_OPTION_ARG_NONE, &need_binlogs,
     "Get a snapshot of the binary logs as well as dump data", NULL},
#endif
    {"daemon", 'D', 0, G_OPTION_ARG_NONE, &daemon_mode, "Enable daemon mode",
     NULL},
    {"snapshot-interval", 'I', 0, G_OPTION_ARG_INT, &snapshot_interval,
     "Interval between each dump snapshot (in minutes), requires --daemon, "
     "default 60",
     NULL},
    {"logfile", 'L', 0, G_OPTION_ARG_FILENAME, &logfile,
     "Log file name to use, by default stdout is used", NULL},
    {"tz-utc", 0, G_OPTION_FLAG_REVERSE, G_OPTION_ARG_NONE, &skip_tz,
     "SET TIME_ZONE='+00:00' at top of dump to allow dumping of TIMESTAMP data "
     "when a server has data in different time zones or data is being moved "
     "between servers with different time zones, defaults to on use "
     "--skip-tz-utc to disable.",
     NULL},
    {"skip-tz-utc", 0, 0, G_OPTION_ARG_NONE, &skip_tz, "", NULL},
    {"use-savepoints", 0, 0, G_OPTION_ARG_NONE, &use_savepoints,
     "Use savepoints to reduce metadata locking issues, needs SUPER privilege",
     NULL},
    {"success-on-1146", 0, 0, G_OPTION_ARG_NONE, &success_on_1146,
     "Not increment error count and Warning instead of Critical in case of "
     "table doesn't exist",
     NULL},
    {"lock-all-tables", 0, 0, G_OPTION_ARG_NONE, &lock_all_tables,
     "Use LOCK TABLE for all, instead of FTWRL", NULL},
    {"updated-since", 'U', 0, G_OPTION_ARG_INT, &updated_since,
     "Use Update_time to dump only tables updated in the last U days", NULL},
    {"trx-consistency-only", 0, 0, G_OPTION_ARG_NONE, &trx_consistency_only,
     "Transactional consistency only", NULL},
    {"complete-insert", 0, 0, G_OPTION_ARG_NONE, &complete_insert,
     "Use complete INSERT statements that include column names", NULL},
    { "set-names",0, 0, G_OPTION_ARG_STRING, &set_names_str, 
      "Sets the names, use it at your own risk, default binary", NULL },
    {"tidb-snapshot", 'z', 0, G_OPTION_ARG_STRING, &tidb_snapshot,
     "Snapshot to use for TiDB", NULL},
    {NULL, 0, 0, G_OPTION_ARG_NONE, NULL, NULL, NULL}};

struct tm tval;

void dump_schema_data(MYSQL *conn, char *database, char *table, char *filename);
void dump_triggers_data(MYSQL *conn, char *database, char *table,
                        char *filename);
void dump_view_data(MYSQL *conn, char *database, char *table, char *filename,
                    char *filename2);
void dump_schema(MYSQL *conn, char *database, char *table,
                 struct configuration *conf);
void dump_view(char *database, char *table, struct configuration *conf);
void dump_table(MYSQL *conn, char *database, char *table,
                struct configuration *conf, gboolean is_innodb);
void dump_tables(MYSQL *, GList *, struct configuration *);
void dump_schema_post(char *database, struct configuration *conf);
void restore_charset(GString *statement);
void set_charset(GString *statement, char *character_set,
                 char *collation_connection);
void dump_schema_post_data(MYSQL *conn, char *database, char *filename);
guint64 dump_table_data(MYSQL *, FILE *, char *, char *, char *, char *);
void dump_database(char *, struct configuration *);
void dump_database_thread(MYSQL *, char *);
void dump_create_database(char *, struct configuration *);
void dump_create_database_data(MYSQL *, char *, char *);
void get_tables(MYSQL *conn, struct configuration *);
void get_not_updated(MYSQL *conn, FILE *);
GList *get_chunks_for_table(MYSQL *, char *, char *,
                            struct configuration *conf);
guint64 estimate_count(MYSQL *conn, char *database, char *table, char *field,
                       char *from, char *to);
void dump_table_data_file(MYSQL *conn, char *database, char *table, char *where,
                          char *filename);
void create_backup_dir(char *directory);
gboolean write_data(FILE *, GString *);
gboolean check_regex(char *database, char *table);
gboolean check_skiplist(char *database, char *table);
int tables_skiplist_cmp(gconstpointer a, gconstpointer b, gpointer user_data);
void read_tables_skiplist(const gchar *filename);
#ifdef WITH_BINLOG
MYSQL *reconnect_for_binlog(MYSQL *thrconn);
void *binlog_thread(void *data);
#endif
void start_dump(MYSQL *conn);
MYSQL *create_main_connection();
void *exec_thread(void *data);
void write_log_file(const gchar *log_domain, GLogLevelFlags log_level,
                    const gchar *message, gpointer user_data);

gboolean sig_triggered(gpointer user_data) {
  (void)user_data;

  g_message("Shutting down gracefully");
  shutdown_triggered = TRUE;
  g_main_loop_quit(m1);
  return FALSE;
}

void clear_dump_directory() {
  GError *error = NULL;
  char *dump_directory =
      g_strdup_printf("%s/%d", output_directory, dump_number);
  GDir *dir = g_dir_open(dump_directory, 0, &error);

  if (error) {
    g_critical("cannot open directory %s, %s\n", dump_directory,
               error->message);
    errors++;
    return;
  }

  const gchar *filename = NULL;

  while ((filename = g_dir_read_name(dir))) {
    gchar *path = g_build_filename(dump_directory, filename, NULL);
    if (g_unlink(path) == -1) {
      g_critical("error removing file %s (%d)\n", path, errno);
      errors++;
      return;
    }
    g_free(path);
  }

  g_dir_close(dir);
  g_free(dump_directory);
}

gboolean run_snapshot(gpointer *data) {
  (void)data;

  g_async_queue_push(start_scheduled_dump, GINT_TO_POINTER(1));

  return (shutdown_triggered) ? FALSE : TRUE;
}

/* Check database.table string against regular expression */

gboolean check_regex(char *database, char *table) {
  /* This is not going to be used in threads */
  static pcre *re = NULL;
  int rc;
  int ovector[9] = {0};
  const char *error;
  int erroroffset;

  char *p;

  /* Let's compile the RE before we do anything */
  if (!re) {
    re = pcre_compile(regexstring, PCRE_CASELESS | PCRE_MULTILINE, &error,
                      &erroroffset, NULL);
    if (!re) {
      g_critical("Regular expression fail: %s", error);
      exit(EXIT_FAILURE);
    }
  }

  p = g_strdup_printf("%s.%s", database, table);
  rc = pcre_exec(re, NULL, p, strlen(p), 0, 0, ovector, 9);
  g_free(p);

  return (rc > 0) ? TRUE : FALSE;
}

/* Check database.table string against skip list; returns TRUE if found */

gboolean check_skiplist(char *database, char *table) {
  if (g_sequence_lookup(tables_skiplist,
                        g_strdup_printf("%s.%s", database, table),
                        tables_skiplist_cmp, NULL)) {
    return TRUE;
  } else {
    return FALSE;
  };
}

/* Comparison function for skiplist sort and lookup */

int tables_skiplist_cmp(gconstpointer a, gconstpointer b, gpointer user_data) {
  /* Not using user_data, but needed for function prototype, shutting up
   * compiler warnings about unused variable */
  (void)user_data;
  /* Any sorting function would work, as long as its usage is consistent
   * between sort and lookup.  strcmp should be one of the fastest. */
  return strcmp(a, b);
}

/* Read the list of tables to skip from the given filename, and prepares them
 * for future lookups. */

void read_tables_skiplist(const gchar *filename) {
  GIOChannel *tables_skiplist_channel = NULL;
  gchar *buf = NULL;
  GError *error = NULL;
  /* Create skiplist if it does not exist */
  if (!tables_skiplist) {
    tables_skiplist = g_sequence_new(NULL);
  };
  tables_skiplist_channel = g_io_channel_new_file(filename, "r", &error);

  /* Error opening/reading the file? bail out. */
  if (!tables_skiplist_channel) {
    g_critical("cannot read/open file %s, %s\n", filename, error->message);
    errors++;
    return;
  };

  /* Read lines, push them to the list */
  do {
    g_io_channel_read_line(tables_skiplist_channel, &buf, NULL, NULL, NULL);
    if (buf) {
      g_strchomp(buf);
      g_sequence_append(tables_skiplist, buf);
    };
  } while (buf);
  g_io_channel_shutdown(tables_skiplist_channel, FALSE, NULL);
  /* Sort the list, so that lookups work */
  g_sequence_sort(tables_skiplist, tables_skiplist_cmp, NULL);
  g_message("Omit list file contains %d tables to skip\n",
            g_sequence_get_length(tables_skiplist));
  return;
}

/* Write some stuff we know about the snapshot, before it changes */
//fetch the binlog information of the source MySQL instance at the moment of the backup and write it into the metadata file
void write_snapshot_info(MYSQL *conn, FILE *file) {
  MYSQL_RES *master = NULL, *slave = NULL, *mdb = NULL;
  MYSQL_FIELD *fields;
  MYSQL_ROW row;

  char *masterlog = NULL;
  char *masterpos = NULL;
  char *mastergtid = NULL;

  char *connname = NULL;
  char *slavehost = NULL;
  char *slavelog = NULL;
  char *slavepos = NULL;
  char *slavegtid = NULL;
  guint isms;
  guint i;

  mysql_query(conn, "SHOW MASTER STATUS");
  master = mysql_store_result(conn);
  if (master && (row = mysql_fetch_row(master))) {
    masterlog = row[0];
    masterpos = row[1];
    /* Oracle/Percona GTID */
    if (mysql_num_fields(master) == 5) {
      mastergtid = row[4];
    } else {
      /* Let's try with MariaDB 10.x */
      /* Use gtid_binlog_pos due to issue with gtid_current_pos with galera
       * cluster, gtid_binlog_pos works as well with normal mariadb server
       * https://jira.mariadb.org/browse/MDEV-10279 */
      mysql_query(conn, "SELECT @@gtid_binlog_pos");
      mdb = mysql_store_result(conn);
      if (mdb && (row = mysql_fetch_row(mdb))) {
        mastergtid = row[0];
      }
    }
  }

  if (masterlog) {
    fprintf(file, "SHOW MASTER STATUS:\n\tLog: %s\n\tPos: %s\n\tGTID:%s\n\n",
            masterlog, masterpos, mastergtid);
    g_message("Written master status");
//mydumper log output:  ** Message: Written master status
  }
  //this part runs regardless of whether the source instance is itself a replica

  isms = 0;
  mysql_query(conn, "SELECT @@default_master_connection");
  MYSQL_RES *rest = mysql_store_result(conn);
  if (rest != NULL && mysql_num_rows(rest)) {
  /* mysql_num_rows() in the MySQL C API returns the number of rows in the result set.
     Reference material on the MySQL C API (in Chinese):
     https://blog.csdn.net/qq_40421919/article/details/93355219 */
    mysql_free_result(rest);
    g_message("Multisource slave detected."); //当前数据库角色为slave,且有多个master
    isms = 1;
  }

  if (isms)
    mysql_query(conn, "SHOW ALL SLAVES STATUS");  //mysql没有这个命令吧
  else
    mysql_query(conn, "SHOW SLAVE STATUS");    //这里应该是mysql ????
//iterate over the returned rows and fields to extract the values below
  slave = mysql_store_result(conn);
  while (slave && (row = mysql_fetch_row(slave))) {
    fields = mysql_fetch_fields(slave);
    for (i = 0; i < mysql_num_fields(slave); i++) {
      if (isms && !strcasecmp("connection_name", fields[i].name))
        connname = row[i];
      if (!strcasecmp("exec_master_log_pos", fields[i].name)) {
        slavepos = row[i];
      } else if (!strcasecmp("relay_master_log_file", fields[i].name)) {
        slavelog = row[i];
      } else if (!strcasecmp("master_host", fields[i].name)) {
        slavehost = row[i];
      } else if (!strcasecmp("Executed_Gtid_Set", fields[i].name) ||
                 !strcasecmp("Gtid_Slave_Pos", fields[i].name)) {
        slavegtid = row[i];
      }
    }
    //if the instance is a replica, its replication info is recorded as well; it can be seen in the metadata file
    if (slavehost) {
      fprintf(file, "SHOW SLAVE STATUS:");
      if (isms)
        fprintf(file, "\n\tConnection name: %s", connname);
      fprintf(file, "\n\tHost: %s\n\tLog: %s\n\tPos: %s\n\tGTID:%s\n\n",
              slavehost, slavelog, slavepos, slavegtid);
      g_message("Written slave status");
    }
  }
//flush the file buffer
  fflush(file);
  if (master)
    mysql_free_result(master);
  if (slave)
    mysql_free_result(slave);
  if (mdb)
    mysql_free_result(mdb);
}
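
/* Example of what write_snapshot_info() above ends up writing to the
 * metadata file (the values here are only illustrative):
 *
 *   SHOW MASTER STATUS:
 *       Log: mysql-bin.000001
 *       Pos: 154
 *       GTID:<executed gtid set>
 */
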
//as many threads as specified by --threads run this function, fetching MySQL data in parallel
void *process_queue(struct thread_data *td) {
  struct configuration *conf = td->conf;
  // mysql_init is not thread safe, especially in Connector/C
  g_mutex_lock(init_mutex);
  MYSQL *thrconn = mysql_init(NULL);
  g_mutex_unlock(init_mutex);

  configure_connection(thrconn, "mydumper");

  if (!mysql_real_connect(thrconn, hostname, username, password, NULL, port,
                          socket_path, 0)) {
    g_critical("Failed to connect to database: %s", mysql_error(thrconn));
    exit(EXIT_FAILURE);
  } else {
    g_message("Thread %d connected using MySQL connection ID %lu",
              td->thread_id, mysql_thread_id(thrconn));
  }
  //** Message: Thread 11 connected using MySQL connection ID 265
//use_savepoints is a gboolean option; when it is set, binary logging is disabled for this session first
  if (use_savepoints && mysql_query(thrconn, "SET SQL_LOG_BIN = 0")) {
    g_critical("Failed to disable binlog for the thread: %s",
               mysql_error(thrconn));
    exit(EXIT_FAILURE);
  }
  //the next few steps can be observed in the general log: set a large session-level wait_timeout, switch to REPEATABLE READ and start a consistent snapshot read, much like mysqldump's --single-transaction
  //this step targets MySQL
  if ((detected_server == SERVER_TYPE_MYSQL) &&
      mysql_query(thrconn, "SET SESSION wait_timeout = 2147483")) {
    g_warning("Failed to increase wait_timeout: %s", mysql_error(thrconn));
  }
  if (mysql_query(thrconn,
                  "SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ")) {
    g_critical("Failed to set isolation level: %s", mysql_error(thrconn));
    exit(EXIT_FAILURE);
  }
  if (mysql_query(thrconn,
                  "START TRANSACTION /*!40108 WITH CONSISTENT SNAPSHOT */")) {
    g_critical("Failed to start consistent snapshot: %s", mysql_error(thrconn));
    exit(EXIT_FAILURE);
  }
  if (!skip_tz && mysql_query(thrconn, "/*!40103 SET TIME_ZONE='+00:00' */")) {
    g_critical("Failed to set time zone: %s", mysql_error(thrconn));
  }
//this part targets TiDB
  if (detected_server == SERVER_TYPE_TIDB) {

    // Worker threads must set their tidb_snapshot in order to be safe
    // Because no locking has been used.

    gchar *query =
        g_strdup_printf("SET SESSION tidb_snapshot = '%s'", tidb_snapshot);

    if (mysql_query(thrconn, query)) {
      g_critical("Failed to set tidb_snapshot: %s", mysql_error(thrconn));
      exit(EXIT_FAILURE);
    }
    g_free(query);

    g_message("Thread %d set to tidb_snapshot '%s'", td->thread_id,
              tidb_snapshot);
  }

  /* Unfortunately versions before 4.1.8 did not support consistent snapshot
   * transaction starts, so we cheat */
  if (need_dummy_read) {
    mysql_query(thrconn,
                "SELECT /*!40001 SQL_NO_CACHE */ * FROM mysql.mydumperdummy");
    MYSQL_RES *res = mysql_store_result(thrconn);
    if (res)
      mysql_free_result(res);
  }
  if (need_dummy_toku_read) {
    mysql_query(thrconn,
                "SELECT /*!40001 SQL_NO_CACHE */ * FROM mysql.tokudbdummy");
    MYSQL_RES *res = mysql_store_result(thrconn);
    if (res)
      mysql_free_result(res);
  }
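  /* set_names_str holds the SET NAMES statement built from the --set-names option (default binary, per the option help above) */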
	mysql_query(thrconn, set_names_str);

  g_async_queue_push(conf->ready, GINT_TO_POINTER(1));
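  // the push above tells the main thread that this worker's consistent snapshot
  // is open; the main thread pops conf->ready once per worker, so it knows every
  // snapshot was opened while the global read lock was still held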

  struct job *job = NULL;
  struct table_job *tj = NULL;
  struct dump_database_job *ddj = NULL;
  struct create_database_job *cdj = NULL;
  struct schema_job *sj = NULL;
  struct view_job *vj = NULL;
  struct schema_post_job *sp = NULL;
#ifdef WITH_BINLOG
  struct binlog_job *bj = NULL;
#endif
  /* if less locking we need to wait until that threads finish
      progressively waking up these threads */
  if (less_locking) {
    g_mutex_lock(ll_mutex);

    while (less_locking_threads >= td->thread_id) {
      g_cond_wait(ll_cond, ll_mutex);
    }

    g_mutex_unlock(ll_mutex);
  }

  for (;;) {
//for (;;) is the idiomatic infinite loop, equivalent to while (true):
//the worker keeps pulling jobs from the queue until it is told to stop
    GTimeVal tv;
//check whether a job is available in the queue; if not, the worker sleeps in a timed wait (using the tv deadline computed below) until the producer pushes a job and wakes it up
    g_get_current_time(&tv);
    g_time_val_add(&tv, 