Debezium Series: Reading the SnapshotReader Source Code
1. MySQL transaction isolation levels
- The four isolation levels, from lowest to highest: Read Uncommitted, Read Committed, Repeatable Read, and Serializable.
- MySQL's default isolation level is Repeatable Read.
- Repeatable Read avoids dirty reads and non-repeatable reads, but phantom reads can in principle still occur.
- Under Repeatable Read, if the search condition uses an index (including the primary key), InnoDB's default locking is the next-key lock; if there is no usable index, an update ends up locking the entire table. Once a transaction has locked a gap, other transactions cannot insert rows into that gap, which prevents phantom reads.
Dirty read:
- Transaction A reads data that transaction B has updated but not yet committed. If B then rolls back, the data A read is dirty.
Non-repeatable read:
- Transaction A reads the same data several times while transaction B modifies and commits it in between, so A's repeated reads of the same data return inconsistent results.
Phantom read:
- Transaction A reads rows from a table, then transaction B inserts new rows into that table; when A reads the same table again, extra rows appear.
- In other words, repeating the same read returns a different result set. Next-key locks solve this by locking the whole range that was read; a JDBC sketch of the corresponding session setup follows.
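As a minimal JDBC sketch (the connection URL and credentials are placeholders, not the connector's settings), this is how a session can be pinned to REPEATABLE READ and given a consistent MVCC snapshot, which is exactly the setup the snapshot reader performs in the steps below:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class RepeatableReadDemo {
    public static void main(String[] args) throws Exception {
        // hypothetical connection settings, only for illustration
        try (Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/inventory", "user", "pass");
             Statement stmt = conn.createStatement()) {
            conn.setAutoCommit(false);                                        // keep the transaction open
            stmt.execute("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ");  // applies to the next transaction
            stmt.execute("START TRANSACTION WITH CONSISTENT SNAPSHOT");       // fix the MVCC read view now
            // repeated SELECTs here see the same snapshot, regardless of concurrent commits
            conn.commit();
        }
    }
}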
2. Setting the transaction isolation level
final long snapshotLockTimeout = context.getConnectorConfig().snapshotLockTimeout().getSeconds();
logger.info("Step 0: disabling autocommit, enabling repeatable read transactions, and setting lock wait timeout to {}",
snapshotLockTimeout);
mysql.setAutoCommit(false);
sql.set("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ");
mysql.executeWithoutCommitting(sql.get());
sql.set("SET SESSION lock_wait_timeout=" + snapshotLockTimeout);
mysql.executeWithoutCommitting(sql.get());
try {
sql.set("SET SESSION innodb_lock_wait_timeout=" + snapshotLockTimeout);
mysql.executeWithoutCommitting(sql.get());
}
catch (SQLException e) {
logger.warn("Unable to set innodb_lock_wait_timeout", e);
}
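The calls above go through executeWithoutCommitting so that everything stays inside the single open snapshot transaction. A rough sketch of what such a helper amounts to (an assumption about its behavior, not the JdbcConnection source):

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

class NonCommittingExecutor {
    // runs a statement on a connection whose autocommit is disabled and deliberately never calls commit(),
    // so the snapshot transaction opened by the reader remains open
    static void executeWithoutCommitting(Connection connection, String statement) throws SQLException {
        try (Statement s = connection.createStatement()) {
            s.execute(statement);
        }
    }
}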
3. Acquiring the global read lock
if (!snapshotLockingMode.equals(MySqlConnectorConfig.SnapshotLockingMode.NONE) && useGlobalLock) {
try {
logger.info("Step 1: flush and obtain global read lock to prevent writes to database");
sql.set(snapshotLockingMode.getLockStatement());
mysql.executeWithoutCommitting(sql.get());
lockAcquired = clock.currentTimeInMillis();
metrics.globalLockAcquired();
isLocked = true;
}
catch (SQLException e) {
logger.info("Step 1: unable to flush and acquire global read lock, will use table read locks after reading table names");
// Continue anyway, since RDS (among others) don't allow setting a global lock
assert !isLocked;
}
// FLUSH TABLES resets TX and isolation level
sql.set("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ");
mysql.executeWithoutCommitting(sql.get());
}
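A compact sketch of the sequence above, assuming that for the default minimal locking mode getLockStatement() resolves to the classic global read lock (this mapping is an assumption, not taken verbatim from the connector source):

// assumption: the minimal locking mode issues a global read lock
mysql.executeWithoutCommitting("FLUSH TABLES WITH READ LOCK");
// FLUSH TABLES implicitly ends the current transaction and resets its characteristics,
// which is why the isolation level is set again immediately after the lock is taken
mysql.executeWithoutCommitting("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ");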
4. Starting a transaction with a consistent snapshot
logger.info("Step 2: start transaction with consistent snapshot");
sql.set("START TRANSACTION WITH CONSISTENT SNAPSHOT");
mysql.executeWithoutCommitting(sql.get());
5. Once the lock is acquired, read the binlog filename and position
if (isLocked) {
// Obtain the binlog position and update the SourceInfo in the context. This means that all source records
// generated as part of the snapshot will contain the binlog position of the snapshot.
readBinlogPosition(step++, source, mysql, sql);
}
The readBinlogPosition method looks like this:
protected void readBinlogPosition(int step, SourceInfo source, JdbcConnection mysql, AtomicReference<String> sql) throws SQLException {
if (context.isSchemaOnlyRecoverySnapshot()) {
// We are in schema only recovery mode, use the existing binlog position
if (Strings.isNullOrEmpty(source.binlogFilename())) {
// would like to also verify binlog position exists, but it defaults to 0 which is technically valid
throw new IllegalStateException("Could not find existing binlog information while attempting schema only recovery snapshot");
}
source.startSnapshot();
}
else {
logger.info("Step {}: read binlog position of MySQL primary server", step);
String showMasterStmt = "SHOW MASTER STATUS";
sql.set(showMasterStmt);
mysql.query(sql.get(), rs -> {
if (rs.next()) {
String binlogFilename = rs.getString(1);
long binlogPosition = rs.getLong(2);
source.setBinlogStartPoint(binlogFilename, binlogPosition);
if (rs.getMetaData().getColumnCount() > 4) {
// This column exists only in MySQL 5.6.5 or later ...
String gtidSet = rs.getString(5); // GTID set, may be null, blank, or contain a GTID set
source.setCompletedGtidSet(gtidSet);
logger.info("\t using binlog '{}' at position '{}' and gtid '{}'", binlogFilename, binlogPosition,
gtidSet);
}
else {
logger.info("\t using binlog '{}' at position '{}'", binlogFilename, binlogPosition);
}
source.startSnapshot();
}
else {
throw new IllegalStateException("Cannot read the binlog filename and position via '" + showMasterStmt
+ "'. Make sure your server is correctly configured");
}
});
}
}
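SHOW MASTER STATUS returns the current binlog file name and position as its first two columns and, on MySQL 5.6.5 or later, the executed GTID set as the fifth column, which is what the code above relies on. A standalone sketch of the same read (connection details are placeholders):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class BinlogPositionDemo {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/", "user", "pass");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SHOW MASTER STATUS")) {
            if (rs.next()) {
                String binlogFilename = rs.getString(1);   // e.g. mysql-bin.000003
                long binlogPosition = rs.getLong(2);       // byte offset within that file
                String gtidSet = rs.getMetaData().getColumnCount() > 4 ? rs.getString(5) : null;
                System.out.printf("binlog=%s position=%d gtids=%s%n", binlogFilename, binlogPosition, gtidSet);
            }
        }
    }
}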
6. Obtaining the tables to snapshot
logger.info("Step {}: read list of available tables in each database", step++);
List<TableId> knownTableIds = new ArrayList<>();
final List<TableId> capturedTableIds = new ArrayList<>();
final Filters createTableFilters = getCreateTableFilters(filters);
final Map<String, List<TableId>> createTablesMap = new HashMap<>();
final Set<String> readableDatabaseNames = new HashSet<>();
for (String dbName : databaseNames) {
try {
// MySQL sometimes considers some local files as databases (see DBZ-164),
// so we will simply try each one and ignore the problematic ones ...
sql.set("SHOW FULL TABLES IN " + quote(dbName) + " where Table_Type = 'BASE TABLE'");
mysql.query(sql.get(), rs -> {
while (rs.next() && isRunning()) {
TableId id = new TableId(dbName, null, rs.getString(1));
final boolean shouldRecordTableSchema = shouldRecordTableSchema(schema, filters, id);
// Apply only when the table include list is not dynamically reconfigured
if ((createTableFilters == filters && shouldRecordTableSchema) || createTableFilters.tableFilter().test(id)) {
createTablesMap.computeIfAbsent(dbName, k -> new ArrayList<>()).add(id);
}
if (shouldRecordTableSchema) {
knownTableIds.add(id);
logger.info("\t including '{}' among known tables", id);
}
else {
logger.debug("\t '{}' is not added among known tables", id);
}
if (filters.tableFilter().and(isAllowedForSnapshot).test(id)) {
capturedTableIds.add(id);
logger.info("\t including '{}' for further processing", id);
}
else {
logger.debug("\t '{}' is filtered out of capturing", id);
}
}
});
readableDatabaseNames.add(dbName);
}
catch (SQLException e) {
// We were unable to execute the query or process the results, so skip this ...
logger.warn("\t skipping database '{}' due to error reading tables: {}", dbName, e.getMessage());
}
}
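The quote(...) helper used throughout these snippets wraps identifiers in backticks so that database and table names are addressed safely. A hypothetical stand-in for it, only to make the generated SQL readable (a complete implementation would also escape embedded backticks):

// hypothetical helpers mirroring the assumed behavior of quote(dbName) and quote(tableId)
static String quote(String identifier) {
    return "`" + identifier + "`";
}

static String quote(TableId id) {
    return quote(id.catalog()) + "." + quote(id.table());
}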
7. Table-level locking
If the global read lock could not be acquired, the reader takes a table-level read lock on each captured table instead (for example, FLUSH TABLES `db`.`t1`,`db`.`t2` WITH READ LOCK) and then calls readBinlogPosition.
if (!isLocked) {
if (!snapshotLockingMode.equals(MySqlConnectorConfig.SnapshotLockingMode.NONE)) {
// ------------------------------------
// LOCK TABLES and READ BINLOG POSITION
// ------------------------------------
// We were not able to acquire the global read lock, so instead we have to obtain a read lock on each table.
// This requires different privileges than normal, and also means we can't unlock the tables without
// implicitly committing our transaction ...
if (!connectionContext.userHasPrivileges("LOCK TABLES")) {
// We don't have the right privileges
throw new ConnectException("User does not have the 'LOCK TABLES' privilege required to obtain a "
+ "consistent snapshot by preventing concurrent writes to tables.");
}
// We have the required privileges, so try to lock all of the tables we're interested in ...
logger.info("Step {}: flush and obtain read lock for {} tables (preventing writes)", step++, knownTableIds.size());
lockedTables = new HashSet<>(capturedTableIds);
String tableList = capturedTableIds.stream()
.map(tid -> quote(tid))
.reduce((r, element) -> r + "," + element)
.orElse(null);
if (tableList != null) {
sql.set("FLUSH TABLES " + tableList + " WITH READ LOCK");
mysql.executeWithoutCommitting(sql.get());
}
lockAcquired = clock.currentTimeInMillis();
metrics.globalLockAcquired();
isLocked = true;
tableLocks = true;
}
// Our tables are locked, so read the binlog position ...
readBinlogPosition(step++, source, mysql, sql);
}
8. Capturing the table schemas
try {
logger.info("Step {}: generating DROP and CREATE statements to reflect current database schemas:", step++);
schema.applyDdl(source, null, setSystemVariablesStatement, this::enqueueSchemaChanges);
// Add DROP TABLE statements for all tables that we knew about AND those tables found in the databases ...
knownTableIds.stream()
.filter(id -> isRunning()) // ignore all subsequent tables if this reader is stopped
.forEach(tableId -> schema.applyDdl(source, tableId.catalog(),
"DROP TABLE IF EXISTS " + quote(tableId),
this::enqueueSchemaChanges));
// Add a DROP DATABASE statement for each database that we no longer know about ...
schema.tableIds().stream().map(TableId::catalog)
.filter(Predicates.not(readableDatabaseNames::contains))
.filter(id -> isRunning()) // ignore all subsequent tables if this reader is stopped
.forEach(missingDbName -> schema.applyDdl(source, missingDbName,
"DROP DATABASE IF EXISTS " + quote(missingDbName),
this::enqueueSchemaChanges));
final Map<String, DatabaseLocales> databaseCharsets = connectionContext.readDatabaseCollations();
// Now process all of our tables for each database ...
for (Map.Entry<String, List<TableId>> entry : createTablesMap.entrySet()) {
if (!isRunning()) {
break;
}
String dbName = entry.getKey();
// First drop, create, and then use the named database ...
schema.applyDdl(source, dbName, "DROP DATABASE IF EXISTS " + quote(dbName), this::enqueueSchemaChanges);
final StringBuilder createDatabaseDddl = new StringBuilder("CREATE DATABASE " + quote(dbName));
final DatabaseLocales defaultDatabaseLocales = databaseCharsets.get(dbName);
if (defaultDatabaseLocales != null) {
defaultDatabaseLocales.appendToDdlStatement(dbName, createDatabaseDddl);
}
schema.applyDdl(source, dbName, createDatabaseDddl.toString(), this::enqueueSchemaChanges);
schema.applyDdl(source, dbName, "USE " + quote(dbName), this::enqueueSchemaChanges);
for (TableId tableId : entry.getValue()) {
if (!isRunning()) {
break;
}
// This is to handle situation when global read lock is unavailable and tables are locked instead of it.
// MySQL forbids access to an unlocked table when there is at least one lock held on another table.
// Thus when we need to obtain schema even for non-monitored tables (which are not locked as we might not have access privileges)
// we need to do it after the tables are unlocked
if (lockedTables.isEmpty() || lockedTables.contains(tableId)) {
readTableSchema(sql, mysql, schema, source, dbName, tableId);
}
else {
tablesToSnapshotSchemaAfterUnlock.add(tableId);
}
}
}
context.makeRecord().regenerate();
}
// most likely, something went wrong while writing the history topic
catch (Exception e) {
interrupted.set(true);
throw e;
}
The schema of each table is obtained as follows:
private void readTableSchema(final AtomicReference<String> sql, final JdbcConnection mysql,
final MySqlSchema schema, final SourceInfo source, String dbName, TableId tableId)
throws SQLException {
sql.set("SHOW CREATE TABLE " + quote(tableId));
mysql.query(sql.get(), rs -> {
if (rs.next()) {
schema.applyDdl(source, dbName, rs.getString(2), this::enqueueSchemaChanges);
}
});
}
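SHOW CREATE TABLE returns two columns, the table name and the complete CREATE TABLE statement, which is why readTableSchema feeds rs.getString(2) to the DDL parser. A standalone sketch of that read, reusing the JDBC setup from the earlier sketches (the table name is just an example):

try (Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/inventory", "user", "pass");
     Statement stmt = conn.createStatement();
     ResultSet rs = stmt.executeQuery("SHOW CREATE TABLE `inventory`.`customers`")) {
    if (rs.next()) {
        String tableName = rs.getString(1);   // column 1: the table name
        String createDdl = rs.getString(2);   // column 2: the full CREATE TABLE statement
        System.out.println(createDdl);
    }
}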
9. Unlocking the database
if (snapshotLockingMode.usesMinimalLocking() && isLocked) {
if (tableLocks) {
// We could not acquire a global read lock and instead had to obtain individual table-level read locks
// using 'FLUSH TABLE <tableName> WITH READ LOCK'. However, if we were to do this, the 'UNLOCK TABLES'
// would implicitly commit our active transaction, and this would break our consistent snapshot logic.
// Therefore, we cannot unlock the tables here!
// https://dev.mysql.com/doc/refman/5.7/en/flush.html
logger.info("Step {}: tables were locked explicitly, but to get a consistent snapshot we cannot "
+ "release the locks until we've read all tables.", step++);
}
else {
// We are doing minimal blocking via a global read lock, so we should release the global read lock now.
// All subsequent SELECT should still use the MVCC snapshot obtained when we started our transaction
// (since we started it "...with consistent snapshot"). So, since we're only doing very simple SELECT
// without WHERE predicates, we can release the lock now ...
logger.info("Step {}: releasing global read lock to enable MySQL writes", step);
sql.set("UNLOCK TABLES");
mysql.executeWithoutCommitting(sql.get());
isLocked = false;
long lockReleased = clock.currentTimeInMillis();
metrics.globalLockReleased();
logger.info("Step {}: blocked writes to MySQL for a total of {}", step++,
Strings.duration(lockReleased - lockAcquired));
}
}
10. Reading table data
The approximate row count of each table is obtained first, with a statement such as:
SHOW TABLE STATUS LIKE 'retail_order_detail'
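The Rows value is the fifth column of SHOW TABLE STATUS and is only an estimate for InnoDB, but it is enough to decide whether the table exceeds context.rowCountForLargeTable(). For large tables the reader switches to a statement created for streaming; a sketch of what such a createStatementWithLargeResultSet factory typically looks like with MySQL Connector/J (an assumption about the implementation, not the connector's exact code):

// assumption: a "large result set" statement streams rows instead of buffering the whole table in memory;
// Connector/J streams when given a forward-only, read-only statement with fetch size Integer.MIN_VALUE
private Statement createStatementWithLargeResultSet(Connection connection) throws SQLException {
    Statement stmt = connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    stmt.setFetchSize(Integer.MIN_VALUE); // hint to stream rows one at a time
    return stmt;
}

The scanning loop itself is shown below: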
if (!isRunning()) {
return;
}
if (includeData) {
BufferedBlockingConsumer<SourceRecord> bufferedRecordQueue = BufferedBlockingConsumer.bufferLast(super::enqueueRecord);
// Dump all of the tables and generate source records ...
logger.info("Step {}: scanning contents of {} tables while still in transaction", step, capturedTableIds.size());
metrics.monitoredDataCollectionsDetermined(partition, capturedTableIds);
long startScan = clock.currentTimeInMillis();
AtomicLong totalRowCount = new AtomicLong();
int counter = 0;
int completedCounter = 0;
long largeTableCount = context.rowCountForLargeTable();
Iterator<TableId> tableIdIter = capturedTableIds.iterator();
while (tableIdIter.hasNext()) {
TableId tableId = tableIdIter.next();
AtomicLong rowNum = new AtomicLong();
if (!isRunning()) {
break;
}
// Obtain a record maker for this table, which knows about the schema ...
RecordsForTable recordMaker = context.makeRecord().forTable(tableId, null, bufferedRecordQueue);
if (recordMaker != null) {
// Switch to the table's database ...
sql.set("USE " + quote(tableId.catalog()) + ";");
mysql.executeWithoutCommitting(sql.get());
AtomicLong numRows = new AtomicLong(-1);
AtomicReference<String> rowCountStr = new AtomicReference<>("<unknown>");
StatementFactory statementFactory = this::createStatementWithLargeResultSet;
if (largeTableCount > 0) {
try {
// Choose how we create statements based on the # of rows.
// This is approximate and less accurate than COUNT(*),
// but far more efficient for large InnoDB tables.
sql.set("SHOW TABLE STATUS LIKE '" + tableId.table() + "';");
mysql.query(sql.get(), rs -> {
if (rs.next()) {
numRows.set(rs.getLong(5));
}
});
if (numRows.get() <= largeTableCount) {
statementFactory = this::createStatement;
}
rowCountStr.set(numRows.toString());
}
catch (SQLException e) {
// Log it, but otherwise just use large result set by default ...
logger.debug("Error while getting number of rows in table {}: {}", tableId, e.getMessage(), e);
}
}
// Scan the rows in the table ...
long start = clock.currentTimeInMillis();
logger.info("Step {}: - scanning table '{}' ({} of {} tables)", step, tableId, ++counter, capturedTableIds.size());
Map<TableId, String> selectOverrides = context.getConnectorConfig().getSnapshotSelectOverridesByTable();
String selectStatement = selectOverrides.getOrDefault(tableId, "SELECT * FROM " + quote(tableId));
logger.info("For table '{}' using select statement: '{}'", tableId, selectStatement);
sql.set(selectStatement);
try {
int stepNum = step;
mysql.query(sql.get(), statementFactory, rs -> {
try {
// The table is included in the connector's filters, so process all of the table records
// ...
final Table table = schema.tableFor(tableId);
final int numColumns = table.columns().size();
final Object[] row = new Object[numColumns];
while (rs.next()) {
for (int i = 0, j = 1; i != numColumns; ++i, ++j) {
Column actualColumn = table.columns().get(i);
row[i] = mysqlFieldReader.readField(rs, j, actualColumn, table);
}
recorder.recordRow(recordMaker, row, clock.currentTimeAsInstant()); // has no row number!
rowNum.incrementAndGet();
if (rowNum.get() % 100 == 0 && !isRunning()) {
// We've stopped running ...
break;
}
if (rowNum.get() % 10_000 == 0) {
if (logger.isInfoEnabled()) {
long stop = clock.currentTimeInMillis();
logger.info("Step {}: - {} of {} rows scanned from table '{}' after {}",
stepNum, rowNum, rowCountStr, tableId, Strings.duration(stop - start));
}
metrics.rowsScanned(partition, tableId, rowNum.get());
}
}
totalRowCount.addAndGet(rowNum.get());
if (isRunning()) {
if (logger.isInfoEnabled()) {
long stop = clock.currentTimeInMillis();
logger.info("Step {}: - Completed scanning a total of {} rows from table '{}' after {}",
stepNum, rowNum, tableId, Strings.duration(stop - start));
}
metrics.rowsScanned(partition, tableId, rowNum.get());
}
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
// We were not able to finish all rows in all tables ...
logger.info("Step {}: Stopping the snapshot due to thread interruption", stepNum);
interrupted.set(true);
}
});
}
finally {
metrics.dataCollectionSnapshotCompleted(partition, tableId, rowNum.get());
if (interrupted.get()) {
break;
}
}
}
++completedCounter;
}
// See if we've been stopped or interrupted ...
if (!isRunning() || interrupted.get()) {
return;
}
// We've copied all of the tables and we've not yet been stopped, but our buffer holds onto the
// very last record. First mark the snapshot as complete and then apply the updated offset to
// the buffered record ...
source.markLastSnapshot(configuration);
long stop = clock.currentTimeInMillis();
try {
bufferedRecordQueue.close(this::replaceOffsetAndSource);
if (logger.isInfoEnabled()) {
logger.info("Step {}: scanned {} rows in {} tables in {}",
step, totalRowCount, capturedTableIds.size(), Strings.duration(stop - startScan));
}
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
// We were not able to finish all rows in all tables ...
if (logger.isInfoEnabled()) {
logger.info("Step {}: aborting the snapshot after {} rows in {} of {} tables {}",
step, totalRowCount, completedCounter, capturedTableIds.size(), Strings.duration(stop - startScan));
}
interrupted.set(true);
}
}
else {
logger.info("Step {}: encountered only schema based snapshot, skipping data snapshot", step);
}
step++;
}
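The bufferedRecordQueue created at the top of this step holds back the most recent source record so that, once every table has been scanned, its offset can be rewritten (replaceOffsetAndSource) to mark the snapshot as complete before it is emitted. A hypothetical sketch of such a buffer-last consumer, assuming a generic record type (not the actual BufferedBlockingConsumer source):

import java.util.function.Consumer;
import java.util.function.Function;

// buffers the most recently accepted element; close() lets the caller transform it
// (for example, patch its offset to mark the snapshot complete) before it is flushed
class BufferLast<T> {
    private final Consumer<T> delegate;
    private T last;

    BufferLast(Consumer<T> delegate) {
        this.delegate = delegate;
    }

    void accept(T record) {
        if (last != null) {
            delegate.accept(last);   // everything but the newest record flows through immediately
        }
        last = record;
    }

    void close(Function<T, T> finalizer) {
        if (last != null) {
            delegate.accept(finalizer.apply(last));   // emit the held-back record with its rewritten fields
            last = null;
        }
    }
}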
11. Committing or rolling back the transaction
finally {
// No matter what, we always want to do these steps if necessary ...
boolean rolledBack = false;
// ------
// STEP 9
// ------
// Either commit or roll back the transaction, BEFORE releasing the locks ...
if (isTxnStarted) {
if (interrupted.get() || !isRunning()) {
// We were interrupted or were stopped while reading the tables,
// so roll back the transaction and return immediately ...
logger.info("Step {}: rolling back transaction after abort", step++);
mysql.connection().rollback();
metrics.snapshotAborted(partition);
rolledBack = true;
}
else {
// Otherwise, commit our transaction
logger.info("Step {}: committing transaction", step++);
mysql.connection().commit();
metrics.snapshotCompleted(partition);
}
}
else {
// Always clean up TX resources even if no changes might be done
mysql.connection().rollback();
}
12. Releasing the table-level locks
if (isLocked && !rolledBack) {
if (tableLocks) {
logger.info("Step {}: releasing table read locks to enable MySQL writes", step++);
}
else {
logger.info("Step {}: releasing global read lock to enable MySQL writes", step++);
}
sql.set("UNLOCK TABLES");
mysql.executeWithoutCommitting(sql.get());
isLocked = false;
long lockReleased = clock.currentTimeInMillis();
metrics.globalLockReleased();
if (logger.isInfoEnabled()) {
if (tableLocks) {
logger.info("Writes to MySQL prevented for a total of {}", Strings.duration(lockReleased - lockAcquired));
}
else {
logger.info("Writes to MySQL tables prevented for a total of {}", Strings.duration(lockReleased - lockAcquired));
}
}
if (!tablesToSnapshotSchemaAfterUnlock.isEmpty()) {
logger.info("Step {}: reading table schema for non-whitelisted tables", step++);
for (TableId tableId : tablesToSnapshotSchemaAfterUnlock) {
if (!isRunning()) {
break;
}
readTableSchema(sql, mysql, schema, source, tableId.catalog(), tableId);
}
}
}
}