Related articles
"Enabling binlog in a MySQL database": https://blog.csdn.net/qq_23845083/article/details/131833331
"Canal download, deployment and getting started (detailed)": https://blog.csdn.net/qq_23845083/article/details/131834011
1 Component versions
- canal 1.1.5
- commons-dbutils 1.7
- MySQL 8
2 Preparation
2.1 Add the dependencies to pom.xml
<!-- canal client -->
<dependency>
    <groupId>com.alibaba.otter</groupId>
    <artifactId>canal.client</artifactId>
    <version>1.1.5</version>
</dependency>
<!-- canal server -->
<dependency>
    <groupId>com.alibaba.otter</groupId>
    <artifactId>canal.server</artifactId>
    <version>1.1.5</version>
</dependency>
<!-- database access -->
<dependency>
    <groupId>commons-dbutils</groupId>
    <artifactId>commons-dbutils</artifactId>
    <version>1.7</version>
</dependency>
2.2 Configure docker-compose.yml
canal:
  image: canal/canal-server:v1.1.5
  container_name: canal
  ports:
    # canal's default port
    - 11111:11111
  environment:
    # slave id for the replication protocol; must not clash with the master's server-id
    - canal.instance.mysql.slaveId=222
    - canal.instance.master.address=192.168.1.207:23307
    # MySQL replication account
    - canal.instance.dbUsername=root
    - canal.instance.dbPassword=dev@xhkj
    - canal.mq.topic=testTopic
    - canal.auto.scan=false
    - canal.destinations=test
    # filter in database.table form (regex)
    - canal.instance.filter.regex=xh_bimops.*
  volumes:
    - /data/docker/jar/canal/conf/:/usr/local/dockercompose/canal/conf/
    - /data/docker/jar/canal/logs/:/usr/local/dockercompose/canal/logs/
  restart: always
  logging:
    driver: "json-file"
    options:
      max-size: "20m"
3 Implementing the Canal client: monitoring tables and syncing data dynamically
The code below is commented throughout, so I won't add much more here; copy it into your project and adjust it slightly.
You don't have to hand-write insert/update/delete methods for each individual table, which is very convenient.
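One caveat before the code: the handlers below build SQL by concatenating column values inside single quotes, so a value that itself contains a quote character produces broken (and injectable) SQL. If that matters for your tables, a minimal escaping helper along these lines could be dropped in wherever a value is quoted; the class name SqlQuote is my own, not part of the original post.
// Hypothetical helper, not part of the original class: escape a value for use
// as a MySQL string literal (backslashes first, then single quotes).
public final class SqlQuote {
    public static String quote(String value) {
        return "'" + value.replace("\\", "\\\\").replace("'", "''") + "'";
    }

    public static void main(String[] args) {
        System.out.println(quote("O'Brien")); // prints 'O''Brien'
    }
}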
package com.gh.resource.config;
import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.client.CanalConnectors;
import com.alibaba.otter.canal.protocol.CanalEntry.*;
import com.alibaba.otter.canal.protocol.Message;
import com.google.protobuf.InvalidProtocolBufferException;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.dbutils.DbUtils;
import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.lang3.StringUtils;
import org.springframework.stereotype.Component;
import javax.annotation.Resource;
import javax.sql.DataSource;
import java.net.InetSocketAddress;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
/**
 * @Description: Canal client: watches binlog row events and replays them as SQL
 * @Author: zhanleai
 * @Date: 2023/7/17 14:33
 */
@Component
@Slf4j
public class CanalClient {

    // SQL queue: batches statements to improve throughput and reduce DB pressure
    private final Queue<String> SQL_QUEUE = new ConcurrentLinkedQueue<>();

    @Resource
    private DataSource dataSource;
    /**
     * Main loop: pulls binlog entries from the canal server and queues SQL.
     */
    public void run() {
        log.info("canal listener started!");
        CanalConnector connector = CanalConnectors.newSingleConnector(
                new InetSocketAddress("192.168.1.207", 11111), // canal server address and port (see docker-compose.yml)
                "test",      // destination configured on the canal server (canal.destinations)
                "root",      // username (here the dbUsername from docker-compose.yml)
                "dev@xhkj"); // password (here the dbPassword from docker-compose.yml)
        try {
            connector.connect();
            // subscription filter in database.table form; note that a client-side
            // filter overrides the server's canal.instance.filter.regex
            connector.subscribe("canal_bimops.canal_test");
            connector.rollback();
            try {
                while (true) {
                    // pull up to 1000 entries without acking; raise the batch size to suit your workload
                    Message message = connector.getWithoutAck(1000);
                    long batchId = message.getId();
                    int size = message.getEntries().size();
                    if (batchId == -1 || size == 0) {
                        Thread.sleep(1000);
                    } else {
                        dataHandle(message.getEntries());
                    }
                    connector.ack(batchId);
                    // flush once the queue reaches a threshold; raise it to batch more aggressively
                    if (SQL_QUEUE.size() >= 1) {
                        executeQueueSql();
                    }
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt flag
                log.error("canal listener interrupted", e);
            } catch (InvalidProtocolBufferException e) {
                log.error("failed to parse binlog entry", e);
            }
        } finally {
            connector.disconnect();
        }
    }
    /**
     * Drain the queue and execute every pending SQL statement.
     */
    public void executeQueueSql() {
        int size = SQL_QUEUE.size();
        for (int i = 0; i < size; i++) {
            String sql = SQL_QUEUE.poll();
            log.info("----> " + sql);
            this.execute(sql);
        }
    }
    /**
     * Dispatch row events (insert / update / delete) to the matching SQL builder.
     *
     * @param entries binlog entries pulled from the canal server
     */
    private void dataHandle(List<Entry> entries) throws InvalidProtocolBufferException {
        for (Entry entry : entries) {
            // only ROWDATA entries carry row changes; transaction begin/end markers are skipped
            if (EntryType.ROWDATA == entry.getEntryType()) {
                RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
                EventType eventType = rowChange.getEventType();
                if (eventType == EventType.DELETE) {
                    saveDeleteSql(entry);
                } else if (eventType == EventType.UPDATE) {
                    saveUpdateSql(entry);
                } else if (eventType == EventType.INSERT) {
                    saveInsertSql(entry);
                }
            }
        }
    }
    /**
     * Build an UPDATE statement from the after-image of a changed row.
     *
     * @param entry binlog entry of type UPDATE
     */
    private void saveUpdateSql(Entry entry) {
        try {
            RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
            List<RowData> rowDatasList = rowChange.getRowDatasList();
            for (RowData rowData : rowDatasList) {
                List<Column> newColumnList = rowData.getAfterColumnsList();
                StringBuilder sql = new StringBuilder("update " + entry.getHeader().getTableName() + " set ");
                for (int i = 0; i < newColumnList.size(); i++) {
                    if (StringUtils.isNotBlank(newColumnList.get(i).getValue())) {
                        sql.append(" " + newColumnList.get(i).getName()
                                + " = '" + newColumnList.get(i).getValue() + "'");
                        if (i != newColumnList.size() - 1) {
                            sql.append(",");
                        }
                    }
                }
                sql.append(" where ");
                List<Column> oldColumnList = rowData.getBeforeColumnsList();
                for (Column column : oldColumnList) {
                    if (column.getIsKey()) {
                        // only a single primary key is supported for now
                        sql.append(column.getName() + " = '" + column.getValue() + "'");
                        break;
                    }
                }
                // a trailing comma is left behind when the last column is blank; strip it
                SQL_QUEUE.add(sql.toString().replace(", where", " where"));
            }
        } catch (InvalidProtocolBufferException e) {
            log.error("failed to build update sql", e);
        }
    }
    /**
     * Build a DELETE statement from the before-image of a removed row.
     *
     * @param entry binlog entry of type DELETE
     */
    private void saveDeleteSql(Entry entry) {
        try {
            RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
            List<RowData> rowDatasList = rowChange.getRowDatasList();
            for (RowData rowData : rowDatasList) {
                List<Column> columnList = rowData.getBeforeColumnsList();
                StringBuilder sql = new StringBuilder("delete from " + entry.getHeader().getTableName() + " where ");
                for (Column column : columnList) {
                    if (column.getIsKey()) {
                        // only a single primary key is supported for now
                        sql.append(column.getName() + " = '" + column.getValue() + "'");
                        break;
                    }
                }
                SQL_QUEUE.add(sql.toString());
            }
        } catch (InvalidProtocolBufferException e) {
            log.error("failed to build delete sql", e);
        }
    }
    /**
     * Build an INSERT statement from the after-image of a new row.
     *
     * @param entry binlog entry of type INSERT
     */
    private void saveInsertSql(Entry entry) {
        try {
            RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
            List<RowData> rowDatasList = rowChange.getRowDatasList();
            for (RowData rowData : rowDatasList) {
                List<Column> columnList = rowData.getAfterColumnsList();
                StringBuilder sql = new StringBuilder("insert into " + entry.getHeader().getTableName() + " (");
                // blank columns are skipped in both loops, so names and values stay aligned
                for (int i = 0; i < columnList.size(); i++) {
                    if (StringUtils.isNotBlank(columnList.get(i).getValue())) {
                        sql.append(" " + columnList.get(i).getName());
                        if (i != columnList.size() - 1) {
                            sql.append(",");
                        }
                    }
                }
                sql.append(") VALUES (");
                for (int i = 0; i < columnList.size(); i++) {
                    if (StringUtils.isNotBlank(columnList.get(i).getValue())) {
                        sql.append("'" + columnList.get(i).getValue() + "'");
                        if (i != columnList.size() - 1) {
                            sql.append(",");
                        }
                    }
                }
                sql.append(")");
                // a trailing comma is left behind when the last column is blank; strip it
                SQL_QUEUE.add(sql.toString().replace(",)", " )"));
            }
        } catch (InvalidProtocolBufferException e) {
            log.error("failed to build insert sql", e);
        }
    }
    /**
     * Execute a single SQL statement against the target DataSource.
     *
     * @param sql statement to run
     */
    public void execute(String sql) {
        if (null == sql) {
            return;
        }
        Connection con = null;
        try {
            con = dataSource.getConnection();
            QueryRunner qr = new QueryRunner();
            qr.execute(con, sql);
        } catch (SQLException e) {
            log.error("failed to execute sql: " + sql, e);
        } finally {
            DbUtils.closeQuietly(con);
        }
    }
}
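Note that nothing above actually invokes run(), and run() blocks in an endless loop, so it must be started on its own thread when the application boots. Here is a minimal sketch for Spring Boot; the class name CanalStarter and the single-thread executor are my own additions, not part of the original post.
import org.springframework.boot.ApplicationArguments;
import org.springframework.boot.ApplicationRunner;
import org.springframework.stereotype.Component;
import javax.annotation.Resource;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

@Component
public class CanalStarter implements ApplicationRunner {

    @Resource
    private CanalClient canalClient;

    @Override
    public void run(ApplicationArguments args) {
        // CanalClient.run() loops forever, so hand it off to a dedicated thread
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(canalClient::run);
    }
}
Running the poll loop on a dedicated thread keeps it from blocking application startup; in production you would also want to shut the executor down cleanly and reconnect on failure.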