官网关于canal解释
译意为水道/管道/沟渠,主要用途是基于 MySQL 数据库增量日志解析,提供增量数据订阅和消费
基于日志增量订阅和消费的业务包括
- 数据库镜像
- 数据库实时备份
- 索引构建和实时维护(拆分异构索引、倒排索引等)
- 业务 cache 刷新
- 带业务逻辑的增量数据处理
当前的 canal 支持源端 MySQL 版本包括 5.1.x , 5.5.x , 5.6.x , 5.7.x , 8.0.x
1.docker 安装配置canal
1.1先进行添加一个mysql用户.用来进行数据的测试
mysql -uroot -proot
SHOW VARIABLES LIKE '%log_bin%';
进入本地数据库,查看数据库log_bin是否开启.开启为on 未开启为off
如果没有开启的话,需要进行配置
第一步:先进入mysql的容器中
vi /etc/mysql/mysql.conf.d/mysqld.cnf
# 如果没有vi命令 则安装
apt-get update
apt-get -y install vim
# 在[mysqld]下面添加
# [mysqld]
log-bin=mysql-bin
binlog-format=ROW
server_id=1
//创建mysql用户
create user canal@'%' IDENTIFIED by 'canal';
//指定权限
GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT,SUPER ON *.* TO 'canal'@'%';
//开放全部权限--个人建议使用这个命令,避免后续出现一些别的问题
GRANT ALL ON *.* TO 'canal'@'%';
FLUSH PRIVILEGES;
//进入该数据库
mysql -u canal -pcanal
最好重启mysql容器
1.3 下载创建canal容器
docker pull canal/canal-server:latest
docker run --name canal-server \
-e canal.instance.master.address=192.168.83.128:3306 \
-e canal.instance.dbUsername=canal \
-e canal.instance.dbPassword=canal \
-p 11111:11111 \
-d canal/canal-server
说明:canal.instance.master.address 修改为自己所使用的数据库地址;dbUsername/dbPassword 为前面创建的 mysql 用户名和密码;-p 为指定的端口号。注意:反斜杠续行符后不能再跟注释文字,否则命令会执行失败
按照我的配置步骤,配到这里我就可以正常使用了,
1.4 进行canal的测试
该测试代码可以直接从官网中进行获取
package com.alibaba.otter.canal.sample;
import java.net.InetSocketAddress;
import java.util.List;
import com.alibaba.otter.canal.client.CanalConnectors;
import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.common.utils.AddressUtils;
import com.alibaba.otter.canal.protocol.Message;
import com.alibaba.otter.canal.protocol.CanalEntry.Column;
import com.alibaba.otter.canal.protocol.CanalEntry.Entry;
import com.alibaba.otter.canal.protocol.CanalEntry.EntryType;
import com.alibaba.otter.canal.protocol.CanalEntry.EventType;
import com.alibaba.otter.canal.protocol.CanalEntry.RowChange;
import com.alibaba.otter.canal.protocol.CanalEntry.RowData;
public class SimpleCanalClientExample {

    /**
     * Minimal canal client: connects to a canal server, subscribes to every
     * schema/table, and prints each row change to stdout. The loop exits after
     * 120 consecutive empty fetches, or when the thread is interrupted.
     */
    public static void main(String[] args) {
        // Create the connection: (host:port, destination, username, password).
        CanalConnector connector = CanalConnectors.newSingleConnector(new InetSocketAddress("您的ip地址 ",
                11111), "example", "数据库账号", "数据库密码");
        int batchSize = 1000;   // max entries fetched per poll
        int emptyCount = 0;     // consecutive polls that returned no data
        try {
            connector.connect();
            connector.subscribe(".*\\..*");  // all databases, all tables
            connector.rollback();            // re-deliver anything fetched but never acked
            int totalEmptyCount = 120;
            while (emptyCount < totalEmptyCount) {
                Message message = connector.getWithoutAck(batchSize); // fetch without auto-ack
                long batchId = message.getId();
                int size = message.getEntries().size();
                if (batchId == -1 || size == 0) {
                    emptyCount++;
                    System.out.println("empty count : " + emptyCount);
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        // FIX: the original swallowed the interrupt. Restore the
                        // interrupted status and stop polling instead.
                        Thread.currentThread().interrupt();
                        break;
                    }
                } else {
                    emptyCount = 0;
                    printEntry(message.getEntries());
                }
                connector.ack(batchId); // confirm the batch as consumed
                // connector.rollback(batchId); // on processing failure, roll back instead
            }
            System.out.println("empty too many times, exit");
        } finally {
            connector.disconnect();
        }
    }

    /**
     * Prints every row-level change in the given entries, skipping the
     * transaction begin/end markers.
     */
    private static void printEntry(List<Entry> entrys) {
        for (Entry entry : entrys) {
            if (entry.getEntryType() == EntryType.TRANSACTIONBEGIN || entry.getEntryType() == EntryType.TRANSACTIONEND) {
                continue; // transaction boundaries carry no row data
            }
            RowChange rowChange = null;
            try {
                rowChange = RowChange.parseFrom(entry.getStoreValue());
            } catch (Exception e) {
                throw new RuntimeException("ERROR ## parser of eromanga-event has an error , data:" + entry.toString(),
                        e);
            }
            EventType eventType = rowChange.getEventType();
            System.out.println(String.format("================> binlog[%s:%s] , name[%s,%s] , eventType : %s",
                    entry.getHeader().getLogfileName(), entry.getHeader().getLogfileOffset(),
                    entry.getHeader().getSchemaName(), entry.getHeader().getTableName(),
                    eventType));
            for (RowData rowData : rowChange.getRowDatasList()) {
                if (eventType == EventType.DELETE) {
                    // DELETE only has a "before" image.
                    printColumn(rowData.getBeforeColumnsList());
                } else if (eventType == EventType.INSERT) {
                    // INSERT only has an "after" image.
                    printColumn(rowData.getAfterColumnsList());
                } else {
                    // UPDATE (and others): show both images.
                    System.out.println("-------> before");
                    printColumn(rowData.getBeforeColumnsList());
                    System.out.println("-------> after");
                    printColumn(rowData.getAfterColumnsList());
                }
            }
        }
    }

    /** Prints each column as "name : value update=changed-flag". */
    private static void printColumn(List<Column> columns) {
        for (Column column : columns) {
            System.out.println(column.getName() + " : " + column.getValue() + " update=" + column.getUpdated());
        }
    }
}
在连接中的第二个参数,"example" 这个参数可以从canal的配置文件中进行查看
步骤:
1.进入canal的容器
2.进入 admin/canal-server/conf/
3.使用cat canal.properties 查看
#################################################
######### destinations #############
#################################################
canal.destinations = example
# conf root dir
canal.conf.dir = ../conf
# auto scan instance dir add/remove and start/stop instance
配置完毕后,进行运行
然后将表单中插入一条数据,会出现
这样就代表配置完成
2.redis和数据库的同步
定义一个redisUtil类
package com.example.task_demo.canal.utils;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisCluster;
import java.util.HashSet;
import java.util.Set;
public class RedisUtil {

    /** Lazily created, JVM-wide shared cluster client. */
    private static JedisCluster jedisCluster = null;

    /**
     * Returns the shared {@link JedisCluster}, building it on first use.
     * Synchronized so that concurrent first calls create only one client.
     */
    public static synchronized JedisCluster getJedis() {
        if (jedisCluster == null) {
            // Redis cluster node addresses.
            Set<HostAndPort> nodes = new HashSet<>();
            nodes.add(new HostAndPort("192.168.31.81", 6379));
            nodes.add(new HostAndPort("192.168.31.81", 6380));
            nodes.add(new HostAndPort("192.168.31.81", 6381));
            jedisCluster = new JedisCluster(nodes);
        }
        return jedisCluster;
    }

    /** Returns true when the key exists. */
    public static boolean existKey(String key) {
        return getJedis().exists(key);
    }

    /** Deletes the key (no-op if absent). */
    public static void delKey(String key) {
        getJedis().del(key);
    }

    /** Returns the string value stored at key, or null if absent. */
    public static String stringGet(String key) {
        return getJedis().get(key);
    }

    /** Sets key to value; returns the Redis status reply. */
    public static String stringSet(String key, String value) {
        return getJedis().set(key, value);
    }

    /** Sets one field of the hash stored at key. */
    public static void hashSet(String key, String field, String value) {
        getJedis().hset(key, field, value);
    }
}
在canal的测试类中进行补充
package com.example.task_demo.canal;
import java.net.InetSocketAddress;
import java.util.List;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.otter.canal.client.CanalConnectors;
import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.protocol.Message;
import com.alibaba.otter.canal.protocol.CanalEntry.Column;
import com.alibaba.otter.canal.protocol.CanalEntry.Entry;
import com.alibaba.otter.canal.protocol.CanalEntry.EntryType;
import com.alibaba.otter.canal.protocol.CanalEntry.EventType;
import com.alibaba.otter.canal.protocol.CanalEntry.RowChange;
import com.alibaba.otter.canal.protocol.CanalEntry.RowData;
import com.example.task_demo.canal.utils.RedisUtil;
public class SimpleCanalClientExample {

    /**
     * Canal client that mirrors MySQL row changes into Redis: INSERTs are
     * written as JSON strings, DELETEs remove the matching key. Exits after
     * 120 consecutive empty fetches, or when the thread is interrupted.
     */
    public static void main(String[] args) {
        // Connection: (host:port, destination, username, password).
        CanalConnector connector = CanalConnectors.newSingleConnector
                (new InetSocketAddress("192.168.31.81", 11111), "example", "canalq", "canal");
        int batchSize = 1000;   // max entries fetched per poll
        int emptyCount = 0;     // consecutive polls that returned no data
        try {
            connector.connect();
            connector.subscribe(".*\\..*");  // all databases, all tables
            connector.rollback();            // re-deliver anything fetched but never acked
            int totalEmptyCount = 120;
            while (emptyCount < totalEmptyCount) {
                Message message = connector.getWithoutAck(batchSize); // fetch without auto-ack
                long batchId = message.getId();
                int size = message.getEntries().size();
                if (batchId == -1 || size == 0) {
                    emptyCount++;
                    System.out.println("empty count : " + emptyCount);
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        // FIX: the original swallowed the interrupt. Restore the
                        // interrupted status and stop polling instead.
                        Thread.currentThread().interrupt();
                        break;
                    }
                } else {
                    emptyCount = 0;
                    printEntry(message.getEntries());
                }
                connector.ack(batchId); // confirm the batch as consumed
                // connector.rollback(batchId); // on processing failure, roll back instead
            }
            System.out.println("empty too many times, exit");
        } finally {
            connector.disconnect();
        }
    }

    /**
     * Applies each row-level change to Redis (skipping transaction markers):
     * DELETE removes the key, INSERT stores the row as JSON; other event
     * types (e.g. UPDATE) are only printed.
     */
    private static void printEntry(List<Entry> entrys) {
        for (Entry entry : entrys) {
            if (entry.getEntryType() == EntryType.TRANSACTIONBEGIN || entry.getEntryType() == EntryType.TRANSACTIONEND) {
                continue; // transaction boundaries carry no row data
            }
            RowChange rowChange = null;
            try {
                rowChange = RowChange.parseFrom(entry.getStoreValue());
            } catch (Exception e) {
                throw new RuntimeException("ERROR ## parser of eromanga-event has an error , data:" + entry.toString(),
                        e);
            }
            EventType eventType = rowChange.getEventType();
            System.out.println(String.format("================> binlog[%s:%s] , name[%s,%s] , eventType : %s",
                    entry.getHeader().getLogfileName(), entry.getHeader().getLogfileOffset(),
                    entry.getHeader().getSchemaName(), entry.getHeader().getTableName(),
                    eventType));
            // Sync each row change into Redis.
            for (RowData rowData : rowChange.getRowDatasList()) {
                if (eventType == EventType.DELETE) {
                    redisDelete(rowData.getBeforeColumnsList());
                } else if (eventType == EventType.INSERT) {
                    redisInsert(rowData.getAfterColumnsList());
                } else {
                    // NOTE(review): UPDATE events are only printed, not synced to
                    // Redis — confirm whether updates should also call redisInsert.
                    System.out.println("-------> before");
                    printColumn(rowData.getBeforeColumnsList());
                    System.out.println("-------> after");
                    printColumn(rowData.getAfterColumnsList());
                }
            }
        }
    }

    /**
     * Stores the inserted row in Redis as a JSON string under key
     * "user" + first column value.
     * NOTE(review): assumes column 0 is the primary key for every subscribed
     * table — verify, since the subscription covers all schemas/tables.
     */
    private static void redisInsert(List<Column> afterColumnsList) {
        JSONObject jsonObject = new JSONObject();
        for (Column column : afterColumnsList) {
            jsonObject.put(column.getName(), column.getValue());
        }
        if (afterColumnsList.size() > 0) {
            RedisUtil.stringSet("user" + afterColumnsList.get(0).getValue(), jsonObject.toJSONString());
        }
    }

    /**
     * Removes the Redis key for a deleted row ("user" + first column value).
     * NOTE(review): same column-0-is-primary-key assumption as redisInsert.
     */
    private static void redisDelete(List<Column> beforeColumnsList) {
        JSONObject jsonObject = new JSONObject();
        for (Column column : beforeColumnsList) {
            jsonObject.put(column.getName(), column.getValue());
        }
        if (beforeColumnsList.size() > 0) {
            RedisUtil.delKey("user" + beforeColumnsList.get(0).getValue());
        }
    }

    /** Prints each column as "name : value update=changed-flag". */
    private static void printColumn(List<Column> columns) {
        for (Column column : columns) {
            System.out.println(column.getName() + " : " + column.getValue() + " update=" + column.getUpdated());
        }
    }
}
这样就可以进行redis和数据库的信息同步