1. Environment
CPU: 4 cores
Memory: 8 GB
IP: 192.168.0.187
iptables firewall enabled
SELinux disabled
Java >= 1.5
Java installed via yum; configure the JAVA_HOME environment variable ahead of time
vim /etc/profile.d/java.sh

#!/bin/bash
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk   # adjust the path to your actual installation
export PATH=$PATH:$JAVA_HOME/bin

source /etc/profile.d/java.sh
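To confirm the variable is picked up before moving on, a quick sanity check along these lines can help (the JDK path is the one assumed above):

source /etc/profile.d/java.sh
echo "JAVA_HOME=$JAVA_HOME"        # should print the OpenJDK install directory
"$JAVA_HOME/bin/java" -version     # canal 1.1.x is usually run on JDK 1.8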
2. MySQL
MySQL account: root
MySQL password: liykpntuu9?C
Steps:
vim /etc/my.cnf

[mysqld]
log-bin=mysql-bin    # enable binlog
binlog-format=ROW    # use ROW format
server_id=1          # required for MySQL replication; must not clash with canal's slaveId

service mysqld restart
Log in to the database and run:
CREATE USER canal IDENTIFIED BY 'canal!%123AD';
GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%';
FLUSH PRIVILEGES;
# Check the privileges granted to the user
mysql> show grants for canal;
+---------------------------------------------------------------------------+
| Grants for canal@%                                                         |
+---------------------------------------------------------------------------+
| GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%'  |
+---------------------------------------------------------------------------+
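Before starting canal it is worth double-checking that the binlog settings actually took effect and that the canal account can log in. A minimal check, using the credentials from the steps above, might be:

# Binlog must be ON and in ROW format for canal to parse events
mysql -uroot -p -e "SHOW VARIABLES LIKE 'log_bin'; SHOW VARIABLES LIKE 'binlog_format'; SHOW MASTER STATUS;"
# The canal account should be able to connect and see its replication grants
mysql -ucanal -p'canal!%123AD' -e "SHOW GRANTS;"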
3. Canal
# Download
wget https://github.com/alibaba/canal/releases/download/canal-1.1.4/canal.deployer-1.1.4.tar.gz
mkdir -p /usr/local/canal
tar -zxv -f canal.deployer-1.1.4.tar.gz -C /usr/local/canal

# Edit the database connection settings
cd /usr/local/canal
vim conf/example/instance.properties

## mysql serverId
canal.instance.mysql.slaveId = 123
# position info; change to match your database
canal.instance.master.address = 127.0.0.1:3306
canal.instance.master.journal.name =
canal.instance.master.position =
canal.instance.master.timestamp =
#canal.instance.standby.address =
#canal.instance.standby.journal.name =
#canal.instance.standby.position =
#canal.instance.standby.timestamp =
# username/password; change to match your database
canal.instance.dbUsername = canal
canal.instance.dbPassword = canal!%123AD
canal.instance.defaultDatabaseName =
canal.instance.connectionCharset = UTF-8
# table regex
canal.instance.filter.regex = .*\\..*

# Start
bash bin/startup.sh

# Check the server log
tail -n 30 logs/canal/canal.log
2019-09-20 09:48:46.987 [main] INFO  com.alibaba.otter.canal.deployer.CanalLauncher - ## set default uncaught exception handler
2019-09-20 09:48:47.019 [main] INFO  com.alibaba.otter.canal.deployer.CanalLauncher - ## load canal configurations
2019-09-20 09:48:47.028 [main] INFO  com.alibaba.otter.canal.deployer.CanalStarter - ## start the canal server.
2019-09-20 09:48:47.059 [main] INFO  com.alibaba.otter.canal.deployer.CanalController - ## start the canal server[192.168.0.187(192.168.0.187):11111]
2019-09-20 09:48:48.228 [main] INFO  com.alibaba.otter.canal.deployer.CanalStarter - ## the canal server is running now ......

# Check the instance log
tail -n 30 logs/example/example.log
2019-09-20 09:48:47.395 [main] INFO  c.a.o.c.i.spring.support.PropertyPlaceholderConfigurer - Loading properties file from class path resource [canal.properties]
2019-09-20 09:48:47.399 [main] INFO  c.a.o.c.i.spring.support.PropertyPlaceholderConfigurer - Loading properties file from class path resource [example/instance.properties]
2019-09-20 09:48:47.580 [main] WARN  o.s.beans.GenericTypeAwarePropertyDescriptor - Invalid JavaBean property 'connectionCharset' being accessed! Ambiguous write methods found next to actually used [public void com.alibaba.otter.canal.parse.inbound.mysql.AbstractMysqlEventParser.setConnectionCharset(java.lang.String)]: [public void com.alibaba.otter.canal.parse.inbound.mysql.AbstractMysqlEventParser.setConnectionCharset(java.nio.charset.Charset)]
2019-09-20 09:48:47.626 [main] INFO  c.a.o.c.i.spring.support.PropertyPlaceholderConfigurer - Loading properties file from class path resource [canal.properties]
2019-09-20 09:48:47.626 [main] INFO  c.a.o.c.i.spring.support.PropertyPlaceholderConfigurer - Loading properties file from class path resource [example/instance.properties]
2019-09-20 09:48:48.140 [main] INFO  c.a.otter.canal.instance.spring.CanalInstanceWithSpring - start CannalInstance for 1-example
2019-09-20 09:48:48.147 [main] WARN  c.a.o.canal.parse.inbound.mysql.dbsync.LogEventConvert - --> init table filter : ^.*\..*$
2019-09-20 09:48:48.147 [main] WARN  c.a.o.canal.parse.inbound.mysql.dbsync.LogEventConvert - --> init table black filter :
2019-09-20 09:48:48.165 [main] INFO  c.a.otter.canal.instance.core.AbstractCanalInstance - start successful....
2019-09-20 09:48:48.288 [destination = example , address = /127.0.0.1:3306 , EventParser] WARN  c.a.o.c.p.inbound.mysql.rds.RdsBinlogEventParserProxy - ---> begin to find start position, it will be long time for reset or first position
2019-09-20 09:48:48.288 [destination = example , address = /127.0.0.1:3306 , EventParser] WARN  c.a.o.c.p.inbound.mysql.rds.RdsBinlogEventParserProxy - prepare to find start position just show master status
2019-09-20 09:48:49.288 [destination = example , address = /127.0.0.1:3306 , EventParser] WARN  c.a.o.c.p.inbound.mysql.rds.RdsBinlogEventParserProxy - ---> find start position successfully, EntryPosition[included=false,journalName=mysql-bin.000004,position=4,serverId=1,gtid=<null>,timestamp=1568943354000] cost : 989ms , the next step is binlog dump

# Stop
bash bin/stop.sh

# Port usage
ss -tlnp
State      Recv-Q Send-Q  Local Address:Port   Peer Address:Port
LISTEN     0      50      *:11110              *:*        users:(("java",pid=2078,fd=109))
LISTEN     0      50      *:11111              *:*        users:(("java",pid=2078,fd=105))
LISTEN     0      3       *:11112              *:*        users:(("java",pid=2078,fd=87))

# Port descriptions
# admin port:   11110
# tcp port:     11111
# metrics port: 11112

# Where the ports are configured
# canal/conf/canal.properties
#   canal.port = 11111
#   canal.metrics.pull.port = 11112
# canal/conf/canal_local.properties
#   canal.admin.port = 11110
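After startup, a rough health check (run from /usr/local/canal, ports as listed above) is simply to confirm the TCP port is listening and that the logs contain no errors:

ss -tlnp | grep 11111                                          # canal's tcp port should be listening
grep -i error logs/canal/canal.log logs/example/example.log    # ideally prints nothing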
Build an HA cluster

Reference: https://github.com/alibaba/canal/wiki/AdminGuide

# Get a second extracted copy of the canal directory, e.g. canal_2
# First modify the original canal's configuration
vim canal.properties
canal.zkServers = localhost:2181,localhost:2182,localhost:2183
canal.instance.global.spring.xml = classpath:spring/default-instance.xml

# Copy this file into the matching directory under canal_2, then edit it again there
vim canal.properties
canal.port = 11114
canal.metrics.pull.port = 11115
vim canal_local.properties
canal.admin.port = 11113

# Copy instance.properties from the canal example directory into the matching directory under canal_2, then edit it there
vim instance.properties
canal.instance.mysql.slaveId = 124    ## use a different value on each node; the slaveId just needs to be unique

# Note: the instance directory name must be exactly the same on both nodes; HA mode is managed by
# instance name, and both nodes must use the default-instance.xml configuration.
# Then start each node.

# To summarize:
# 1. The three port numbers used in the two config files must differ between nodes
# 2. Use default-instance.xml
# 3. The MySQL slaveId must differ between nodes
# 4. The instance directory name must be identical on both nodes

# After startup, check logs/example/example.log: only one node will show the "start successful" message.
# Alternatively, inspect the node information in ZooKeeper to see that the active node is localhost:11111
# get /otter/canal/destinations/example/running

canal-admin: manage Canal through a web UI

# canal-admin prerequisites:
#   MySQL, used to store configuration, node data, and so on
#   canal version >= 1.1.4 (canal-server must expose the dynamic admin management interface)
wget https://github.com/alibaba/canal/releases/download/canal-1.1.4/canal.admin-1.1.4.tar.gz
mkdir -p /usr/local/canal_admin
tar -zxv -f canal.admin-1.1.4.tar.gz -C /usr/local/canal_admin
cd /usr/local/canal_admin

vim conf/application.yml
server:
  port: 8089    # admin UI port; allow it through the firewall
spring:
  jackson:
    date-format: yyyy-MM-dd HH:mm:ss
    time-zone: GMT+8

spring.datasource:
  address: 127.0.0.1:3306        # database address and port
  database: canal_manager        # database name
  username: canal_admin          # database account; note this is separate from the canal account created earlier
  password: ABC123,[email protected]#11     # database password
  driver-class-name: com.mysql.jdbc.Driver
  url: jdbc:mysql://${spring.datasource.address}/${spring.datasource.database}?useUnicode=true&characterEncoding=UTF-8&useSSL=false
  hikari:
    maximum-pool-size: 30
    minimum-idle: 1

canal:
  adminUser: admin      # platform account
  adminPasswd: admin    # platform password

# Note: the database, account, and password must be created in advance
# If you change the default database name, the sample SQL file must be changed to match
# Here only the default database account and password are changed; everything else keeps its default

# Initialize the metadata database
# The initialization SQL script creates the canal_manager database by default; run it with a privileged account such as root
# canal_manager.sql is in the conf directory by default
mysql -hlocalhost -uroot -p
mysql> source /usr/local/canal_admin/conf/canal_manager.sql;

# Start
bash bin/startup.sh

# Check the admin log
tail -n 30 logs/admin.log
2019-09-20 14:50:54.595 [main] INFO  org.apache.coyote.http11.Http11NioProtocol - Starting ProtocolHandler ["http-nio-8089"]
2019-09-20 14:50:54.624 [main] INFO  org.apache.tomcat.util.net.NioSelectorPool - Using a shared selector for servlet write/read
2019-09-20 14:50:54.812 [main] INFO  o.s.boot.web.embedded.tomcat.TomcatWebServer - Tomcat started on port(s): 8089 (http) with context path ''
2019-09-20 14:50:54.818 [main] INFO  com.alibaba.otter.canal.admin.CanalAdminApplication - Started CanalAdminApplication in 11.057 seconds (JVM running for 12.731)

# Access it in a browser; allow port 8089 through the firewall
# URL: http://192.168.0.187:8089/  default credentials: admin/123456
# The password can be changed after logging in
# Login passwords are hashed with the MySQL5 password scheme and stored directly in the database;
# the last statement in canal_manager.sql contains the login account and password

# Usage
# Create a cluster and add the existing canal servers to it
# You can form a cluster on a single host by running multiple canal instances on different ports
# Reference: https://github.com/alibaba/canal/wiki/AdminGuide

# Stop
bash bin/stop.sh
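Several steps in this guide say to allow a port through the firewall. Since this environment keeps iptables enabled, a sketch of the rules for the web UIs used here might look like the following (assuming the iptables-services way of persisting rules; adapt to your own chain policy):

iptables -I INPUT -p tcp --dport 8089 -j ACCEPT    # canal-admin UI
iptables -I INPUT -p tcp --dport 9090 -j ACCEPT    # zkui (section 4)
iptables -I INPUT -p tcp --dport 8048 -j ACCEPT    # Kafka Eagle (section 5)
service iptables save                              # persist the rules (iptables-services package)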
4. ZooKeeper
ZooKeeper installation is omitted here.
# Install zkui, a web UI for viewing and monitoring the ZooKeeper cluster
cd /usr/local
git clone https://github.com/DeemOpen/zkui.git
yum install -y maven

# Switch to the Aliyun Maven mirror
vim /etc/maven/settings.xml
<mirrors>
    <mirror>
        <id>nexus-aliyun</id>
        <mirrorOf>central</mirrorOf>
        <name>Nexus aliyun</name>
        <url>http://maven.aliyun.com/nexus/content/groups/public</url>
    </mirror>
</mirrors>

cd zkui/
mvn clean install

# Change the defaults in the config file
vim config.cfg
serverPort=9090    # listening port
zkServer=localhost:2181,localhost:2182,localhost:2183    # do not use 127.0.0.1
sessionTimeout=300
# userSet holds the usernames and passwords for the web UI
# administrator
#   admin:manager
# regular user
#   appconfig:appconfig

# Start the program in the background
vim start.sh
#!/bin/bash
nohup java -jar target/zkui-2.0-SNAPSHOT-jar-with-dependencies.jar &

# Access it in a browser
# Allow port 9090 through the firewall; later this can be put behind an nginx proxy
http://192.168.0.187:9090/
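Once start.sh is in place, a quick way to confirm the UI actually answers (IP and port as configured above) is:

bash start.sh
sleep 5
curl -sI http://192.168.0.187:9090/ | head -n 1    # expect an HTTP 200 or a redirect to the login page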
5. Kafka
Kafka installation is omitted here.
One-command Kafka startup
# One issue: the JMX port has to be configured in kafka-server-start.sh. There are three ways to do this:
#   1. Copy the kafka directory, e.g. kafka-1, kafka-2, kafka-3, and edit kafka-server-start.sh in each copy
#   2. Specify the port in front of each command inside the startup script start.sh
#   3. Make several copies of kafka-server-start.sh, edit each one, and reference them from start.sh
# Pick whichever one you prefer.

# Option 1: edit the corresponding line in each copy as shown below; note the different port numbers.
# Each broker uses the kafka-server-start.sh from its own directory, and start.sh changes accordingly.

# kafka-1/bin/kafka-server-start.sh
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    # export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
    export KAFKA_HEAP_OPTS="-server -Xms2G -Xmx2G -XX:PermSize=128m -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:ParallelGCThreads=8 -XX:ConcGCThreads=5 -XX:InitiatingHeapOccupancyPercent=70"
    export JMX_PORT="9997"
fi

# kafka-2/bin/kafka-server-start.sh
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    # export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
    export KAFKA_HEAP_OPTS="-server -Xms2G -Xmx2G -XX:PermSize=128m -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:ParallelGCThreads=8 -XX:ConcGCThreads=5 -XX:InitiatingHeapOccupancyPercent=70"
    export JMX_PORT="9998"
fi

# kafka-3/bin/kafka-server-start.sh
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    # export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
    export KAFKA_HEAP_OPTS="-server -Xms2G -Xmx2G -XX:PermSize=128m -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:ParallelGCThreads=8 -XX:ConcGCThreads=5 -XX:InitiatingHeapOccupancyPercent=70"
    export JMX_PORT="9999"
fi

# start.sh
#!/bin/bash
bash kafka-1/bin/kafka-server-start.sh -daemon config/server-1.properties
bash kafka-2/bin/kafka-server-start.sh -daemon config/server-2.properties
bash kafka-3/bin/kafka-server-start.sh -daemon config/server-3.properties

# Option 2: use a single directory and a single kafka-server-start.sh,
# and set the JMX port in front of each command.
vim start.sh
#!/bin/bash
JMX_PORT=9997 bash bin/kafka-server-start.sh -daemon config/server-1.properties
JMX_PORT=9998 bash bin/kafka-server-start.sh -daemon config/server-2.properties
JMX_PORT=9999 bash bin/kafka-server-start.sh -daemon config/server-3.properties

# Option 3: use different copies of kafka-server-start.sh in the same directory;
# start.sh changes accordingly.
cp kafka-server-start.sh kafka-server-start-1.sh
cp kafka-server-start.sh kafka-server-start-2.sh
cp kafka-server-start.sh kafka-server-start-3.sh

vim kafka-server-start-1.sh
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    # export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
    export KAFKA_HEAP_OPTS="-server -Xms2G -Xmx2G -XX:PermSize=128m -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:ParallelGCThreads=8 -XX:ConcGCThreads=5 -XX:InitiatingHeapOccupancyPercent=70"
    export JMX_PORT="9997"
fi

vim kafka-server-start-2.sh
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    # export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
    export KAFKA_HEAP_OPTS="-server -Xms2G -Xmx2G -XX:PermSize=128m -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:ParallelGCThreads=8 -XX:ConcGCThreads=5 -XX:InitiatingHeapOccupancyPercent=70"
    export JMX_PORT="9998"
fi

vim kafka-server-start-3.sh
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    # export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
    export KAFKA_HEAP_OPTS="-server -Xms2G -Xmx2G -XX:PermSize=128m -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:ParallelGCThreads=8 -XX:ConcGCThreads=5 -XX:InitiatingHeapOccupancyPercent=70"
    export JMX_PORT="9999"
fi

vim start.sh
#!/bin/bash
bash bin/kafka-server-start-1.sh -daemon config/server-1.properties
bash bin/kafka-server-start-2.sh -daemon config/server-2.properties
bash bin/kafka-server-start-3.sh -daemon config/server-3.properties

vim stop.sh
#!/bin/bash
bash bin/kafka-server-stop.sh
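After running start.sh it is worth confirming that all three brokers actually registered. A quick check might look like this, assuming a Kafka 2.x distribution where kafka-topics.sh still accepts --zookeeper, the single-directory layout from options 2/3, and a local ZooKeeper on 2181; the ha_test topic name is just an example:

# Each broker id should show up under /brokers/ids
bin/zookeeper-shell.sh localhost:2181 ls /brokers/ids
# Creating a fully replicated test topic exercises all three brokers
bin/kafka-topics.sh --create --zookeeper localhost:2181 --topic ha_test --partitions 3 --replication-factor 3
bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic ha_test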
Monitoring the Kafka cluster
# Monitor the Kafka cluster with Kafka Eagle, with the metrics trend charts enabled
cd /usr/local/src
wget https://github.com/smartloli/kafka-eagle-bin/archive/v1.3.9.tar.gz
# The archive is nested: after extracting it (more than once) you get the kafka-eagle-web-1.3.9 directory; copy that directory to /usr/local
cd /usr/local/kafka-eagle-web-1.3.9/conf
vim system-config.properties
kafka.eagle.zk.cluster.alias=cluster1
cluster1.zk.list=localhost:2181,localhost:2182,localhost:2183
kafka.eagle.metrics.charts=true    # enable the trend charts; this requires Kafka's JMX port, which is set in $KAFKA_HOME/bin/kafka-server-start.sh
kafka.eagle.sql.fix.error=true     # enable detailed error logging
# Keep the rest at the defaults. The database is sqlite; the path must already exist, or change it to the current directory
# The database can also be switched to MySQL
kafka.eagle.url=jdbc:sqlite:/usr/local/kafka-eagle-web-1.3.9/db/ke.db

# Note
# The value of kafka.eagle.zk.cluster.alias must match the prefix of the zk.list property
# (the part before the first dot), e.g. both cluster1; otherwise no data will be retrieved

# Add environment variables
vim /etc/profile.d/kafka_eagle.sh
#!/bin/bash
export KE_HOME=/usr/local/kafka-eagle-web-1.3.9
export PATH=$PATH:$KE_HOME/bin
source /etc/profile.d/kafka_eagle.sh

# Available commands
bash bin/ke.sh start|stop|status|stats|restart

# Start
bash bin/ke.sh start
*******************************************************************
* Kafka Eagle system monitor port successful...
*******************************************************************
[2019-09-20 12:10:32] INFO: Status Code[0]
[2019-09-20 12:10:32] INFO: [Job done!]
Welcome to
(ASCII art banner: KAFKA EAGLE)
Version 1.3.9
*******************************************************************
* Kafka Eagle Service has started success.
* Welcome, Now you can visit 'http://127.0.0.1:8048/ke'
* Account:admin ,Password:123456
*******************************************************************
* <Usage> ke.sh [start|status|stop|restart|stats] </Usage>
* <Usage> https://www.kafka-eagle.org/ </Usage>
*******************************************************************

# Access it in a browser; allow the port through the firewall (later this can go behind an Nginx proxy)
URL: 192.168.0.187:8048/ke
Account: admin, password: 123456

# Querying data with KSQL in the Kafka Eagle UI
# Left navigation:
# 1. In Topic - List, note the Topic Name and Partition Indexes values
#    ID  Topic Name     Partition Indexes  Partition Numbers  Created              Modify               Operate
#    1   canal_manager  [0]                1                  2019-09-20 17:27:15  2019-09-20 17:27:15
# 2. In Topic - KSQL, enter a query such as
#    select * from "canal_manager" where "partition" in (0) limit 1
#    The time taken and the result are shown below the query editor
# If anything goes wrong, the log below usually contains the specific error message
# tail -n 30 kafka-eagle-web-1.3.9/logs/ke_console.out
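Because the trend charts pull their data over JMX, it can save debugging time to confirm that the JMX ports set in the Kafka start scripts earlier are really listening:

ss -tlnp | grep -E '9997|9998|9999'    # one line per broker if JMX_PORT was exported correctly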
Configuration file reference
# Detailed description of the configuration file
######################################
# The ZooKeeper quorums for one or more Kafka clusters
######################################
kafka.eagle.zk.cluster.alias=cluster1,cluster2
cluster1.zk.list=dn1:2181,dn2:2181,dn3:2181
cluster2.zk.list=tdn1:2181,tdn2:2181,tdn3:2181
######################################
# ZooKeeper thread pool size
######################################
kafka.zk.limit.size=25
######################################
# Kafka Eagle web UI port
######################################
kafka.eagle.webui.port=8048
######################################
# If your offsets are stored in Kafka, set this property to kafka;
# if they are stored in ZooKeeper, the property can be commented out.
# In general this depends on the consumer API you use: even on
# Kafka 0.10.x or later, consumers written against the 0.8.2.x API
# still keep their offsets in ZooKeeper.
######################################
cluster1.kafka.eagle.offset.storage=kafka
######################################
# If one of your clusters is a new version (0.10.x or later) and the
# other is old (0.8 or 0.9), you can set them differently like this;
# if both are new, both values can be kafka.
######################################
cluster2.kafka.eagle.offset.storage=zookeeper
######################################
# Whether to enable the metrics charts; disabled by default
######################################
kafka.eagle.metrics.charts=false
######################################
# If Kafka SQL queries against a topic fail, try enabling this
# property; it is off by default
######################################
kafka.eagle.sql.fix.error=false
######################################
# Mail server settings, used for alerting
######################################
kafka.eagle.mail.enable=false
kafka.eagle.mail.sa=
kafka.eagle.mail.username=
kafka.eagle.mail.password=
kafka.eagle.mail.server.host=
kafka.eagle.mail.server.port=
######################################
# Alert recipients, separated by commas
######################################
kafka.eagle.alert.users=[email protected]
######################################
# Token used by the super administrator to delete topics
######################################
kafka.eagle.topic.token=keadmin
######################################
# Enable these if Kafka runs with SASL
######################################
kafka.eagle.sasl.enable=false
kafka.eagle.sasl.protocol=SASL_PLAINTEXT
kafka.eagle.sasl.mechanism=PLAIN
######################################
# Kafka Eagle stores its data in Sqlite by default; to use MySQL,
# swap in the driver, username, password, and connection URL
######################################
#kafka.eagle.driver=com.mysql.jdbc.Driver
#kafka.eagle.url=jdbc:mysql://127.0.0.1:3306/ke?useUnicode=true&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull
#kafka.eagle.username=root
#kafka.eagle.password=123456
kafka.eagle.driver=org.sqlite.JDBC
kafka.eagle.url=jdbc:sqlite:/Users/dengjie/workspace/kafka-egale/db/ke.db
kafka.eagle.username=root
kafka.eagle.password=root
6. Delivering data to Kafka
# First configure canal: edit canal.properties
# Change serverMode to kafka
vim conf/canal.properties
canal.serverMode = kafka
canal.mq.servers = localhost:9092,localhost:9093,localhost:9094

vim conf/example/instance.properties
# mq config
canal.mq.topic=canal_manager    # the database name; operations on all tables in this database go to this topic
# dynamic topic route by schema or table regex
# canal.mq.dynamicTopic=.*\\..*
canal.mq.partition=0
# hash partition config
# canal.mq.partitionsNum=10
# canal.mq.partitionHash=.*\\..*
# See the official documentation for the exact routing rules

# Enable automatic topic creation in Kafka; the setting lives in Kafka's server.properties
echo 'auto.create.topics.enable=true' >> config/server-1.properties
echo 'auto.create.topics.enable=true' >> config/server-2.properties
echo 'auto.create.topics.enable=true' >> config/server-3.properties

# Restart canal and Kafka after making these changes

# Use the canal-admin UI to check canal's status (Server management, operations, logs)
# Use the Kafka Eagle UI to check the Kafka topics:
#   the left navigation Topic - List shows the generated topic, here canal_manager, the database name set above;
#   click the Topic Name to see how many messages it holds
# Or inspect the data canal delivers to Kafka from the command line:
kafka-console-consumer.sh --topic canal_manager --bootstrap-server localhost:9092 --from-beginning

# Insert a row
{"data":[{"id":"13","username":"13","password":"6BB4837EB74329105EE4568DDA7DC67ED2CA2AD9","name":"Canal Manager","roles":"admin","introduction":null,"avatar":null,"creation_date":"2019-07-14 00:05:28"}],"database":"canal_manager","es":1568972329000,"id":10,"isDdl":false,"mysqlType":{"id":"bigint(20)","username":"varchar(31)","password":"varchar(128)","name":"varchar(31)","roles":"varchar(31)","introduction":"varchar(255)","avatar":"varchar(255)","creation_date":"timestamp"},"old":null,"pkNames":["id"],"sql":"","sqlType":{"id":-5,"username":12,"password":12,"name":12,"roles":12,"introduction":12,"avatar":12,"creation_date":93},"table":"canal_user","ts":1568972329456,"type":"INSERT"}

# Delete a row
{"data":[{"id":"13","username":"13","password":"6BB4837EB74329105EE4568DDA7DC67ED2CA2AD9","name":"Canal Manager","roles":"admin","introduction":null,"avatar":null,"creation_date":"2019-07-14 00:05:28"}],"database":"canal_manager","es":1568972368000,"id":11,"isDdl":false,"mysqlType":{"id":"bigint(20)","username":"varchar(31)","password":"varchar(128)","name":"varchar(31)","roles":"varchar(31)","introduction":"varchar(255)","avatar":"varchar(255)","creation_date":"timestamp"},"old":null,"pkNames":["id"],"sql":"","sqlType":{"id":-5,"username":12,"password":12,"name":12,"roles":12,"introduction":12,"avatar":12,"creation_date":93},"table":"canal_user","ts":1568972369005,"type":"DELETE"}

# Update a row
{"data":[{"id":"13","username":"13","password":"6BB4837EB74329105EE4568DDA7DC67ED2CA2AD9","name":"Canal Manager","roles":"1111","introduction":null,"avatar":null,"creation_date":"2019-09-20 23:52:09"}],"database":"canal_manager","es":1568994729000,"id":4,"isDdl":false,"mysqlType":{"id":"bigint(20)","username":"varchar(31)","password":"varchar(128)","name":"varchar(31)","roles":"varchar(31)","introduction":"varchar(255)","avatar":"varchar(255)","creation_date":"timestamp"},"old":[{"roles":"admin","creation_date":"2019-07-14 00:05:28"}],"pkNames":["id"],"sql":"","sqlType":{"id":-5,"username":12,"password":12,"name":12,"roles":12,"introduction":12,"avatar":12,"creation_date":93},"table":"canal_user","ts":1568994729999,"type":"UPDATE"}

# Alternatively, query the data with KSQL in the Kafka Eagle UI, as described earlier
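To eyeball the change stream without reading raw JSON, one option is to pipe the console consumer through jq (install it with yum install -y jq; topic and broker address as configured above) and project a few fields:

kafka-console-consumer.sh --topic canal_manager --bootstrap-server localhost:9092 --from-beginning \
  | jq -r '[.ts, .type, .database, .table, (.data | length)] | @tsv'
# Each output line shows: event timestamp, operation type (INSERT/UPDATE/DELETE), database, table, and row count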