一、环境准备
安装并启动 ZooKeeper 与 Kafka(Kafka Connect 运行所依赖的基础服务)。
二、用户权限准备
-- Connect to the root container (CDB) as SYSDBA.
sqlplus sys/top_secret@//localhost:1521/ORCLCDB as sysdba
-- Create the LogMiner tablespace in the CDB. REUSE overwrites an existing
-- datafile of the same name; AUTOEXTEND ... MAXSIZE UNLIMITED lets it grow
-- without a cap beyond the initial 25M.
CREATE TABLESPACE logminer_tbs DATAFILE '/opt/oracle/oradata/ORCLCDB/logminer_tbs.dbf'
SIZE 25M REUSE AUTOEXTEND ON MAXSIZE UNLIMITED;
exit;
-- Repeat inside the pluggable database (ORCLPDB1) so the tablespace also
-- exists there -- note the datafile path is under the PDB's own directory.
sqlplus sys/top_secret@//localhost:1521/ORCLPDB1 as sysdba
CREATE TABLESPACE logminer_tbs DATAFILE '/opt/oracle/oradata/ORCLCDB/ORCLPDB1/logminer_tbs.dbf'
SIZE 25M REUSE AUTOEXTEND ON MAXSIZE UNLIMITED;
exit;
-- Connect to the root container (CDB) as SYSDBA to create the capture user.
sqlplus sys/top_secret@//localhost:1521/ORCLCDB as sysdba
-- Common user (the c## prefix is mandatory for CDB-wide users).
-- CONTAINER=ALL on the CREATE and on every GRANT below makes the user and
-- each privilege effective in the CDB and in all PDBs.
CREATE USER c##dbzuser IDENTIFIED BY dbz
DEFAULT TABLESPACE logminer_tbs
QUOTA UNLIMITED ON logminer_tbs
CONTAINER=ALL;
-- Connectivity: log in and switch between containers (CDB <-> PDB).
GRANT CREATE SESSION TO c##dbzuser CONTAINER=ALL;
GRANT SET CONTAINER TO c##dbzuser CONTAINER=ALL;
-- Snapshot/read access: database metadata, consistent (flashback) reads of
-- source tables, data-dictionary access, and in-flight transaction info.
GRANT SELECT ON V_$DATABASE to c##dbzuser CONTAINER=ALL;
GRANT FLASHBACK ANY TABLE TO c##dbzuser CONTAINER=ALL;
GRANT SELECT ANY TABLE TO c##dbzuser CONTAINER=ALL;
GRANT SELECT_CATALOG_ROLE TO c##dbzuser CONTAINER=ALL;
GRANT EXECUTE_CATALOG_ROLE TO c##dbzuser CONTAINER=ALL;
GRANT SELECT ANY TRANSACTION TO c##dbzuser CONTAINER=ALL;
-- Core LogMiner privilege used by the Debezium Oracle connector.
GRANT LOGMINING TO c##dbzuser CONTAINER=ALL;
-- NOTE(review): presumably needed for the connector's internal flush table
-- and sequences -- these three come from the Debezium setup guide; confirm
-- against the connector version in use before trimming.
GRANT CREATE TABLE TO c##dbzuser CONTAINER=ALL;
GRANT LOCK ANY TABLE TO c##dbzuser CONTAINER=ALL;
GRANT CREATE SEQUENCE TO c##dbzuser CONTAINER=ALL;
-- LogMiner PL/SQL packages: start/stop mining sessions and build the
-- dictionary.
GRANT EXECUTE ON DBMS_LOGMNR TO c##dbzuser CONTAINER=ALL;
GRANT EXECUTE ON DBMS_LOGMNR_D TO c##dbzuser CONTAINER=ALL;
-- Redo/archive log views the connector polls while mining.
GRANT SELECT ON V_$LOG TO c##dbzuser CONTAINER=ALL;
GRANT SELECT ON V_$LOG_HISTORY TO c##dbzuser CONTAINER=ALL;
GRANT SELECT ON V_$LOGMNR_LOGS TO c##dbzuser CONTAINER=ALL;
GRANT SELECT ON V_$LOGMNR_CONTENTS TO c##dbzuser CONTAINER=ALL;
GRANT SELECT ON V_$LOGMNR_PARAMETERS TO c##dbzuser CONTAINER=ALL;
GRANT SELECT ON V_$LOGFILE TO c##dbzuser CONTAINER=ALL;
GRANT SELECT ON V_$ARCHIVED_LOG TO c##dbzuser CONTAINER=ALL;
GRANT SELECT ON V_$ARCHIVE_DEST_STATUS TO c##dbzuser CONTAINER=ALL;
-- Transaction/session statistics views.
GRANT SELECT ON V_$TRANSACTION TO c##dbzuser CONTAINER=ALL;
GRANT SELECT ON V_$MYSTAT TO c##dbzuser CONTAINER=ALL;
GRANT SELECT ON V_$STATNAME TO c##dbzuser CONTAINER=ALL;
exit;
三、实际操作
1. 进入kafka/config目录,编辑connect-distributed.properties文件
配置 plugin.path 为 debezium-connector-oracle 插件所在目录,
该目录由 debezium-connector-oracle-1.6.4.Final-plugin.tar.gz 解压得到。
2. 启动 connect-distributed服务 ./bin/connect-distributed.sh config/connect-distributed.properties &
3. 配置 Oracle Connector,监视 Oracle 数据库
# Register the Debezium Oracle connector with Kafka Connect.
# FIX: JSON does not allow // comments -- the inline comments in the original
# payload made it invalid and Kafka Connect would reject the request. The
# explanations are kept here instead:
#   name                  - connector name, arbitrary
#   database.server.name  - logical server name, arbitrary; topics are named
#                           <server.name>.<schema>.<table>,
#                           e.g. xxx.BCM.CUST_CORP_INFO for the value below
#   database.hostname/port/user/password - source Oracle connection info
#   database.dbname       - SID; obtain via: select instance_name from v$instance;
#   database.pdb.name     - pluggable database name
#   table.include.list    - tables to capture
#   database.history.kafka.bootstrap.servers - Kafka cluster holding the
#                           schema-history topic named by database.history.kafka.topic
#   database.history.skip.unparseable.ddl    - skip DDL statements that cannot be parsed
curl -H "Content-Type: application/json" -X POST -d '{
  "name": "testXindai001",
  "config": {
    "connector.class": "io.debezium.connector.oracle.OracleConnector",
    "tasks.max": "1",
    "database.server.name": "xxx",
    "database.hostname": "xx.xx.xx.xx",
    "database.port": "15210",
    "database.user": "C##BDP",
    "database.password": "password",
    "database.dbname": "bcmprod",
    "database.pdb.name": "pdb_bcmprod",
    "table.include.list": "BCM.CUST_CORP_INFO,BCM.CR_FAREP_MANAGE_CHILD,BCM.CR_FAREP_MANAGE_MAIN",
    "database.history.kafka.bootstrap.servers": "xx.xx.xx.xx:9092",
    "database.history.kafka.topic": "test",
    "database.history.skip.unparseable.ddl": "true",
    "log.mining.strategy": "online_catalog"
  }
}' http://xx.xx.xx.xx:8083/connectors/
connect连接器相关命令
//查看有哪些连接器
curl -i -X GET -H "Accept:application/json" -H "Content-Type:application/json" xx.xx.xx.xx:8083/connectors/
//查看当前连接器的状态
curl -i -X GET -H "Accept:application/json" -H "Content-Type:application/json" xx.xx.xx.xx:8083/connectors/"testXindai001"/status
//删除当前连接器
curl -i -X DELETE xx.xx.xx.xx:8083/connectors/"testXindai001"/
//查看所有topic
./bin/kafka-topics.sh --bootstrap-server xx.xx.xx.xx:9092 --list
//查看topic的消费情况
./bin/kafka-console-consumer.sh --bootstrap-server xx.xx.xx.xx:9092 --topic xxx.test
//删除某一个topic
./bin/kafka-topics.sh --bootstrap-server xx.xx.xx.xx:9092 --delete --topic xxx.test
//这里是从kafka消费数据到下游数据的java程序
nohup java -jar kafka-input-oracle-test-1.0.jar>/opt/cloudera/parcels/CDH-6.3.21.cdh6.3.2.p0.1605554/lib/kafka/logs/test.log 2>&1 &