CDC之erdemcer

安装与下载

前提是有可用的oracle与kafka

##下载路径

erdemcer/kafka-connect-oracle: Kafka Source Connector For Oracle (github.com)
在这里插入图片描述

ORACLE准备工作

##查看数据库模式

如果查询结果已经为 ARCHIVELOG(如下方输出所示),则不需要进行开启归档模式步骤

select log_mode from v$database;

LOG_MODE
------------------------------------
ARCHIVELOG

SQL> select archiver from v$instance;

ARCHIVER
---------------------
STARTED

#为oracle开启归档模式

本次配置使用的为本地的windows环境下的oracle

##登录oracle

sqlplus / as sysdba

##暂时停止oracle服务

shutdown immediate
//输出结果
Database closed.
Database dismounted.
ORACLE instance shut down.

##开启mount服务

startup mount
//输出结果
ORACLE instance started.
//
Total System Global Area 1603411968 bytes
Fixed Size            2213776 bytes
Variable Size          989857904 bytes
Database Buffers      603979776 bytes
Redo Buffers            7360512 bytes
Database mounted.

##开启数据归档

alter database archivelog;
//输出结果
Database altered.

##查看归档结果

archive log list
//输出结果
Database log mode           Archive Mode
Automatic archival           Enabled
Archive destination           /u01/app/oracle/archive_log
Oldest online log sequence     244
Next log sequence to archive   246
Current log sequence           246

##开启自动归档

alter system archive log start;

##开启强制归档

ALTER DATABASE FORCE LOGGING;

##归档结束 打开数据库

alter database open;
//
Database altered.

##确认数据库为归档模式

SQL> select log_mode from v$database;

LOG_MODE
------------------------------------
ARCHIVELOG

SQL> select archiver from v$instance;

ARCHIVER
---------------------
STARTED
————————————————

#补充日志
##开启补充日志

SQL> alter database add supplemental log data ;
 
Database altered.

##开启全体字段补充日志

SQL> alter database add supplemental log data (all) columns; 
 
Database altered.

##确认是否开启

select SUPPLEMENTAL_LOG_DATA_MIN min,
       SUPPLEMENTAL_LOG_DATA_PK  pk,
       SUPPLEMENTAL_LOG_DATA_UI  ui,
       SUPPLEMENTAL_LOG_DATA_FK  fk,
       SUPPLEMENTAL_LOG_DATA_ALL "all"
  from v$database;

MIN			 PK	   UI	     FK        all
------------------------ --------- --------- --------- ---------
YES			 NO	   NO	     NO        YES
————————————————

##创建erdemcer相关用户并授权

#这条语句中的c为用户名 yyh为密码 testbi为表空间。根据个人所需选择。表空间也可以手动创建
CREATE USER c identified by yyh DEFAULT TABLESPACE testbi QUOTA UNLIMITED ON testbi;
GRANT CREATE SESSION TO c;
GRANT SET CONTAINER TO c;
GRANT SELECT ON V_$DATABASE to c;
GRANT FLASHBACK ANY TABLE TO c;
GRANT SELECT ANY TABLE TO c;
GRANT SELECT_CATALOG_ROLE TO c;
GRANT EXECUTE_CATALOG_ROLE TO c;
GRANT SELECT ANY TRANSACTION TO c;
GRANT LOGMINING TO c;


GRANT CREATE TABLE TO c;
GRANT LOCK ANY TABLE TO c;
GRANT ALTER ANY TABLE TO c;
GRANT CREATE SEQUENCE TO c;

GRANT EXECUTE ON DBMS_LOGMNR TO c;
GRANT EXECUTE ON DBMS_LOGMNR_D TO c;

GRANT SELECT ON V_$LOG TO c;
GRANT SELECT ON V_$LOG_HISTORY TO c;
GRANT SELECT ON V_$LOGMNR_LOGS TO c;
GRANT SELECT ON V_$LOGMNR_CONTENTS TO c;
GRANT SELECT ON V_$LOGMNR_PARAMETERS TO c;
GRANT SELECT ON V_$LOGFILE TO c;
GRANT SELECT ON V_$ARCHIVED_LOG TO c;
GRANT SELECT ON V_$ARCHIVE_DEST_STATUS TO c;

create role logmnr_role;
grant create session to logmnr_role;
grant execute_catalog_role, select any transaction, select any dictionary to logmnr_role;
#kminer为官方示例中的独立挖掘用户,可选;本文实际将角色授予上文创建的用户c
create user kminer identified by kminerpass;
grant logmnr_role to c;

##创建前置topic

#三条命令应指向同一个zookeeper地址,请按实际环境修改
kafka-topics.sh --create --zookeeper hadoop52:2181/kafka_root --topic connect-configs --replication-factor 3 --partitions 1 --config cleanup.policy=compact
kafka-topics.sh --create --zookeeper hadoop52:2181/kafka_root --topic connect-offsets --replication-factor 3 --partitions 50 --config cleanup.policy=compact
kafka-topics.sh --create --zookeeper hadoop52:2181/kafka_root --topic connect-status --replication-factor 3 --partitions 10 --config cleanup.policy=compact

编译erdemcer与kafka连接

创建erdemcer目录

mkdir /opt/module/kafkaConnectors
将下载的zip包上传至此目录

mvn clean package

将下载的zip包编译为 kafka-connect-oracle-1.0.71.jar
通过idea等工具亦可
在这里插入图片描述

拷贝jar与配置文件

cp config/OracleSourceConnector.properties $KAFKA_HOME/config/
cp lib/ojdbc7.jar $KAFKA_HOME/libs/
# 将编译后的jar包也拷贝至kafka的libs下
cp kafka-connect-oracle-1.0.71.jar $KAFKA_HOME/libs/

编写配置文件

cd $KAFKA_HOME/config/
vim OracleSourceConnector.properties

# 连接(connectors)名称 
name=oracle-logminer-connector2
#连接主类
connector.class=com.ecer.kafka.connect.oracle.OracleSourceConnector
#别名
db.name.alias=orcl
#最大任务数
tasks.max=1
#接收数据的kafka主题名  手动创建 或者开启消费自动创建
topic=oracle-kafka2
#监控的dbname
db.name=ORCL
#oracle主机号
db.hostname=192.168.0.87
#端口
db.port=1521
#上面创建的用户
db.user=c
#上面创建的用户密码
db.user.password=yyh
db.fetch.size=1
# `必填 必大写` 监控的表 多个,隔开
table.whitelist=HR.*
#黑名单 可不填
table.blacklist=
#dml
parse.dml.data=true
#手动控制偏移量
reset.offset=false
start.scn=
#多租户
multitenant=false

更改kafka connect-standalone.properties

vim connect-standalone.properties
单节点

#本机
bootstrap.servers=hadoop51:9092


key.converter.schemas.enable=false
value.converter.schemas.enable=false
#偏移量文件 
offset.storage.file.filename=/home/kafka/connect.offsets
#erdemcer目录
plugin.path=/opt/module/kafkaConnectors/kafka-connect-oracle-master

更改connect-distributed.properties

vim connect-distributed.properties
多节点

bootstrap.servers=hadoop51:9092,hadoop52:9092,hadoop53:9092


group.id=connect-cluster2

key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter



offset.storage.topic=connect-offsets
offset.storage.replication.factor=3
offset.storage.partitions=3

config.storage.topic=connect-configs
config.storage.replication.factor=3


status.storage.topic=connect-status
status.storage.replication.factor=3



offset.flush.interval.ms=10000

rest.advertised.host.name=hadoop51



plugin.path=/opt/module/kafkaConnectors/kafka-connect-oracle-master

启动

启动kafka

#建议写为脚本
nohup kafka-server-start.sh /usr/local/kafka/kafka_2.12-2.2.0/config/server.properties >server.log &

根据配置文件启动kafka-connector 单节点

#建议写为脚本
nohup bin/connect-standalone.sh config/connect-standalone.properties config/OracleSourceConnector.properties >kafkaConnector.log &
#查看是否成功
curl -s localhost:8083/connectors/

启动多节点 kafka-connector

nohup bin/connect-distributed.sh config/connect-distributed.properties >kafkaConnector.log &
#查看是否成功
curl -s localhost:8083/connectors/
#如果不存在则手动创建
curl -X POST http://localhost:8083/connectors -H "Content-Type: application/json" -d '{
"name": "oracle-logminer-connector2",
"config": {
"connector.class":"com.ecer.kafka.connect.oracle.OracleSourceConnector",
"db.name.alias":"orcl",
"tasks.max":"1",
"topic":"oracle-kafka2",
"db.name":"ORCL",
"db.hostname":"192.168.0.87",
"db.port":"1521",
"db.user":"c",
"db.user.password":"yyh",
"db.fetch.size":"1",
"table.whitelist":"HR.*",
"table.blacklist":"",
"parse.dml.data":"true",
"reset.offset":"false",
"start.scn":"",
"multitenant":"false"
}
}'

验证

开启消费者
对oracle ORCL下的HR.TABLE进行操作

##成功截图
[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-hXs4Kqct-1625656899676)(/tdl/tfl/pictures/202107/tapd_23627501_1625656476_79.png)]

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 2
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值