Deploying Confluent on CentOS 7 with CDH 5.13

Version information
cdh5.13.0
confluent-4.0.3

Cluster plan:

Machines             Role                 Port
node1,node2,node3    kafka                9092
node1,node2,node3    zookeeper            2181
node1,node2,node3    schema registry      8081
node1,node2,node3    kafka rest proxy     8082
node1,node2,node3    confluent connector  8083
node1,node2,node3    ksql                 8088

On node1: extract the tarball and configure environment variables

0. JDK 1.8 (omitted)
1. Extract the tarball
tar -zxvf confluent-4.0.3-2.11.tar.gz -C /data/soft/
 
2. Configure environment variables
vim /etc/profile
----------------
export CONFLUENT_HOME=/data/soft/confluent-4.0.3

export PATH=$PATH:$CONFLUENT_HOME/bin
----------------
 
3. Reload the environment variables
source /etc/profile
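
A quick check that the variables took effect (paths as configured above):

echo $CONFLUENT_HOME            # expect /data/soft/confluent-4.0.3
which schema-registry-start     # expect a path under $CONFLUENT_HOME/bin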

On node1: configure ZooKeeper and Kafka

cd $CONFLUENT_HOME/etc/kafka
# Back up Confluent's stock config files
mv zookeeper.properties zookeeper.properties.bak
mv server.properties server.properties.bak
 
# Copy over the config files from the existing CDH Kafka and ZooKeeper clusters
cp /opt/cloudera/parcels/KAFKA-3.1.1-1.3.1.1.p0.2/etc/kafka/conf.dist/server.properties .
cp /opt/cloudera/parcels/CDH-5.13.0-1.cdh5.13.0.p0.29/etc/zookeeper/conf.dist/zoo.cfg zookeeper.properties
 
# Edit the Kafka config and append the following Confluent parameters
vim $CONFLUENT_HOME/etc/kafka/server.properties
--------------------------------------------
metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter
confluent.metrics.reporter.bootstrap.servers=node1:9092,node2:9092,node3:9092
confluent.metrics.reporter.topic.replicas=2
confluent.support.metrics.enable=true
confluent.support.customer.id=anonymous
delete.topic.enable=true
 
# If you use the ZooKeeper and Kafka bundled with Confluent instead, just edit the two config files backed up above, following the usual ZooKeeper/Kafka cluster conventions
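
For reference, the copied server.properties should already carry the per-broker basics from the CDH cluster. A minimal sketch of what node1's file is assumed to contain (log.dirs is a placeholder; the /kafka chroot matches the verification commands at the end of this post):

broker.id=1
listeners=PLAINTEXT://node1:9092
log.dirs=/var/local/kafka/data
zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka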

On node1: configure schema-registry

vim $CONFLUENT_HOME/etc/schema-registry/schema-registry.properties
---------------------------------
# Change the following parameter; leave the rest unchanged
kafkastore.bootstrap.servers=PLAINTEXT://node1:9092,PLAINTEXT://node2:9092,PLAINTEXT://node3:9092
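
Once schema-registry is up (see the startup order below), its REST endpoint on 8081 gives a quick health check; /subjects is part of the standard Schema Registry API:

curl http://node1:8081/subjects    # expect a JSON array (empty on a fresh install)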

On node1: configure KSQL

vim $CONFLUENT_HOME/etc/ksqldb/ksql-server.properties
---------------------------------
# Change the following parameters; leave the rest unchanged
bootstrap.servers=node1:9092,node2:9092,node3:9092
ksql.schema.registry.url=http://node1:8081,http://node2:8081,http://node3:8081
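
Once ksql-server is running, it can be sanity-checked over HTTP (assuming this KSQL build exposes the usual /info endpoint) or with the KSQL CLI:

curl http://node1:8088/info    # expect server metadata as JSON
ksql http://node1:8088         # interactive KSQL CLI session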

On node1: configure Kafka REST

vim $CONFLUENT_HOME/etc/kafka-rest/kafka-rest.properties
---------------------------------
# Change the following parameters; leave the rest unchanged
schema.registry.url=http://node1:8081,http://node2:8081,http://node3:8081
zookeeper.connect=node1:2181,node2:2181,node3:2181
bootstrap.servers=PLAINTEXT://node1:9092,node2:9092,node3:9092
listeners=http://0.0.0.0:8082
consumer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor
producer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor
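
With kafka-rest started, the proxy answers on 8082; /topics is part of the REST Proxy v2 API:

curl http://node1:8082/topics    # expect the cluster's topic list as JSON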

On node1: configure Confluent connectors (Kafka Connect)

vim $CONFLUENT_HOME/etc/schema-registry/connect-avro-distributed.properties
---------------------------------
# Change the following parameters; leave the rest unchanged
bootstrap.servers=node1:9092,node2:9092,node3:9092
key.converter.schema.registry.url=http://node1:8081,http://node2:8081,http://node3:8081
value.converter.schema.registry.url=http://node1:8081,http://node2:8081,http://node3:8081
config.storage.replication.factor=3
offset.storage.replication.factor=3
status.storage.replication.factor=3
rest.port=8083
rest.advertised.port=8083
# .properties files do not expand environment variables, so spell out the path
plugin.path=/data/soft/confluent-4.0.3/share/java
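
After the Connect worker is started, its REST API on 8083 confirms it is up and which plugins were loaded:

curl http://node1:8083/                    # worker version info
curl http://node1:8083/connector-plugins   # installed connector plugins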

On node1: distribute Confluent to node2 and node3, then configure the environment variables there as well

1. node1: distribute the confluent directory
scp -r /data/soft/confluent-4.0.3 root@node2:/data/soft/
scp -r /data/soft/confluent-4.0.3 root@node3:/data/soft/
 
2. node2 & node3: configure environment variables
vim /etc/profile
----------------
export CONFLUENT_HOME=/data/soft/confluent-4.0.3

export PATH=$PATH:$CONFLUENT_HOME/bin
----------------
 
3. node2 & node3: reload the environment variables
source /etc/profile

On node2 & node3: change the IDs in the config files

# node2: change the Kafka broker ID and the Control Center ID
vim $CONFLUENT_HOME/etc/kafka/server.properties
---------------------------------
# Change the following parameter; leave the rest unchanged
broker.id=2
 
vim $CONFLUENT_HOME/etc/confluent-control-center/control-center-dev.properties
---------------------------------
# Change the following parameters; leave the rest unchanged.
# The Control Center ID must be unique across hosts; the KSQL URL points at the local node.
confluent.controlcenter.id=2
confluent.controlcenter.ksql.ksqlDB.url=http://node2:8088
 
-------------------------------------------------------
 
# node3: change the Kafka broker ID and the Control Center ID
vim $CONFLUENT_HOME/etc/kafka/server.properties
---------------------------------
# Change the following parameter; leave the rest unchanged
broker.id=3
 
vim $CONFLUENT_HOME/etc/confluent-control-center/control-center-dev.properties
---------------------------------
# Change the following parameters; leave the rest unchanged.
# The Control Center ID must be unique across hosts; the KSQL URL points at the local node.
confluent.controlcenter.id=3
confluent.controlcenter.ksql.ksqlDB.url=http://node3:8088

On each node: confirm the schema-registry bootstrap setting, then start the services in the background

vi /data/soft/confluent-4.0.3/etc/schema-registry/schema-registry.properties
---------------------------------
kafkastore.bootstrap.servers=PLAINTEXT://node1:9092,PLAINTEXT://node2:9092,PLAINTEXT://node3:9092

sh /data/soft/confluent-4.0.3/bin/schema-registry-start -daemon /data/soft/confluent-4.0.3/etc/schema-registry/schema-registry.properties

sh /data/soft/confluent-4.0.3/bin/kafka-rest-start -daemon /data/soft/confluent-4.0.3/etc/kafka-rest/kafka-rest.properties

sh /data/soft/confluent-4.0.3/bin/connect-distributed -daemon /data/soft/confluent-4.0.3/etc/schema-registry/connect-avro-distributed.properties

Startup order

1. Starting zookeeper
2. Starting kafka
3. Starting schema-registry
4. Starting kafka-rest
5. Starting connect
6. Starting ksql-server
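
Put together, a minimal start script for one node, as a sketch: it assumes the CDH-managed ZooKeeper and Kafka are already running (e.g. via Cloudera Manager), and that each *-start script accepts the -daemon flag the same way the commands above use it:

#!/bin/bash
# Start the Confluent services on this node, in dependency order.
# ZooKeeper and Kafka are assumed to be running already (CDH-managed).
CH=/data/soft/confluent-4.0.3
sh $CH/bin/schema-registry-start -daemon $CH/etc/schema-registry/schema-registry.properties
sh $CH/bin/kafka-rest-start -daemon $CH/etc/kafka-rest/kafka-rest.properties
sh $CH/bin/connect-distributed -daemon $CH/etc/schema-registry/connect-avro-distributed.properties
sh $CH/bin/ksql-server-start -daemon $CH/etc/ksqldb/ksql-server.properties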

Run jps on each node; if startup succeeded, the Java processes for the services above will be listed.

Shutdown order

1. Stopping ksql-server
2. Stopping connect
3. Stopping kafka-rest
4. Stopping schema-registry
5. Stopping kafka
6. Stopping zookeeper
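
The matching stop commands as a sketch: schema-registry-stop, kafka-rest-stop, and ksql-server-stop ship with the platform, while the distributed Connect worker has no bundled stop script here, so it is killed by its main class name (an assumption about the process signature):

#!/bin/bash
# Stop the Confluent services on this node, in reverse dependency order.
CH=/data/soft/confluent-4.0.3
sh $CH/bin/ksql-server-stop
pkill -f ConnectDistributed        # Connect worker main class; no stop script shipped
sh $CH/bin/kafka-rest-stop
sh $CH/bin/schema-registry-stop
# ZooKeeper and Kafka are stopped through Cloudera Manager (CDH-managed).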

Integrating Debezium

[root@node1 cdcPack]# ls
debezium-connector-mongodb-1.6.3.Final-plugin.tar.gz
debezium-connector-mysql-1.6.3.Final-plugin.tar.gz
debezium-connector-oracle-1.6.3.Final-plugin.tar.gz
debezium-connector-postgres-1.6.3.Final-plugin.tar.gz
debezium-connector-sqlserver-1.6.3.Final-plugin.tar.gz

Copy the packages to the other nodes

scp -r cdcPack/ root@node2:~
scp -r cdcPack/ root@node3:~

Extract into the Confluent plugin directory

tar -zxvf ./cdcPack/debezium-connector-mongodb-1.6.3.Final-plugin.tar.gz -C /data/soft/confluent-4.0.3/share/java/
tar -zxvf ./cdcPack/debezium-connector-mysql-1.6.3.Final-plugin.tar.gz -C /data/soft/confluent-4.0.3/share/java/
tar -zxvf ./cdcPack/debezium-connector-oracle-1.6.3.Final-plugin.tar.gz -C /data/soft/confluent-4.0.3/share/java/
tar -zxvf ./cdcPack/debezium-connector-postgres-1.6.3.Final-plugin.tar.gz -C /data/soft/confluent-4.0.3/share/java/
tar -zxvf ./cdcPack/debezium-connector-sqlserver-1.6.3.Final-plugin.tar.gz -C /data/soft/confluent-4.0.3/share/java/

Edit the Connect configuration file

Make sure plugin.path in connect-avro-distributed.properties covers /data/soft/confluent-4.0.3/share/java, where the Debezium plugins were just extracted (reference: https://www.cnblogs.com/eric-ln/p/12392696.html).

Sync the file to node2 and node3

scp /data/soft/confluent-4.0.3/etc/schema-registry/connect-avro-distributed.properties root@node2:/data/soft/confluent-4.0.3/etc/schema-registry/connect-avro-distributed.properties
scp /data/soft/confluent-4.0.3/etc/schema-registry/connect-avro-distributed.properties root@node3:/data/soft/confluent-4.0.3/etc/schema-registry/connect-avro-distributed.properties

Edit the connect-distributed script
Following https://blog.csdn.net/sweatott/article/details/79101425, edit bin/connect-distributed and add debezium-connector-mysql to the `for library ...` line so the Debezium jars end up on the CLASSPATH, as follows:

for library in "kafka" "confluent-common" "kafka-serde-tools" "monitoring-interceptors" "debezium-connector-mysql"; 
do
 ...

Sync the script to node2 and node3

scp /data/soft/confluent-4.0.3/bin/connect-distributed root@node2:/data/soft/confluent-4.0.3/bin/connect-distributed
scp /data/soft/confluent-4.0.3/bin/connect-distributed root@node3:/data/soft/confluent-4.0.3/bin/connect-distributed
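
After restarting the Connect workers, a Debezium connector can be registered through the Connect REST API. A sketch for MySQL: the database host, credentials, and names below are placeholders, and the properties follow the Debezium 1.6 MySQL connector docs:

curl -X POST -H "Content-Type: application/json" http://node1:8083/connectors -d '{
  "name": "mysql-cdc-demo",
  "config": {
    "connector.class": "io.debezium.connector.mysql.MySqlConnector",
    "database.hostname": "mysql-host",
    "database.port": "3306",
    "database.user": "debezium",
    "database.password": "dbz-password",
    "database.server.id": "184054",
    "database.server.name": "demo",
    "database.include.list": "inventory",
    "database.history.kafka.bootstrap.servers": "node1:9092,node2:9092,node3:9092",
    "database.history.kafka.topic": "schema-changes.inventory"
  }
}'

# Check that the connector and its task are RUNNING
curl http://node1:8083/connectors/mysql-cdc-demo/status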

Verifying Kafka availability

# List topics (note the 2181/kafka chroot path)
kafka-topics --list --zookeeper node2:2181/kafka
# Create a topic
kafka-topics --create --zookeeper node1:2181/kafka --replication-factor 1 --partitions 1 --topic test

# Console producer on the topic
kafka-console-producer --broker-list node1:9092 --topic test

# Console consumer on the topic
kafka-console-consumer --bootstrap-server node2:9092 --topic test --from-beginning
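
The same round trip can also go through the REST proxy to exercise the whole stack; this uses the REST Proxy v2 JSON API against the test topic created above:

# Produce a JSON message to "test" via the REST proxy
curl -X POST -H "Content-Type: application/vnd.kafka.json.v2+json" \
     --data '{"records":[{"value":{"hello":"world"}}]}' \
     http://node1:8082/topics/test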