mysql同步数据到flume,然后flume同步数据到kafka

自定义插件包下载地址 https://download.csdn.net/download/a1213353721/10935724

下面是flume的配置信息,我是在CDH集群环境上配置的,经测试可用

tier1.sources = s-1
tier1.channels = c-1
tier1.sinks = k-1 k-2
# Failover requires a sink group that contains both sinks
tier1.sinkgroups = g-1
tier1.sinkgroups.g-1.sinks = k-1 k-2
# Processor type "failover": all events go to the highest-priority live
# sink; the next sink takes over only when it fails
tier1.sinkgroups.g-1.processor.type = failover
# Higher number = higher priority; every sink must have a distinct
# priority. Here k-2 (10) is the primary and k-1 (5) is the backup.
tier1.sinkgroups.g-1.processor.priority.k-1 = 5
tier1.sinkgroups.g-1.processor.priority.k-2 = 10
# Maximum backoff (ms) before a failed sink is retried; 10 s here —
# tune faster or slower to match your environment
tier1.sinkgroups.g-1.processor.maxpenalty = 10000
########## Channel definition
# Data volume is small, so an in-memory channel is used; JDBC, Kafka or
# file-backed channels are alternatives when durability matters
tier1.channels.c-1.type = memory
# Maximum number of events held in the channel queue
tier1.channels.c-1.capacity = 100000
# Size of the putList/takeList transaction queues: the sink drains up to
# batchSize events per transaction through this queue, so keep it smaller
# than capacity and at least as large as the sink's batch size.
# Official definition: "The maximum number of events the channel will take
# from a source or give to a sink per transaction."
tier1.channels.c-1.transactionCapacity = 1000
tier1.channels.c-1.byteCapacityBufferPercentage = 20
# byteCapacity defaults to 80% of the JVM's maximum heap; usually left unset
#tier1.channels.c-1.byteCapacity = 800000

######### SQL source #################
# Channel used by source s-1; must match the sinks' channel or no data flows
tier1.sources.s-1.channels=c-1
######### For each one of the sources, the type is defined
tier1.sources.s-1.type = org.keedio.flume.source.SQLSource
tier1.sources.s-1.hibernate.connection.url = jdbc:mysql://10.20.1.18:3306/rdms
######### Hibernate Database connection properties
tier1.sources.s-1.hibernate.connection.user = root
tier1.sources.s-1.hibernate.connection.password = bigdata
tier1.sources.s-1.hibernate.connection.autocommit = true
tier1.sources.s-1.hibernate.dialect = org.hibernate.dialect.MySQL5Dialect
tier1.sources.s-1.hibernate.connection.driver_class = com.mysql.jdbc.Driver
# Delay (ms) between successive query executions
tier1.sources.s-1.run.query.delay=10000
# Directory holding the incremental-status file
tier1.sources.s-1.status.file.path = /usr/lib/flume-ng/
#tier1.sources.s-1.status.file.name = SQLSource.status
tier1.sources.s-1.status.file.name.prefix = SQLSource
tier1.sources.s-1.status.file.name.suffix = .status
######## Custom query
# Initial value of the incremental column
tier1.sources.s-1.start.from = 0
#tier1.sources.s-1.table = imsi_test
tier1.sources.s-1.table.prefix = imsi
# When a full custom.query is used, none of the prefix/suffix settings
# above apply. If the incremental column 'id' is aliased, order.by must
# match the alias (or the original column name).
#tier1.sources.s-1.custom.query = select id as 'tid',area as 'fee' from imsi_test where id > $@$ order by id asc
# order.by must stay commented out when the prefix/suffix query form is used
#tier1.sources.s-1.order.by = tid
tier1.sources.s-1.custom.query.prefix = select * from
# NOTE(review): "$@$" is the incremental-value placeholder used by the
# keedio flume-ng-sql-source; the original post showed a garbled "@ @ @".
# Confirm the placeholder against the custom plugin build actually deployed.
tier1.sources.s-1.custom.query.suffix = where id > $@$ order by id asc
tier1.sources.s-1.batch.size = 100
tier1.sources.s-1.max.rows = 100
tier1.sources.s-1.hibernate.connection.provider_class = org.hibernate.connection.C3P0ConnectionProvider
tier1.sources.s-1.hibernate.c3p0.min_size=5
tier1.sources.s-1.hibernate.c3p0.max_size=20

######### sink 1 (backup, priority 5)
# Channel used by sink k-1; must match the source's channel or no data flows
tier1.sinks.k-1.channel = c-1
tier1.sinks.k-1.type = org.apache.flume.sink.kafka.KafkaSink
# Use "kafka.topic": the bare "topic" property is the deprecated pre-1.7
# name and was inconsistent with sink k-2, which already uses kafka.topic
tier1.sinks.k-1.kafka.topic = topic_kafkajdbc
tier1.sinks.k-1.kafka.bootstrap.servers = admin:9092,dn02:9092,nn01:9092
tier1.sinks.k-1.kafka.producer.acks = 1

# Number of events processed per batch
tier1.sinks.k-1.batchSize = 100

######### sink 2 (primary, priority 10)
# Channel used by sink k-2; must match the source's channel or no data flows
tier1.sinks.k-2.channel = c-1
tier1.sinks.k-2.type = org.apache.flume.sink.kafka.KafkaSink
tier1.sinks.k-2.kafka.topic = topic_kafkajdbc
tier1.sinks.k-2.kafka.bootstrap.servers = admin:9092,dn02:9092,nn01:9092
tier1.sinks.k-2.kafka.producer.acks = 1
# Number of events processed per batch
tier1.sinks.k-2.batchSize = 100

  • 1
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值