Sample .flu configurations combining Flume agents, sources, channels, and sinks

#flume-ng agent --name agent0 --conf-file ./flume.flu -Dflume.root.logger=INFO,console
#Once the spooling-directory source has fully ingested a file such as file.txt, it renames it to file.txt.COMPLETED
#To keep the source from picking up a file that is still being written, write it under a dot-prefixed (hidden) name such as .file first and rename it once it is complete
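#A minimal sketch of that write-then-rename pattern (file names are placeholders; /spooldir matches the spoolDir used by agent0 below):
#cp data.log /spooldir/.file.txt && mv /spooldir/.file.txt /spooldir/file.txt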

#agent spooldir file logger
agent0.sources=source0
agent0.channels=channel00 channel01
agent0.sinks=sink00 sink01

agent0.sources.source0.type=spooldir
agent0.sources.source0.spoolDir=/spooldir
agent0.sources.source0.channels=channel00 channel01
agent0.sources.source0.fileHeader=true
agent0.sources.source0.selector.type=replicating

agent0.channels.channel00.type=file
agent0.channels.channel00.checkpointDir=/checkpoint/00
agent0.channels.channel00.dataDirs=/datas/00
agent0.channels.channel01.type=file
agent0.channels.channel01.checkpointDir=/checkpoint/01
agent0.channels.channel01.dataDirs=/datas/01

agent0.sinks.sink00.type=logger
agent0.sinks.sink00.channel=channel00
agent0.sinks.sink01.type=logger
agent0.sinks.sink01.channel=channel01

# agent exec memory logger
agent1.sources=source1
agent1.channels=channel1
agent1.sinks=sink1

agent1.sources.source1.type=exec
agent1.sources.source1.command=tail -F /spooldir/file.log
agent1.sources.source1.channels=channel1

agent1.channels.channel1.type=memory
agent1.channels.channel1.capacity=10000
agent1.channels.channel1.transactionCapacity=10000
agent1.channels.channel1.byteCapacityBufferPercentage=20
agent1.channels.channel1.byteCapacity=800000

agent1.sinks.sink1.type=logger
agent1.sinks.sink1.channel=channel1
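#To exercise agent1's exec source, appending to the tailed file is enough; the events then show up through the logger sink:
#echo "sample exec-source event" >> /spooldir/file.log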

#agent2 syslog spillablememory file_roll
agent2.sources=source2
agent2.channels=channel2
agent2.sinks=sink2

#alternative source types: syslogtcp, syslogudp
agent2.sources.source2.type=multiport_syslogtcp
agent2.sources.source2.host=DNS
agent2.sources.source2.ports=PORT PORT PORT
agent2.sources.source2.channels=channel2

agent2.channels.channel2.type=SPILLABLEMEMORY
agent2.channels.channel2.memoryCapacity=10000
agent2.channels.channel2.overflowCapacity=1000000
agent2.channels.channel2.byteCapacity=800000
agent2.channels.channel2.checkpointDir=/checkpoint
agent2.channels.channel2.dataDirs=/datas

agent2.sinks.sink2.type=file_roll
agent2.sinks.sink2.channel=channel2
agent2.sinks.sink2.sink.directory=/sinkdir
agent2.sinks.sink2.sink.rollInterval=0
agent2.sinks.sink2.sink.pathManager.prefix=file_roll
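#A quick way to test the multiport syslog TCP source (HOST and one of the PORTs above are placeholders); the leading <13> is a syslog priority value:
#echo '<13>Jan  1 00:00:00 myhost myapp: test syslog event' | nc HOST PORT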

#agent3 syslogtcp spillablememory hdfs
agent3.sources=source3
agent3.channels=channel3
agent3.sinks=sink3

agent3.sources.source3.type=syslogtcp
agent3.sources.source3.port=PORT
agent3.sources.source3.host=DNS
agent3.sources.source3.channels=channel3

agent3.channels.channel3.type=SPILLABLEMEMORY
agent3.channels.channel3.memoryCapacity=10000
agent3.channels.channel3.overflowCapacity=10000000
agent3.channels.channel3.byteCapacity=8000000
agent3.channels.channel3.checkpointDir=/checkpoint
agent3.channels.channel3.dataDirs=/datas

agent3.sinks.sink3.type=hdfs
agent3.sinks.sink3.channel=channel3
agent3.sinks.sink3.hdfs.path=/
agent3.sinks.sink3.hdfs.filePrefix=prefix
agent3.sinks.sink3.hdfs.fileSuffix=.suffix
agent3.sinks.sink3.hdfs.inUsePrefix=inUse-
agent3.sinks.sink3.hdfs.inUseSuffix=.inUseTmp
agent3.sinks.sink3.hdfs.round=true
agent3.sinks.sink3.hdfs.roundValue=10
agent3.sinks.sink3.hdfs.roundUnit=minute
agent3.sinks.sink3.hdfs.rollInterval=0
agent3.sinks.sink3.hdfs.rollSize=0
agent3.sinks.sink3.hdfs.rollCount=0
agent3.sinks.sink3.hdfs.idleTimeout=0
agent3.sinks.sink3.hdfs.fileType=DataStream
agent3.sinks.sink3.hdfs.writeFormat=Text
agent3.sinks.sink3.hdfs.timeZone=Asia/Shanghai
agent3.sinks.sink3.hdfs.useLocalTimeStamp=true
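#Once agent3 is receiving events, the rolled files can be listed under the configured hdfs.path (here just "/", clearly a placeholder):
#hdfs dfs -ls -R /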

#agent4 avro spillablememory hbase
agent4.sources=source4
agent4.channels=channel4
agent4.sinks=sink4

agent4.sources.source4.type=avro
agent4.sources.source4.port=PORT
agent4.sources.source4.bind=DNS
agent4.sources.source4.channels=channel4

agent4.channels.channel4.type=SPILLABLEMEMORY
agent4.channels.channel4.memoryCapacity=10000
agent4.channels.channel4.overflowCapacity=10000000
agent4.channels.channel4.byteCapacity=8000000
agent4.channels.channel4.checkpointDir=/checkpoint
agent4.channels.channel4.dataDirs=/datas

agent4.sinks.sink4.type=hbase
agent4.sinks.sink4.table=namespace:table
agent4.sinks.sink4.columnFamily=cf
agent4.sinks.sink4.channel=channel4
agent4.sinks.sink4.serializer=org.apache.flume.sink.hbase.RegexHbaseEventSerializer
agent4.sinks.sink4.serializer.regex=REGEX
agent4.sinks.sink4.serializer.colNames=ROW_KEY,name,sex
agent4.sinks.sink4.serializer.rowKeyIndex=0
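#One way to feed agent4's avro source is the avro client bundled with Flume (events.csv is a hypothetical input file). With colNames=ROW_KEY,name,sex, a regex along the lines of ([^,]*),([^,]*),([^,]*) would split lines such as "1,tom,male" into the row key and the two columns; REGEX above is left as a placeholder:
#flume-ng avro-client -H DNS -p PORT -F events.csv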

#agent5 http source with a multiplexing channel selector (the replicating variant is shown in agent0)
agent5.sources=source5
agent5.channels=channel50 channel51 channel52
agent5.sinks=sink50 sink51

agent5.sources.source5.type=http
agent5.sources.source5.port=PORT
agent5.sources.source5.channels=channel50 channel51 channel52
agent5.sources.source5.handler=org.apache.flume.source.http.JSONHandler
agent5.sources.source5.selector.type=multiplexing
agent5.sources.source5.selector.header=state
agent5.sources.source5.selector.mapping.CZ=channel50
agent5.sources.source5.selector.mapping.US=channel51
agent5.sources.source5.selector.default=channel52

#channel50-52 are assumed here to be plain memory channels
agent5.channels.channel50.type=memory
agent5.channels.channel51.type=memory
agent5.channels.channel52.type=memory

agent5.sinks.sink50.type=file_roll
agent5.sinks.sink50.sink.directory=/sinkdir/50
agent5.sinks.sink50.channel=channel50
agent5.sinks.sink51.type=file_roll
agent5.sinks.sink51.sink.directory=/sinkdir/51
agent5.sinks.sink51.channel=channel51
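#The JSONHandler expects a JSON array of events, each with "headers" and "body"; an event whose state header is CZ should be routed to channel50 (HOST/PORT are placeholders):
#curl -X POST -H 'Content-Type: application/json' -d '[{"headers":{"state":"CZ"},"body":"hello flume"}]' http://HOST:PORT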

Reposted from: https://www.cnblogs.com/mrerror/p/10856818.html
