3台机器:2台 flume 往 kafka 里面采集日志,1台从 kafka 里面消费,写到 hdfs 里面
第一台机器
大概是
# --- Machine 1 agent: TAILDIR source -> Kafka channel ---
# No sink is declared: the Kafka channel itself writes events into the topic.
# Name this agent's components
a1.sources = r1
a1.channels = c1
# TAILDIR source: tails the files matched by the filegroup below
a1.sources.r1.type = TAILDIR
a1.sources.r1.filegroups = f1
# Files to monitor (regex on the absolute path)
a1.sources.r1.filegroups.f1 = /log/app.*
# Position file: records per-file read offsets so tailing resumes after a
# restart (a default location is used when this is left unconfigured)
a1.sources.r1.positionFile= /flume/taildir_position.json
# ETL interceptor attached to the source.
# NOTE(review): com.log.interceptor.LogEtlInterceptor$Mybuilder is project
# code not visible here — confirm the class is on the Flume classpath.
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = com.log.interceptor.LogEtlInterceptor$Mybuilder
# Kafka channel: buffers events directly in the Kafka topic
# (replace the placeholder broker addresses with real hosts)
a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
a1.channels.c1.kafka.bootstrap.servers = kafkaip地址:9092,kafkaip地址:9092,kafkaip地址:9092
a1.channels.c1.kafka.topic = topic_log
# Write the raw event body only (no Flume Avro wrapping), so plain Kafka
# consumers downstream can read the records as-is
a1.channels.c1.parseAsFlumeEvent = false
# Bind the source to the channel (this agent has no sink stage)
a1.sources.r1.channels = c1
第二台机器配置,与第一台相同
略
第三台机器配置
# --- Machine 3 agent: Kafka channel -> HDFS sink ---
# The Kafka channel acts as the consumer of topic_log; no source is declared.
a1.channels = c1
a1.sinks= k1
# Kafka channel: reads events from the topic (same brokers/topic the
# collector agents write to)
a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
a1.channels.c1.kafka.bootstrap.servers = hadoop102:9092,hadoop103:9092,hadoop104:9092
a1.channels.c1.kafka.topic = topic_log
# Records were produced as raw bodies, so do not parse them as Flume events
a1.channels.c1.parseAsFlumeEvent = false
# HDFS sink: lands files under one dated directory per day
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = /origin_data/gmall/log/topic_log/%Y-%m-%d
a1.sinks.k1.hdfs.filePrefix = a-
# Directory (time-escape) rounding: bucket the path timestamp to 1 hour.
# The %Y-%m-%d escapes require a timestamp; useLocalTimeStamp=true takes it
# from the local clock instead of an event header.
# NOTE(review): with parseAsFlumeEvent=false the channel presumably delivers
# bare bodies without headers, so any timestamp header set by the source-side
# interceptor would not survive — confirm against the Flume Kafka channel docs.
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 1
a1.sinks.k1.hdfs.roundUnit = hour
a1.sinks.k1.hdfs.useLocalTimeStamp = true
# File rolling, tuned to avoid many small files on HDFS:
# roll every 100 s or at 128 MB, whichever comes first;
# rollCount=0 disables event-count-based rolling
a1.sinks.k1.hdfs.rollInterval = 100
a1.sinks.k1.hdfs.rollSize = 134217728
a1.sinks.k1.hdfs.rollCount = 0
# Bind the sink to the channel
a1.sinks.k1.channel = c1