1. Monitor a directory (spooldir) and write its contents to HDFS
# Name the agent's source, sink, and channel
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# Source: spooldir watches a directory for new files
a1.sources.r1.type = spooldir
a1.sources.r1.spoolDir = /opt/module/flume/upload
# Sink: HDFS
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = hdfs://hadoop202:8020/flume/%Y%m%d/%H
a1.sinks.k1.hdfs.filePrefix = logs-
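# Roll policy: start a new file every 20 s or at ~128 MB; rollCount = 0 disables count-based rolling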
a1.sinks.k1.hdfs.rollInterval = 20
a1.sinks.k1.hdfs.rollSize = 134217700
a1.sinks.k1.hdfs.rollCount = 0
a1.sinks.k1.hdfs.batchSize = 100
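# Round the %Y%m%d/%H escapes in the path down to the hour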
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 1
a1.sinks.k1.hdfs.roundUnit = hour
a1.sinks.k1.hdfs.useLocalTimeStamp = true
a1.sinks.k1.hdfs.fileType = DataStream
# Channel: memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a1.sinks.k1.channel = c1
a1.sources.r1.channels = c1
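To run this agent, a typical launch command looks like the sketch below; the job-file name flume-dir-hdfs.conf is an assumed path, so adjust it to your layout. Note that spooldir renames each fully ingested file with a .COMPLETED suffix and must not be fed files that are still being written.

# Assumed job-file path; substitute your own
bin/flume-ng agent --conf conf --conf-file job/flume-dir-hdfs.conf --name a1 -Dflume.root.logger=INFO,console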
- Load-balancing configuration
# agent
a1.sources = r1
a1.channels = c1
a1.sinks = k1 k2
a1.sinkgroups = g1
# Source: netcat
a1.sources.r1.type = netcat
a1.sources.r1.bind = hadoop202
a1.sources.r1.port = 44444
# Describe the channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Sink group: load balancing across two avro sinks
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = hadoop202
a1.sinks.k1.port = 4441
a1.sinks.k2.type = avro
a1.sinks.k2.hostname = hadoop202
a1.sinks.k2.port = 4442
a1.sinkgroups.g1.sinks = k1 k2
a1.sinkgroups.g1.processor.type = load_balance
a1.sinkgroups.g1.processor.backoff = true
a1.sinkgroups.g1.processor.selector = random
# Bindings
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
a1.sinks.k2.channel = c1
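To exercise the load-balanced group, first start the two downstream agents listening on ports 4441 and 4442 (a receiver sketch follows the failover section), then start this agent and feed events through the netcat source:

# Each line typed here becomes an event, distributed randomly across k1 and k2
nc hadoop202 44444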
- Failover configuration
#agent
a1.sources = r1
a1.channels = c1
a1.sinks = k1 k2
a1.sinkgroups = g1
# Source: netcat
a1.sources.r1.type = netcat
a1.sources.r1.bind = hadoop202
a1.sources.r1.port = 44444
#Describe the channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Sink group: failover (k2 has the higher priority, so it is the primary sink)
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = hadoop202
a1.sinks.k1.port = 4441
a1.sinks.k2.type = avro
a1.sinks.k2.hostname = hadoop202
a1.sinks.k2.port = 4442
a1.sinkgroups.g1.sinks = k1 k2
a1.sinkgroups.g1.processor.type = failover
a1.sinkgroups.g1.processor.priority.k1 = 5
a1.sinkgroups.g1.processor.priority.k2 = 10
a1.sinkgroups.g1.processor.maxpenalty = 10000
# Bindings
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
a1.sinks.k2.channel = c1
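Both sink groups above assume downstream agents with avro sources listening on ports 4441 and 4442. A minimal receiver sketch, assuming the agent name a2 and a logger sink for inspection (run a second copy with port 4442 for the other sink):

# a2: avro source -> memory channel -> logger sink
a2.sources = r1
a2.channels = c1
a2.sinks = k1
a2.sources.r1.type = avro
a2.sources.r1.bind = hadoop202
a2.sources.r1.port = 4441
a2.channels.c1.type = memory
a2.channels.c1.capacity = 1000
a2.channels.c1.transactionCapacity = 100
a2.sinks.k1.type = logger
a2.sources.r1.channels = c1
a2.sinks.k1.channel = c1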
- Flume-to-Kafka example configuration
# agent
a1.sources = r1
a1.channels = c1
a1.sinks = k1
# source
a1.sources.r1.type = netcat
a1.sources.r1.bind = hadoop202
a1.sources.r1.port = 44444
# Interceptor: no multiplexing-selector mapping is needed here, but the custom interceptor must set a header whose key is "topic" and whose value is the target topic for the event
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = Interceptor.KafkaInterceptor$Builder
# channels
a1.channels.c1.type = memory
a1.channels.c1.capacity = 10000
a1.channels.c1.transactionCapacity = 1000
# Kafka sink. If the interceptor sets the topic header, the static topic setting below is unnecessary:
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
#a1.sinks.k1.kafka.topic = test2
a1.sinks.k1.kafka.bootstrap.servers = hadoop202:9092,hadoop203:9092,hadoop204:9092
a1.sinks.k1.kafka.flumeBatchSize = 20
a1.sinks.k1.kafka.producer.acks = 1
a1.sinks.k1.kafka.producer.linger.ms = 1
a1.sinks.k1.kafka.producer.compression.type = snappy
# Bindings
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
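A minimal sketch of the custom interceptor referenced above. The package and class name match the i1.type value in the config; the routing rule (bodies containing "hello" go to topic first, everything else to second) and the topic names are assumptions for illustration. KafkaSink sends each event to the topic named in its "topic" header, which overrides any static kafka.topic setting.

package Interceptor;

import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.interceptor.Interceptor;

public class KafkaInterceptor implements Interceptor {

    @Override
    public void initialize() {
        // No setup needed
    }

    @Override
    public Event intercept(Event event) {
        Map<String, String> headers = event.getHeaders();
        String body = new String(event.getBody(), StandardCharsets.UTF_8);
        // Assumed routing rule and topic names, for illustration only:
        // KafkaSink delivers each event to the topic named in its "topic" header.
        if (body.contains("hello")) {
            headers.put("topic", "first");
        } else {
            headers.put("topic", "second");
        }
        return event;
    }

    @Override
    public List<Event> intercept(List<Event> events) {
        for (Event event : events) {
            intercept(event);
        }
        return events;
    }

    @Override
    public void close() {
        // Nothing to release
    }

    public static class Builder implements Interceptor.Builder {
        @Override
        public Interceptor build() {
            return new KafkaInterceptor();
        }

        @Override
        public void configure(Context context) {
            // No configuration parameters
        }
    }
}

Package the class into a jar and place it in Flume's lib directory so the i1.type class reference resolves at startup.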
- Taildir source tailing log data into multiple (two) Kafka topics, using an interceptor; a sketch follows below
Anything else can be looked up in the official Flume documentation.
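A hedged sketch of that taildir case, assuming the position-file path and watched file pattern shown here and reusing the interceptor sketched above (all illustrative):

# TAILDIR source tailing log files into Kafka; the interceptor picks the topic per event
a1.sources = r1
a1.channels = c1
a1.sinks = k1
a1.sources.r1.type = TAILDIR
a1.sources.r1.positionFile = /opt/module/flume/taildir_position.json
a1.sources.r1.filegroups = f1
a1.sources.r1.filegroups.f1 = /opt/module/applog/log/app.*
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = Interceptor.KafkaInterceptor$Builder
a1.channels.c1.type = memory
a1.channels.c1.capacity = 10000
a1.channels.c1.transactionCapacity = 1000
# No static kafka.topic: the interceptor's "topic" header selects the topic per event
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.kafka.bootstrap.servers = hadoop202:9092,hadoop203:9092,hadoop204:9092
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1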