Flume Scenario Configurations

1. Monitoring a directory with spooldir and writing to HDFS

# Name the agent's source, sink, and channel
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Source: spooldir watches a directory and ingests files placed in it
a1.sources.r1.type = spooldir
a1.sources.r1.spoolDir = /opt/module/flume/upload

# Sink: write events to HDFS, bucketed into one directory per day and hour
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = hdfs://hadoop202:8020/flume/%Y%m%d/%H
a1.sinks.k1.hdfs.filePrefix = logs-
# Roll a new file every 20 s or at ~128 MB (just under one HDFS block), never by event count
a1.sinks.k1.hdfs.rollInterval = 20
a1.sinks.k1.hdfs.rollSize = 134217700
a1.sinks.k1.hdfs.rollCount = 0
a1.sinks.k1.hdfs.batchSize = 100
# Round the timestamp used in the path down to the hour
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 1
a1.sinks.k1.hdfs.roundUnit = hour
# Use the agent's local time for the %Y%m%d/%H escapes instead of a timestamp header
a1.sinks.k1.hdfs.useLocalTimeStamp = true
# Write plain text files rather than SequenceFiles
a1.sinks.k1.hdfs.fileType = DataStream

# Channel: buffer events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sinks.k1.channel = c1
a1.sources.r1.channels = c1
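
A minimal sketch of how this agent might be launched, assuming the configuration above is saved as job/spooldir-hdfs.conf under the Flume home directory (the file name and location are illustrative):

bin/flume-ng agent --conf conf --name a1 --conf-file job/spooldir-hdfs.conf -Dflume.root.logger=INFO,console

Once a file in /opt/module/flume/upload has been ingested, spooldir renames it with a .COMPLETED suffix; the HDFS sink additionally needs the Hadoop client jars on Flume's classpath.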

2. Load balancing configuration
# Agent components: one source, one channel, two sinks in one sink group
a1.sources = r1
a1.channels = c1
a1.sinks = k1 k2
a1.sinkgroups = g1

# Source: netcat listening for test events
a1.sources.r1.type = netcat
a1.sources.r1.bind = hadoop202
a1.sources.r1.port = 44444

# Describe the channel 
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Sink group: load balancing across two avro sinks
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = hadoop202
a1.sinks.k1.port = 4441

a1.sinks.k2.type = avro
a1.sinks.k2.hostname = hadoop202
a1.sinks.k2.port = 4442

a1.sinkgroups.g1.sinks = k1 k2
a1.sinkgroups.g1.processor.type = load_balance
# Temporarily back off a sink after a failure
a1.sinkgroups.g1.processor.backoff = true
# Select sinks at random (round_robin is the other option)
a1.sinkgroups.g1.processor.selector = random

# Bind source and sinks to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
a1.sinks.k2.channel = c1
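
Both avro sinks assume downstream agents with avro sources already listening on hadoop202:4441 and 4442. A minimal sketch of one such receiving agent, using a logger sink for verification (the agent name a2 and the logger sink are illustrative):

a2.sources = r1
a2.channels = c1
a2.sinks = k1

# Avro source matching the upstream avro sink k1
a2.sources.r1.type = avro
a2.sources.r1.bind = hadoop202
a2.sources.r1.port = 4441

a2.channels.c1.type = memory
a2.channels.c1.capacity = 1000
a2.channels.c1.transactionCapacity = 100

# Print received events to the log for verification
a2.sinks.k1.type = logger

a2.sources.r1.channels = c1
a2.sinks.k1.channel = c1

A second agent on port 4442 pairs with sink k2. The failover example below reuses the same two ports, so the same receivers work there as well.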

3. Failover

# Agent components: one source, one channel, two sinks in one failover sink group
a1.sources = r1
a1.channels = c1
a1.sinks = k1 k2
a1.sinkgroups = g1

# Source: netcat listening for test events
a1.sources.r1.type = netcat
a1.sources.r1.bind = hadoop202
a1.sources.r1.port = 44444

# Describe the channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Sink group: failover between two avro sinks
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = hadoop202
a1.sinks.k1.port = 4441

a1.sinks.k2.type = avro
a1.sinks.k2.hostname = hadoop202
a1.sinks.k2.port = 4442

a1.sinkgroups.g1.sinks = k1 k2
a1.sinkgroups.g1.processor.type = failover
# The highest-priority available sink receives all events:
# k2 (priority 10) is active, k1 (priority 5) is the standby
a1.sinkgroups.g1.processor.priority.k1 = 5
a1.sinkgroups.g1.processor.priority.k2 = 10
# Cap the backoff penalty for a failed sink at 10 s
a1.sinkgroups.g1.processor.maxpenalty = 10000

# Bind source and sinks to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
a1.sinks.k2.channel = c1
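
To exercise the failover, send a few events to the netcat source and then kill the downstream agent behind k2; events should shift to k1, and k2 is retried only after its penalty expires. A simple way to send test events (assuming netcat is installed):

nc hadoop202 44444
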
4. Flume-to-Kafka example configuration

# agent 
a1.sources = r1
a1.channels = c1
a1.sinks = k1
# source 
a1.sources.r1.type = netcat
a1.sources.r1.bind = hadoop202
a1.sources.r1.port = 44444

# Interceptor: no static topic mapping is configured here; instead the custom
# interceptor must set a header whose key is "topic" and whose value is the
# name of the Kafka topic the event should be routed to
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = Interceptor.KafkaInterceptor$Builder

# channels
a1.channels.c1.type = memory
a1.channels.c1.capacity = 10000
a1.channels.c1.transactionCapacity = 1000


# Sink: Kafka. If the interceptor sets the "topic" header, the static
# kafka.topic setting below is unnecessary (the header takes precedence)
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
#a1.sinks.k1.kafka.topic = test2
a1.sinks.k1.kafka.bootstrap.servers = hadoop202:9092,hadoop203:9092,hadoop204:9092
a1.sinks.k1.kafka.flumeBatchSize = 20
a1.sinks.k1.kafka.producer.acks = 1
a1.sinks.k1.kafka.producer.linger.ms = 1
a1.sinks.k1.kafka.producer.compression.type = snappy


# Bind source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
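
A minimal sketch of what the custom interceptor registered above as Interceptor.KafkaInterceptor$Builder might look like, assuming it routes on the event body; the topic names and the contains("hello") rule are illustrative, not the author's actual logic:

package Interceptor;   // matches the class name referenced in the config

import java.util.List;
import java.util.Map;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.interceptor.Interceptor;

public class KafkaInterceptor implements Interceptor {

    @Override
    public void initialize() {}

    @Override
    public Event intercept(Event event) {
        // Set the "topic" header that KafkaSink reads to pick the destination topic
        Map<String, String> headers = event.getHeaders();
        String body = new String(event.getBody());
        if (body.contains("hello")) {          // illustrative routing rule
            headers.put("topic", "first");     // illustrative topic name
        } else {
            headers.put("topic", "second");    // illustrative topic name
        }
        return event;
    }

    @Override
    public List<Event> intercept(List<Event> events) {
        for (Event event : events) {
            intercept(event);
        }
        return events;
    }

    @Override
    public void close() {}

    public static class Builder implements Interceptor.Builder {
        @Override
        public Interceptor build() {
            return new KafkaInterceptor();
        }

        @Override
        public void configure(Context context) {}
    }
}

The compiled class goes into a jar under Flume's lib/ directory so the agent can load it.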
5. Using the taildir source to watch log data, route it to multiple (two) Kafka topics, and apply an interceptor
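
A minimal sketch of such an agent, assuming the custom interceptor above stamps each event with a "topic" header naming one of the two destination topics (the log path, file group, and position-file location are illustrative):

a1.sources = r1
a1.channels = c1
a1.sinks = k1

# TAILDIR source: tail matching files and track read offsets in a position file
a1.sources.r1.type = TAILDIR
a1.sources.r1.positionFile = /opt/module/flume/taildir_position.json
a1.sources.r1.filegroups = f1
a1.sources.r1.filegroups.f1 = /opt/module/applog/log/app.*

# The interceptor decides each event's destination topic via the "topic" header
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = Interceptor.KafkaInterceptor$Builder

a1.channels.c1.type = memory
a1.channels.c1.capacity = 10000
a1.channels.c1.transactionCapacity = 1000

# KafkaSink routes each event to the topic named in its "topic" header
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.kafka.bootstrap.servers = hadoop202:9092,hadoop203:9092,hadoop204:9092

a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1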


All other scenarios can be looked up in the official Flume documentation: https://flume.apache.org/FlumeUserGuide.html
