Kafka和Flume的整合
vim conf/taildir-memory-kafka.conf
# Flume agent "a1": tails a local log file and publishes each line to Kafka.
# Pipeline: TAILDIR source (r1) -> memory channel (c1) -> Kafka sink (k1).

# Component names for agent a1.
a1.sources = r1
a1.channels = c1
a1.sinks = k1

# Source: TAILDIR tails files and records read offsets in positionFile,
# so tailing resumes from the last position after an agent restart.
a1.sources.r1.type = TAILDIR
a1.sources.r1.positionFile = /var/log/flume/taildir_position.json
# One file group (f1) tracking a single absolute path.
a1.sources.r1.filegroups = f1
a1.sources.r1.filegroups.f1 = /root/example.log

# Sink: Kafka producer writing to topic "flumeA" on a 3-broker cluster.
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.kafka.topic = flumeA
a1.sinks.k1.kafka.bootstrap.servers = node-1:9092,node-2:9092,node-3:9092
# Max events per producer batch.
a1.sinks.k1.kafka.flumeBatchSize = 20
# acks=1: leader acknowledgement only (durability/latency trade-off).
a1.sinks.k1.kafka.producer.acks = 1
# Wait up to 1 ms to batch records before sending.
a1.sinks.k1.kafka.producer.linger.ms = 1

# Channel: in-memory buffer; events are lost if the agent dies.
# transactionCapacity (per-take/put batch) must be <= capacity.
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Wire source and sink to the channel.
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
# Start the Flume agent "a1" with the config above, logging to the console.
# NOTE(review): create the Kafka topic (next command) BEFORE starting the
# agent unless the brokers have auto.create.topics.enable=true — confirm.
bin/flume-ng agent --conf conf --conf-file conf/taildir-memory-kafka.conf --name a1 -Dflume.root.logger=INFO,console
# Create the target topic: 3 partitions, replication factor 2.
# NOTE(review): --zookeeper was removed in Kafka 3.x; on newer clusters use
# --bootstrap-server node-1:9092 instead — verify the installed Kafka version.
bin/kafka-topics.sh --create --topic flumeA --zookeeper node-1:2181 --partitions 3 --replication-factor 2
# Consume from the beginning of the topic to verify events arrive from Flume.
bin/kafka-console-consumer.sh --bootstrap-server node-1:9092 --topic flumeA --from-beginning