1.读取kafka的数据,KafkaSource
fk.conf的配置如下:
# fk.conf: a single-node Flume configuration.
# Pipeline: Kafka topic "flume" -> memory channel -> HDFS sink.

# Name the components on this agent (one source, one sink, one channel).
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source: consume records from Kafka.
# NOTE(review): zookeeperConnect/groupId/topic is the legacy (Flume 1.6-era)
# KafkaSource configuration; newer Flume releases expect
# kafka.bootstrap.servers / kafka.topics instead — confirm against the
# installed Flume version.
a1.sources.r1.type = org.apache.flume.source.kafka.KafkaSource
# ZooKeeper quorum used by the Kafka consumer (host:port list).
a1.sources.r1.zookeeperConnect = i:2181,i-3:2181,i-7:2181
# Kafka consumer group id and topic to read from.
a1.sources.r1.groupId = flume
a1.sources.r1.topic = flume
# Consumer poll timeout in milliseconds (passed through to the Kafka consumer).
a1.sources.r1.kafka.consumer.timeout.ms = 100

# Describe the sink: write events to HDFS.
a1.sinks.k1.type=hdfs
a1.sinks.k1.hdfs.path=hdfs://ip:8020/tmp
# Roll files by size only (~10 MB); interval/count rolling disabled (0 = off).
a1.sinks.k1.hdfs.rollSize=10240000
a1.sinks.k1.hdfs.rollInterval=0
a1.sinks.k1.hdfs.rollCount=0
#a1.sinks.k1.hdfs.idleTimeout=5
# DataStream writes raw event bodies (no SequenceFile wrapping).
a1.sinks.k1.hdfs.fileType=DataStream
# Use the agent host's clock for time-based path escapes instead of
# requiring a timestamp header on each event.
a1.sinks.k1.hdfs.useLocalTimeStamp=true

# Use a channel which buffers events in memory.
# capacity = max events held; transactionCapacity = max events per transaction.
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel.
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
2.启动flume的命令:
./bin/flume-ng agent -c conf -f conf/fk.conf -Dflume.root.logger=DEBUG,console -n a1
3.在flume的bin目录下创建一个脚本flume1.sh
[root@i-oz1t91xy bin]# ls
flume1.sh flume-ng flume-ng.distro
此时flume1.sh执行不了,需要添加权限
[root@i-oz1t91xy bin]# ls
flume1.sh flume-ng flume-ng.distro
[root@i-oz1t91xy bin]# chmod +x flume1.sh
[root@i-oz1t91xy bin]# ls
flume1.sh flume-ng flume-ng.distro
脚本的内容:
#!/bin/bash
# Launch the Flume agent "a1" with the Kafka-source configuration (fk.conf).
#
# This script lives in $FLUME_HOME/bin, so the conf directory is a *sibling*
# of the script's directory, not a child of it. Resolve FLUME_HOME from the
# script's own location so the launcher works regardless of the caller's
# current working directory (the original './flume-ng ... -f conf/cf.conf'
# only worked if a conf/ dir happened to exist under bin/, and referenced
# the wrong config filename).
set -euo pipefail

FLUME_HOME=$(cd "$(dirname "$0")/.." && pwd)
readonly FLUME_HOME

"$FLUME_HOME/bin/flume-ng" agent \
  -c "$FLUME_HOME/conf" \
  -f "$FLUME_HOME/conf/fk.conf" \
  -Dflume.root.logger=DEBUG,console \
  -n a1