#agent1(hadoop102)
a1.sources = r1
a1.channels = c1 c2
a1.sinks = k1 k2
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /opt/module/flume/demo/123.log
#配置channelSelector - replicating(复制-默认不配也可以)
#a1.sources.r1.selector.type = replicating
#复用
a1.sources.r1.selector.type = multiplexing
#event(headers | body)根据headers中的key和value进行数据的发送
#state指的是headers,key的值
a1.sources.r1.selector.header = state
#CZ指的是key对应的value值那么就发送到c1
a1.sources.r1.selector.mapping.CZ = c1
#US指的是key对应的value值那么就发送到c2
a1.sources.r1.selector.mapping.US = c2
#需求:给event中的headers添加数据
#static拦截器可以给所有的eventheaders设置我们自定义的key和value
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = static
#设置key值
a1.sources.r1.interceptors.i1.key = state
#设置value值
a1.sources.r1.interceptors.i1.value = CZ
a1.channels.c1.type = memory
a1.channels.c2.type = memory
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = hadoop103
a1.sinks.k1.port = 33333
a1.sinks.k2.type = avro
a1.sinks.k2.hostname = hadoop104
a1.sinks.k2.port = 44444
#一个source对接两个channel
a1.sources.r1.channels = c1 c2
a1.sinks.k1.channel = c1
a1.sinks.k2.channel = c2
#-----------------------------------------------------------------
#agent2(hadoop103)
a1.sources = r1
a1.channels = c1
a1.sinks = k1
a1.sources.r1.type = avro
a1.sources.r1.bind = hadoop103
a1.sources.r1.port = 33333
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.sinks.k1.type = logger
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
#-----------------------------------------------------------------
#agent3(hadoop104)
a1.sources = r1
a1.channels = c1
a1.sinks = k1
a1.sources.r1.type = avro
a1.sources.r1.bind = hadoop104
a1.sources.r1.port = 44444
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
#a1.sinks.k1.type = logger
#将event数据存储到本地磁盘上
a1.sinks.k1.type = file_roll
#event存放的目录
a1.sinks.k1.sink.directory = /opt/module/flume/demo
#多久时间滚动一个新文件(30秒)
a1.sinks.k1.sink.rollInterval = 30
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1