flume-conf.properties

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.


# The configuration file needs to define the sources,
# the channels and the sinks.
# Sources, channels and sinks are defined per agent,
# in this case the agent is called 'logcollect'

# New-style source: TAILDIR
logcollect.sources = taildir-source
logcollect.sinks = userOperLogSink userBehaviorSink
logcollect.channels = userOperLogChannel userBehaviorChannel

# Describe/configure the source
logcollect.sources.taildir-source.type = TAILDIR
logcollect.sources.taildir-source.channels = userOperLogChannel userBehaviorChannel
# skipToEnd is a property of the TAILDIR source itself, not of its channels
logcollect.sources.taildir-source.skipToEnd = true
logcollect.sources.taildir-source.positionFile = /home/logcollect/flume/taildir_position.json
# Space-separated list of file groups; each group is a set of files to be tailed
logcollect.sources.taildir-source.filegroups = f1
# Note: kafka.consumer.* settings apply to the Kafka source, not the TAILDIR source, so this line has no effect here
logcollect.sources.taildir-source.kafka.consumer.timeout.ms = 60000

logcollect.sources.taildir-source.filegroups.f1 = /home/logcollect/server/log/.*.log
logcollect.sources.taildir-source.headers.f1.headerKey1 = value1
#logcollect.sources.taildir-source.filegroups.f2 = /usr/local/tomcat/logs/gi/gi.log
#logcollect.sources.taildir-source.headers.f2.headerKey1 =value2
#logcollect.sources.taildir-source.headers.f2.headerKey2 =value2-2
logcollect.sources.taildir-source.fileHeader = true
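# For reference, the positionFile maintained by the TAILDIR source is a JSON array with one entry
# per tailed file recording its inode, byte offset and path. An illustrative entry (values and the
# file name below are made up):
#   [{"inode":2496263,"pos":12345,"file":"/home/logcollect/server/log/app.log"}]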


# Interceptor
logcollect.sources.taildir-source.interceptors = interceptor
# Interceptor type: regex_extractor (regular-expression extractor)
logcollect.sources.taildir-source.interceptors.interceptor.type = regex_extractor
# Accept log lines containing userOperLog or UserBehaviorLog
logcollect.sources.taildir-source.interceptors.interceptor.regex = .*(userOperLog|UserBehaviorLog).*
logcollect.sources.taildir-source.interceptors.interceptor.serializers = s1
# Store the first regex capture group (userOperLog or UserBehaviorLog) in the event header named "key"
logcollect.sources.taildir-source.interceptors.interceptor.serializers.s1.name = key

# Selector
logcollect.sources.taildir-source.selector.type = multiplexing
logcollect.sources.taildir-source.selector.header = key
# Events containing userOperLog are routed to userOperLogChannel
logcollect.sources.taildir-source.selector.mapping.userOperLog = userOperLogChannel
# Events containing UserBehaviorLog are routed to userBehaviorChannel
logcollect.sources.taildir-source.selector.mapping.UserBehaviorLog = userBehaviorChannel
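# Worked example of the routing above (the log line is hypothetical; only the userOperLog marker matters):
#   2018-11-26 10:00:00 INFO userOperLog {"userId":"u001","action":"login"}
# The regex_extractor captures "userOperLog" into the event header "key", and the multiplexing
# selector then delivers the event to userOperLogChannel, i.e. on to the user_oper_log Kafka topic.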

 

# Describe/configure the sink
# Sink type
logcollect.sinks.userOperLogSink.type = org.apache.flume.sink.kafka.KafkaSink
# kafka topic
logcollect.sinks.userOperLogSink.topic = user_oper_log
logcollect.sinks.userOperLogSink.brokerList = 10.168.79.166:9092,10.168.30.114:9092,10.168.92.222:9092
logcollect.sinks.userOperLogSink.batchSize = 200
logcollect.sinks.userOperLogSink.requiredAcks = 1
# Channel this sink reads from
logcollect.sinks.userOperLogSink.channel = userOperLogChannel
# userBehaviorSink
logcollect.sinks.userBehaviorSink.type = org.apache.flume.sink.kafka.KafkaSink
logcollect.sinks.userBehaviorSink.topic = user_behavior_log
logcollect.sinks.userBehaviorSink.brokerList = 10.168.79.166:9092,10.168.30.114:9092,10.168.92.222:9092
logcollect.sinks.userBehaviorSink.batchSize = 200
logcollect.sinks.userBehaviorSink.requiredAcks = 1
# Channel this sink reads from
logcollect.sinks.userBehaviorSink.channel = userBehaviorChannel
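# Note: topic/brokerList/batchSize/requiredAcks are the pre-1.7 KafkaSink property names. On
# Flume 1.7+ the equivalent (preferred) names would look roughly like the sketch below, shown for
# userOperLogSink only and kept commented out so it does not duplicate the active settings:
# logcollect.sinks.userOperLogSink.kafka.bootstrap.servers = 10.168.79.166:9092,10.168.30.114:9092,10.168.92.222:9092
# logcollect.sinks.userOperLogSink.kafka.topic = user_oper_log
# logcollect.sinks.userOperLogSink.flumeBatchSize = 200
# logcollect.sinks.userOperLogSink.kafka.producer.acks = 1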


# Use a channel which buffers events in memory
# Describe/configure the channel
# userOperLogChannel
# Channel type: memory (events are buffered in memory)
logcollect.channels.userOperLogChannel.type = memory
# Maximum number of events stored in the channel
logcollect.channels.userOperLogChannel.capacity=10000
# Percent of byteCapacity reserved as a buffer between byteCapacity and the estimated total size of all events in the channel (this is a percentage; the Flume default is 20, so 2000 is unusually high)
logcollect.channels.userOperLogChannel.byteCapacityBufferPercentage=2000
# userBehaviorChannel
logcollect.channels.userBehaviorChannel.type = memory
logcollect.channels.userBehaviorChannel.capacity=10000
logcollect.channels.userBehaviorChannel.byteCapacityBufferPercentage=2000
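# To run this configuration, the agent name passed to flume-ng must match 'logcollect'. A typical
# invocation (directory paths are illustrative and depend on the installation) would be:
#   bin/flume-ng agent --conf conf --conf-file conf/flume-conf.properties --name logcollect -Dflume.root.logger=INFO,console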

Reposted from: https://www.cnblogs.com/junglecat/p/10018719.html
