中间件Kafka+Flume配置
a1.sources = s1
a1.sinks = sk1
a1.channels = c1
a1.sources.s1.type = avro
a1.sources.s1.channels = c1
a1.sources.s1.bind = CentOS
a1.sources.s1.port = 44444
a1.sources.s1.interceptors = i1
a1.sources.s1.interceptors.i1.type = regex_filter
a1.sources.s1.interceptors.i1.regex = .*(EVALUATE|SUCCESS).*
a1.sources.s1.interceptors.i1.excludeEvents = false
a1.channels.c1.type = memory
a1.sinks.sk1.channel = c1
a1.sinks.sk1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.sk1.kafka.topic = UserRiskEvaluate
a1.sinks.sk1.kafka.bootstrap.servers = CentOS:9092
a1.sinks.sk1.kafka.flumeBatchSize = 10
# NOTE: kafka.batchSize is the legacy (pre-Flume-1.7) name for the same setting as
# flumeBatchSize above; the two lines were duplicates, so the old form is commented out.
# a1.sinks.sk1.kafka.batchSize=10
a1.sinks.sk1.kafka.producer.acks = -1
a1.sinks.sk1.kafka.producer.linger.ms = 10
整合Flume + SpringBoot logback
参考:https://github.com/gilt/logback-flume-appender
<dependency>
<groupId>org.apache.flume</groupId>
<artifactId>flume-ng-sdk</artifactId>
<version>1.9.0</version>
</dependency>
将github的开源项目引入到工程中,在项目的logback.xml添加如下内容
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender" >
<encoder>
<pattern>%p %c#%M %d{yyyy-MM-dd HH:mm:ss} %m%n</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>logs/evaluate-%d{yyyyMMdd}.log</fileNamePattern>
<maxHistory>30</maxHistory>
</rollingPolicy>
<encoder>
<pattern>%p %c#%M %d{yyyy-MM-dd HH:mm:ss} %m%n</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="flume" class="com.baizhi.logback.flume.FlumeLogstashV1Appender">
<!--配置Flume 连接参数-->
<!-- NOTE: all four entries below point to the same agent (CentOS:44444); this is a
     single-node demo setup. In production, list distinct host:port pairs here so the
     appender can fail over between independent Flume agents. -->
<flumeAgents>
CentOS:44444,
CentOS:44444,
CentOS:44444,
CentOS:44444
</flumeAgents>
<!--配置线程池大小-->
<reporterMaxThreadPoolSize>120</reporterMaxThreadPoolSize>
<!--设置发送日志event队列最大长度-->
<reporterMaxQueueSize>100</reporterMaxQueueSize>
<!--配置Rpc Client的连接属性-->
<flumeProperties>
connect-timeout=4000;
request-timeout=8000
</flumeProperties>
<!--一次性发送批次大小-->
<batchSize>100</batchSize>
<!--发送数据时间窗口|间隔-->
<reportingWindow>1000</reportingWindow>
<!--会在Event中添加header信息 application=xxxx-->
<application>xxxx</application>
<!--格式化输出信息-->
<layout class="ch.qos.logback.classic.PatternLayout">
<pattern>%p %c#%M %d{yyyy-MM-dd HH:mm:ss} %m</pattern>
</layout>
</appender>
<!-- 控制台输出日志级别 -->
<root level="ERROR">
<appender-ref ref="STDOUT" />
</root>
<logger name="com.baizhi.controller" level="info" additivity="false">
<appender-ref ref="flume"/>
</logger>
</configuration>