Log4j configuration
log4j2
JARs to import in pom.xml
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-log4j2</artifactId>
</dependency>
<!-- flume -->
<dependency>
    <groupId>org.apache.logging.log4j</groupId>
    <artifactId>log4j-flume-ng</artifactId>
    <version>2.8.2</version>
</dependency>
<dependency>
    <groupId>org.apache.flume.flume-ng-clients</groupId>
    <artifactId>flume-ng-log4jappender</artifactId>
    <version>1.7.0</version>
</dependency>
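Spring Boot uses Logback by default, so the default logging starter must be excluded before spring-boot-starter-log4j2 can take over. A minimal sketch (spring-boot-starter-web is just an example here; exclude spring-boot-starter-logging from whichever starter pulls it in):

<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
    <exclusions>
        <!-- Remove the default Logback binding so the log4j2 starter wins -->
        <exclusion>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-logging</artifactId>
        </exclusion>
    </exclusions>
</dependency>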
log4j2.xml
<configuration status="WARN" monitorInterval="30"> <!--先定义所有的appender--> <appenders> <!--这个输出控制台的配置--> <console name="Console" target="SYSTEM_OUT"> <!--输出日志的格式--> <PatternLayout pattern="[%d{HH:mm:ss:SSS}] [%p] - %l - %m%n"/> </console> <!--输出到flume--> <Flume name="FlumeInfo" compress="false" type="avro" ignoreExceptions="false"> <Agent host="flume开启ip地址" port="44444"/> <!--输出方式为json--> <!--<JSONLayout/>--> <PatternLayout pattern="%m%n"/> <!--<PatternLayout pattern="[%d{HH:mm:ss:SSS}] [%p] - %l - %m%n"/>--> </Flume> </appenders> <!--然后定义logger,只有定义了logger并引入的appender,appender才会生效--> <loggers> <logger name="FlumeInfo" level="INFO" additivity="false"> <appender-ref ref="FlumeInfo"/> </logger> <root level="INFO"> <appender-ref ref="Console"/> </root> </loggers> </configuration>
Usage
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.web.bind.annotation.RestController;

@SpringBootApplication
@RestController
public class HaizeiApplication {

    // The logger name must match the logger defined in log4j2.xml
    private static Logger logger = LoggerFactory.getLogger("FlumeInfo");

    public static void main(String[] args) throws Exception {
        logger.info("***************************123");
        SpringApplication.run(HaizeiApplication.class, args);
    }
}
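The same named logger can be used anywhere in the application; a minimal sketch of emitting one Flume event per request (the endpoint path and message are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class LogDemoController {

    private static final Logger flumeLogger = LoggerFactory.getLogger("FlumeInfo");

    // Each request produces one event that the Flume appender forwards as Avro
    @GetMapping("/log")
    public String log() {
        flumeLogger.info("request received at /log");
        return "ok";
    }
}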
log4j1
JARs to import in pom.xml
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-log4j12</artifactId>
    <version>1.7.25</version>
</dependency>
<dependency>
    <groupId>org.apache.flume.flume-ng-clients</groupId>
    <artifactId>flume-ng-log4jappender</artifactId>
    <version>1.7.0</version>
</dependency>
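As in the log4j2 setup, Spring Boot's default Logback binding should be excluded, otherwise SLF4J finds two bindings on the classpath; a sketch (again, exclude it from whichever starter pulls it in):

<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
    <exclusions>
        <!-- Avoid a duplicate SLF4J binding alongside slf4j-log4j12 -->
        <exclusion>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-logging</artifactId>
        </exclusion>
    </exclusions>
</dependency>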
log4j.xml
<!-- Use only one of the two appenders below: both are named "flume" and cannot coexist in one file -->

<!-- Single node -->
<appender name="flume" class="org.apache.flume.clients.log4jappender.Log4jAppender">
    <layout class="org.apache.log4j.PatternLayout">
        <param name="ConversionPattern" value="%m%n" />
    </layout>
    <param name="Hostname" value="192.168.11.12"/>
    <param name="Port" value="44444"/>
</appender>

<!-- Load-balancing mode -->
<appender name="flume" class="org.apache.flume.clients.log4jappender.LoadBalancingLog4jAppender">
    <!-- Space-separated list of Flume agents to balance across -->
    <param name="hosts" value="192.168.12.11:6666 192.168.12.12:6666" />
    <param name="Selector" value="RANDOM" />
    <param name="maxBackoff" value="30000" />
    <layout class="org.apache.log4j.PatternLayout">
        <param name="ConversionPattern" value="%d{yyyy-MM-dd hh:mm:ss,SSS} [%p][%t] %-20.30c{1} %4L %m%n" />
    </layout>
</appender>
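The appender still has to be referenced by the logger the code requests; a minimal sketch, matching the getLogger("flume") call in the usage section below:

<logger name="flume" additivity="false">
    <level value="INFO"/>
    <appender-ref ref="flume"/>
</logger>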
log4j.properties
# log4j to Flume: single-node configuration
log4j.appender.loadbalance = org.apache.flume.clients.log4jappender.Log4jAppender
log4j.appender.loadbalance.hostname = hadoop011
log4j.appender.loadbalance.port = 6666

# log4j to Flume: load-balancing configuration (use either this block or the one above, not both)
log4j.appender.loadbalance = org.apache.flume.clients.log4jappender.LoadBalancingLog4jAppender
log4j.appender.loadbalance.Hosts = hadoop011:6666 hadoop012:6666
#log4j.appender.loadbalance.UnsafeMode = true
log4j.appender.loadbalance.MaxBackoff = 30000
# FQDN, RANDOM; default is ROUND_ROBIN
log4j.appender.loadbalance.Selector = RANDOM
log4j.appender.loadbalance.layout=org.apache.log4j.PatternLayout
log4j.appender.loadbalance.layout.ConversionPattern=%m
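Here too the appender only fires once a logger references it; a one-line sketch wiring it to the "flume" logger used in the code below:

log4j.logger.flume = INFO, loadbalance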
Usage
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.web.bind.annotation.RestController;

@SpringBootApplication
@RestController
public class HaizeiApplication {

    // The logger name must match the logger the Flume appender is attached to
    private static Logger logger = LoggerFactory.getLogger("flume");

    public static void main(String[] args) throws Exception {
        logger.info("***************************123");
        SpringApplication.run(HaizeiApplication.class, args);
    }
}
springboot-logback
JARs to import in pom.xml
<!-- https://mvnrepository.com/artifact/com.teambytes.logback/logback-flume-appender -->
<dependency>
    <groupId>com.teambytes.logback</groupId>
    <artifactId>logback-flume-appender_2.11</artifactId>
    <version>0.0.9</version>
</dependency>
logback-spring.xml
<appender name="flumeTest" class="com.teambytes.logback.flume.FlumeLogstashV1Appender"> <flumeAgents> ip:44444 </flumeAgents> <flumeProperties> connect-timeout=4000; request-timeout=8000 </flumeProperties> <batchSize>100</batchSize> <reportingWindow>1000</reportingWindow> <!--<additionalAvroHeaders> dir=logs </additionalAvroHeaders>--> <application>JustryDeng's Application</application> <layout class="ch.qos.logback.classic.PatternLayout"> <!--<pattern>%d{HH:mm:ss.SSS} %-5level %logger{36} - \(%file:%line\) - %message%n%ex</pattern>--> <pattern>%message</pattern> </layout> <filter class="ch.qos.logback.classic.filter.LevelFilter"> <level>INFO</level> </filter> </appender> <logger name="flumeTest" level="info"> <appender-ref ref="flumeTest"/> </logger>
Usage
@Before("shoppingCartLog()") public void doBeforeByShoppingCart(JoinPoint joinPoint) throws Throwable { org.slf4j.Logger flumeLogger = LoggerFactory.getLogger("flumeTest"); flumeLogger.info("123"); }
Create the Flume configuration for receiving logs
Flume configuration file
# Declare the agent's components
a1.sources = r1
a1.channels = c1
a1.sinks = k1

# Describe and configure the source component: the log4j/logback appenders send Avro events,
# so the source must be an avro source listening on the port the appenders point at
a1.sources.r1.type = avro
a1.sources.r1.bind = 0.0.0.0
a1.sources.r1.port = 44444

# Describe and configure the channel component; a memory channel is used here
a1.channels.c1.type = memory
# Maximum number of events the channel can hold
a1.channels.c1.capacity = 1000
# Maximum number of events taken from the source or handed to the sink per transaction
a1.channels.c1.transactionCapacity = 100

# Describe and configure the sink component: k1
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = hdfs://hdfs-namenode-ip:9000/user/%Y-%m-%d/%H
a1.sinks.k1.hdfs.filePrefix = logs
#a1.sinks.k1.hdfs.inUsePrefix = .
# Default: 30; how often (seconds) to roll the temporary file into the final target file; 0 disables time-based rolling
a1.sinks.k1.hdfs.rollInterval = 0
# Default: 1024; roll into the target file once the temporary file reaches this size (bytes); 0 disables size-based rolling
a1.sinks.k1.hdfs.rollSize = 16777216
# Default: 10; roll into the target file after this many events; 0 disables count-based rolling
a1.sinks.k1.hdfs.rollCount = 0
a1.sinks.k1.hdfs.batchSize = 100
a1.sinks.k1.hdfs.writeFormat = text
# Generated file type; the default is SequenceFile, DataStream writes plain text
a1.sinks.k1.hdfs.fileType = DataStream
# HDFS operation timeout (ms)
a1.sinks.k1.hdfs.callTimeout = 10000
a1.sinks.k1.hdfs.useLocalTimeStamp = true

# Wire the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
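With the agent definition saved (the file name below is assumed), the agent can be started with the standard flume-ng launcher:

bin/flume-ng agent --conf conf --conf-file conf/log-collect.conf --name a1 -Dflume.root.logger=INFO,console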
Issues
1. When writing to HDFS, many small files are generated and the rollSize setting does not take effect; the following must be added:
a1.sinks.k1.hdfs.minBlockReplicas = 1
2. To collect logs from multiple machines into the same file, all of them just need to send to the same listening port, i.e. point their appenders at the same Flume source.