场景一:
生产环境因集群,无法快速定位日志,以下配置可在kibana可视化界面查看系统日志
系统报如下日志
在kibana搜索
实现步骤
一)application.properties文件新增配置
#logstash服务器地址
logstash.host=10.86.210.18
#logstash端口
logstash.port=8084
二)引入pom文件
<dependency>
<groupId>net.logstash.logback</groupId>
<artifactId>logstash-logback-encoder</artifactId>
<version>5.3</version>
</dependency>
<dependency>
<groupId>net.logstash.log4j</groupId>
<artifactId>jsonevent-layout</artifactId>
<version>1.6</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>1.1.8</version>
</dependency>
如果出现如下错误:
ERROR in ch.qos.logback.core.joran.action.NestedBasicPropertyIA - Unexpected aggregationType AS_BASIC_PROPERTY_COLLECTION
将ch.qos.logback 包更改为如下
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<exclusions>
<exclusion>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
</exclusion>
</exclusions>
<version>1.1.8</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>1.1.8</version>
</dependency>
三)实现获取ip方法
import ch.qos.logback.classic.pattern.ClassicConverter;
import ch.qos.logback.classic.spi.ILoggingEvent;
/**
 * Logback conversion-word implementation that exposes the local server IP,
 * referenced from patterns as %ip (registered via &lt;conversionRule&gt; in
 * logback-spring.xml).
 */
public class LogIpConfig extends ClassicConverter {

    /**
     * Resolves the IP of the host this application runs on.
     *
     * @param event the logging event being rendered (unused — the IP is host-wide)
     * @return the local host IP address
     */
    @Override
    public String convert(ILoggingEvent event) {
        return DataServerUtil.getLocalHost();
    }
}
四)xml配置
在resource新建logback-spring.xml文件
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<include resource="org/springframework/boot/logging/logback/defaults.xml"/>
<property resource="application.properties" />
<!--服务器ip获取方法配置-->
<conversionRule conversionWord="ip" converterClass="cn.evun.geely.dataserver.gds.utils.LogIpConfig" />
<include resource="default.xml" />
</configuration>
在resource新建default.xml文件
<?xml version="1.0" encoding="UTF-8"?>
<included>
<!-- 将日志打印到控制台 -->
<appender name="PROJECT-CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern> %d{HH:mm:ss.SSS} %contextName [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>
<!-- 将日志写入到文件配置 -->
<appender name="PROJECT-FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${logging.path}/application.log</file>
<encoder >
<pattern>"host": "%ip" %d{HH:mm:ss.SSS} %-5level [%thread] %logger{32}[%file:%line] -> %msg%n</pattern>
<charset class="java.nio.charset.Charset">UTF-8</charset>
</encoder>
<!--配置备份 -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${logging.path}/application.log.%d{yyyy-MM-dd}.log.gz</fileNamePattern>
<maxHistory>7</maxHistory>
</rollingPolicy>
</appender>
<!-- logback 和 logstash 通讯配置 -->
<appender name="SOCKET" class="net.logstash.logback.appender.LogstashSocketAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>ERROR</level>
</filter>
<host>${logstash.host}</host>
<port>${logstash.port}</port>
</appender>
<!-- logstash远程日志配置-->
<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<destination>${logstash.host}:${logstash.port}</destination>
<!--输出打印json格式-->
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<pattern>
<pattern>
<!--输出日志可自定义,可根据自己需要配置-->
{
<!--es索引名称 -->
"indexname":"test_logstash",
<!--应用名称 -->
"appname":"${spring.application.name}",
<!--服务器ip -->
"host": "%ip",
<!--应用端口 -->
"port": "${spring.application.index}",
<!--打印时间 -->
"timestamp": "%d{yyyy-MM-dd HH:mm:ss.SSS}",
<!--线程名称 -->
"thread": "%thread",
<!--日志级别 -->
"level": "%level",
<!--日志名称 -->
"logger_name": "%logger",
<!--日志信息 -->
"message": "%msg",
<!--日志堆栈 -->
"stack_trace": "%exception"
}
</pattern>
</pattern>
</providers>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="PROJECT-FILE"/>
<appender-ref ref="PROJECT-CONSOLE"/>
<appender-ref ref="SOCKET"/>
<appender-ref ref="LOGSTASH"/>
</root>
</included>
以上四步骤即可实现将系统打印的日志上传到logstash,logstash再自动上传elasticsearch,然后在kibana展示。
logstash配置
input { stdin { } }
input {
tcp {
#不配置时默认使用安装logstash的服务器ip
#host => "localhost"
#开启的端口
port => 8084
mode => "server"
tags => ["tags"]
#输出json格式,需要装插件
codec => json_lines
}
}
output {
stdout{codec =>rubydebug}
elasticsearch {
#es地址,可多个
hosts => ["localhost:9200"]
flush_size => 1000
action => "index"
#获取输出参数"indexname"值当做索引,如果没有则会自动创建对应索引(需要es开启自动创建索引)
index => "%{indexname}"
}
}
场景二:比如我想统计api接口调用次数、哪些人调用等信息
1)创建InterfaceApiFitlerResult
public class InterfaceApiFitlerResult {
@ApiModelProperty("接口id")
private String interfaceId;
@ApiModelProperty("appKey")
private String appKey;
@ApiModelProperty("ip")
private String ip;
import cn.evun.geely.dataserver.gds.model.api.InterfaceApiFitlerResult;
import com.alibaba.fastjson.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
/**
 * Writes one API-invocation record to the dedicated filter logger, whose output
 * is routed to file and logstash by interface_api_log_fitler.xml.
 */
public class InterfaceApiFitlerLog {

    private static final Logger log = LoggerFactory.getLogger(InterfaceApiFitlerLog.class);

    /**
     * Logs the given invocation record. The MDC values (appKey / interfaceId /
     * userIp) are picked up by the LOGSTASH appender pattern via %mdc{...};
     * the log message itself is the record serialized as JSON.
     *
     * @param req the invocation record to publish
     */
    public static void log(InterfaceApiFitlerResult req) {
        MDC.clear();
        MDC.put("appKey", req.getAppKey());
        MDC.put("interfaceId", req.getInterfaceId());
        MDC.put("userIp", req.getIp());
        // FIX: the original logged an undefined variable "json"; serialize the
        // request through format(req) instead.
        log.info(format(req));
    }

    /** Serializes the record to a JSON string via fastjson. */
    private static String format(InterfaceApiFitlerResult req) {
        return JSONObject.toJSONString(req);
    }
}
2)xml配置
2.1)新增interface_api_log_fitler.xml
<?xml version="1.0" encoding="UTF-8"?>
<included>
<!--日志文件备份配置 -->
<appender name="FILTER_RESULT_LOG"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>INFO</level>
</filter>
<!-- <Encoding>UTF-8</Encoding> -->
<File>${logging.path}/api/api-filter-result.log</File>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- daily rollover -->
<FileNamePattern>${logging.path}/api/api-filter-result.%d{yyyy-MM-dd}.log
</FileNamePattern>
<!-- keep 30 days' worth of history -->
<maxHistory>30</maxHistory>
</rollingPolicy>
<layout class="ch.qos.logback.classic.PatternLayout">
<pattern>%m%n</pattern>
</layout>
</appender>
<!-- logback 和 logstash 通讯配置 -->
<appender name="SOCKET" class="net.logstash.logback.appender.LogstashSocketAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>ERROR</level>
</filter>
<host>${logstash.host}</host>
<port>${logstash.port}</port>
</appender>
<!-- logstash远程日志配置-->
<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<destination>${logstash.host}:${logstash.port}</destination>
<!--输出打印json格式-->
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<pattern>
<pattern>
<!--输出日志可自定义,可根据自己需要配置-->
{
<!--es索引名称 -->
"indexname":"interface_api",
<!--应用名称 -->
"appname":"${spring.application.name}",
"appKey":"%mdc{appKey}",
"interfaceId":"%mdc{interfaceId}",
"userIp":"%mdc{userIp}"
}
</pattern>
</pattern>
</providers>
</encoder>
</appender>
<logger name="cn.evun.geely.dataserver.gds.log.InterfaceApiFitlerLog" level="INFO"
additivity="false">
<appender-ref ref="FILTER_RESULT_LOG" />
<appender-ref ref="SOCKET"/>
<appender-ref ref="LOGSTASH"/>
</logger>
</included>
2.2)在logback-spring.xml文件引入
<include resource="interface_api_log_fitler.xml" />
3)测试方法
/** Demo endpoint: emits one sample api-filter record so it can be verified in kibana. */
@ApiOperation("")
@RequestMapping(value = "/interfaceApiFitlerLog", method = RequestMethod.GET)
public APIResponse interfaceApiFitlerLog() {
    InterfaceApiFitlerResult sample = new InterfaceApiFitlerResult();
    sample.setAppKey("testAppKey");
    sample.setInterfaceId("testInterfaceId");
    sample.setIp("127.0.0.1");
    InterfaceApiFitlerLog.log(sample);
    return APIResponse.success();
}
4)执行结果
这样就可以通过kibana做一个简单的统计图了