1. Add the Maven dependencies. Note that logstash-logback-encoder is optional for this setup: the configuration in step 4 uses logback's PatternLayoutEncoder, so the second dependency is only needed if you later switch to JSON output (e.g. LogstashEncoder).
<!-- Kafka log appender -->
<dependency>
    <groupId>com.github.danielwegener</groupId>
    <artifactId>logback-kafka-appender</artifactId>
    <version>0.2.0-RC1</version>
</dependency>
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>6.3</version>
</dependency>
2. Write a ClassicConverter that resolves the host IP. The converter must return a single-line value, because the logback pattern in step 4 embeds it in a one-line record that Logstash's kv filter later splits apart.
import ch.qos.logback.classic.pattern.ClassicConverter;
import ch.qos.logback.classic.spi.ILoggingEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Enumeration;

/**
 * @ClassName LogIpConfig
 * @Author ShuYu Liu
 * @Description Resolves the local IP address for the %ip conversion word
 * @Date 2020/6/10 13:26
 */
public class LogIpConfig extends ClassicConverter {

    private static final Logger LOGGER = LoggerFactory.getLogger(LogIpConfig.class);

    @Override
    public String convert(ILoggingEvent event) {
        try {
            // Iterate over all local network interfaces
            Enumeration<NetworkInterface> en = NetworkInterface.getNetworkInterfaces();
            while (en.hasMoreElements()) {
                NetworkInterface ni = en.nextElement();
                Enumeration<InetAddress> enumInetAddr = ni.getInetAddresses();
                while (enumInetAddr.hasMoreElements()) {
                    InetAddress inetAddress = enumInetAddr.nextElement();
                    // Skip loopback and link-local addresses; use the first site-local one
                    if (!inetAddress.isLoopbackAddress() && !inetAddress.isLinkLocalAddress()
                            && inetAddress.isSiteLocalAddress()) {
                        // Single-line value so the #@#-delimited log record stays parseable
                        return inetAddress.getHostAddress();
                    }
                }
            }
        } catch (SocketException e) {
            LOGGER.warn("failed to resolve local IP address", e);
        }
        return "unknown";
    }
}
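A quick way to sanity-check the converter outside of logback is to call it directly. A minimal sketch: convert() ignores the event argument here, so passing null is enough for this test.
public class LogIpConfigTest {
    public static void main(String[] args) {
        // Should print the first site-local address, e.g. 192.168.x.x
        System.out.println(new LogIpConfig().convert(null));
    }
}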
3. Configure the Kafka parameters in bootstrap.yml. The ${random.value} placeholders are resolved by Spring Boot's RandomValuePropertySource, so client-id and random-name each get a fresh random string on every application start.
spring:
  test:
    kafka:
      bootstrap-servers: localhost:9092
      topic: logs-channel
      client-id: logs-${random.value}
      random-name: ${random.value}
4. Add the following to the logback configuration (logback-spring.xml, since springProfile and springProperty require the Spring-aware config file)
<!-- register the %ip conversion word, backed by the converter from step 2 -->
<conversionRule conversionWord="ip" converterClass="com.xxx.LogIpConfig" />
<!-- test environment -->
<springProfile name="test">
    <!-- configuration to be enabled when the "test" profile is active -->
    <springProperty scope="context" name="module" source="spring.application.name"
                    defaultValue="undefined"/>
    <!-- these elements read values from the Spring Environment, i.e. the bootstrap.yml values from step 3 -->
    <springProperty scope="context" name="bootstrapServers" source="spring.test.kafka.bootstrap-servers"/>
    <springProperty scope="context" name="topic" source="spring.test.kafka.topic"/>
    <springProperty scope="context" name="clientId" source="spring.test.kafka.client-id"/>
    <springProperty scope="context" name="randomName" source="spring.test.kafka.random-name"/>
    <!-- Kafka appender configuration -->
    <appender name="kafka" class="com.github.danielwegener.logback.kafka.KafkaAppender">
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <!-- formatted output: %d = date, %thread = thread name, %level = log level,
                 %logger{50} = logger name capped at 50 chars, %msg = log message, %n = newline;
                 fields are joined with #@# so the Logstash kv filter can split them -->
            <pattern>
                host=%ip#@#timestamp=%d{yyyy-MM-dd HH:mm:ss.SSS}#@#randomName=${randomName}#@#thread=%thread#@#logLevel=%level#@#logger=%logger{50} - %msg%n
            </pattern>
        </encoder>
        <topic>${topic}</topic>
        <!-- no message key: records are distributed round-robin across partitions -->
        <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy"/>
        <!-- send asynchronously so logging never blocks the application thread -->
        <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy"/>
        <producerConfig>bootstrap.servers=${bootstrapServers}</producerConfig>
        <!-- fire-and-forget: do not wait for broker acknowledgement -->
        <producerConfig>acks=0</producerConfig>
        <!-- wait up to 1000 ms and collect log messages before sending them as a batch -->
        <producerConfig>linger.ms=1000</producerConfig>
        <!-- do not block the application if the broker is unavailable -->
        <producerConfig>max.block.ms=0</producerConfig>
        <producerConfig>client.id=${clientId}</producerConfig>
    </appender>
    <logger name="com.xxx" level="info">
        <appender-ref ref="kafka" />
    </logger>
</springProfile>
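Any class under com.xxx now logs to Kafka through this appender. A minimal sketch (OrderService and createOrder are hypothetical names):
package com.xxx;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class OrderService {

    private static final Logger LOG = LoggerFactory.getLogger(OrderService.class);

    public void createOrder(String orderId) {
        // Routed to the "kafka" appender because com.xxx is bound to it above
        LOG.info("order created: {}", orderId);
    }
}
With the pattern above, the record published to the topic looks roughly like this (host and randomName values are illustrative):
host=192.168.1.10#@#timestamp=2020-06-10 13:26:00.123#@#randomName=ab12cd34#@#thread=main#@#logLevel=INFO#@#logger=com.xxx.OrderService - order created: 1001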
5. Logstash configuration
input {
  kafka {
    id => "logstash"
    # must match the topic the appender publishes to (spring.test.kafka.topic in step 3)
    topics => ["logs-channel"]
    bootstrap_servers => ["localhost:9092"]
    codec => plain
    # expose Kafka metadata (topic/partition/offset) under [@metadata][kafka]
    decorate_events => true
    type => "logs"
    consumer_threads => 2
  }
}
filter {
  # split the #@#-delimited key=value pairs produced by the logback pattern;
  # field_split_pattern treats #@# as a literal delimiter, whereas field_split
  # would treat # and @ as a character set and break on them individually
  kv {
    source => "message"
    field_split_pattern => "#@#"
  }
  # keep only the fields parsed from the pattern (the timestamp comes from the log line itself)
  mutate {
    remove_field => ["@version"]
    remove_field => ["@timestamp"]
    remove_field => ["message"]
  }
}
output {
  if [type] == "logs" {
    elasticsearch {
      hosts => "localhost:9200"
      index => "logs-%{+YYYY.MM.dd}"
      document_type => "logs"
      # topic-partition-offset as the id makes re-reads idempotent (no duplicate documents)
      document_id => "%{[@metadata][kafka][topic]}-%{[@metadata][kafka][partition]}-%{[@metadata][kafka][offset]}"
    }
  }
}
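For reference, after the kv filter has split the record and mutate has dropped the raw message, the document indexed into Elasticsearch looks roughly like this (values illustrative; the type field is added by the Logstash input):
{
  "host": "192.168.1.10",
  "timestamp": "2020-06-10 13:26:00.123",
  "randomName": "ab12cd34",
  "thread": "main",
  "logLevel": "INFO",
  "logger": "com.xxx.OrderService - order created: 1001",
  "type": "logs"
}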
With that, the log data flows from the application through Kafka and Logstash into Elasticsearch, where it can be queried (for example, from Kibana).