Before setting up the ELK stack, we need to do some preparation.
As the official documentation explains (https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html), Elasticsearch uses the mmapfs directory type by default to store its indices. The operating system's default limit on mmap counts is likely to be too low, which can result in out-of-memory exceptions, so we raise the limit first.
This prevents Elasticsearch from failing at startup with: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]. (The related startup error, max file descriptors [65535] for elasticsearch process is too low, increase to at least [65536], concerns a different limit and is fixed by raising the nofile ulimit rather than this sysctl.)
vi /etc/sysctl.conf
Add the following setting:
vm.max_map_count=655360
Then apply it:
sysctl -p
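As an optional sanity check (not part of the original steps), you can confirm the new value took effect:
sysctl vm.max_map_count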
Directory structure
Create the directories:
mkdir -p elk/elasticsearch/data/ elk/logstash/conf.d/
Grant write access on the data directory, otherwise elasticsearch will fail to start (777 is the blunt fix; giving ownership to the container's user also works):
chmod 777 elk/elasticsearch/data
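Once the files from the steps below are in place, the layout under elk/ looks like this:
elk/
├── docker-compose.yml
├── elasticsearch/
│   └── data/
└── logstash/
    └── conf.d/
        └── logstash.conf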
Create the logstash.conf configuration file:
vi elk/logstash/conf.d/logstash.conf
Its contents are as follows:
input {
  tcp {
    port => 4567
    host => "0.0.0.0"
    mode => "server"
    type => "test-logstash"
    codec => json_lines
  }
}
filter {
}
output {
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    index => "springboot-logstash-%{+YYYY.MM.dd}"
    user => "elastic"
    password => "changeme"
  }
}
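Because the input uses the json_lines codec, each event is simply one JSON document terminated by a newline. Here is a minimal Java sketch for smoke-testing the TCP input before wiring up Spring Boot; the host matches the example host used later in this post, and the class name and payload are illustrative only:
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.net.Socket;
import java.nio.charset.StandardCharsets;

public class LogstashSmokeTest {
    public static void main(String[] args) throws Exception {
        // Adjust the host to wherever the logstash container is reachable.
        try (Socket socket = new Socket("192.168.248.129", 4567);
             Writer out = new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)) {
            // json_lines framing: one JSON document per line, newline-terminated.
            out.write("{\"logLevel\":\"INFO\",\"app\":\"smoke-test\",\"rest\":\"hello from raw TCP\"}\n");
            out.flush();
        }
    }
}
If the pipeline is healthy, the event shows up in the springboot-logstash-* index in Elasticsearch.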
Installing the ELK Docker images
docker-compose.yml
version: '3'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:5.5.1
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "TZ=Asia/Shanghai"
    volumes:
      - $PWD/elasticsearch/data:/usr/share/elasticsearch/data
      - /etc/timezone:/etc/timezone
      - /etc/localtime:/etc/localtime
    container_name: elasticsearch551
    hostname: elasticsearch
    restart: on-failure
    ports:
      - "9200:9200"
      - "9300:9300"
  kibana:
    image: docker.elastic.co/kibana/kibana:5.5.1
    environment:
      - ELASTICSEARCH_URL=http://elasticsearch:9200
      - "TZ=Asia/Shanghai"
    volumes:
      - /etc/timezone:/etc/timezone
      - /etc/localtime:/etc/localtime
    container_name: kibana551
    hostname: kibana
    depends_on:
      - elasticsearch
    restart: on-failure
    ports:
      - "5601:5601"
  logstash:
    image: docker.elastic.co/logstash/logstash:5.5.1
    command: logstash -f /etc/logstash/conf.d/logstash.conf
    environment:
      - "TZ=Asia/Shanghai"
    volumes:
      - $PWD/logstash/conf.d:/etc/logstash/conf.d
      - $PWD/log:/tmp
      - /etc/timezone:/etc/timezone
      - /etc/localtime:/etc/localtime
    container_name: logstash551
    hostname: logstash
    restart: on-failure
    depends_on:
      - elasticsearch
    ports:
      - "7001-7005:7001-7005"
      - "4567:4567"
Spring Boot logback.xml configuration
1. Add the corresponding Maven dependency
<!-- logstash integration -->
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>5.3</version>
</dependency>
2. Configure logback.xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE configuration>
<configuration>
<include resource="org/springframework/boot/logging/logback/defaults.xml"/>
<include resource="org/springframework/boot/logging/logback/base.xml"/>
<springProperty scope="context" name="appName" source="spring.application.name"/>
<!-- 日志在工程中的输出位置 -->
<property name="LOG_FILE" value="${BUILD_FOLDER:-build}/${appName}"/>
<!-- 控制台的日志输出样式 -->
<property name="CONSOLE_LOG_PATTERN" value="%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>
<!-- 控制台输出 -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<!-- 日志输出编码 -->
<encoder>
<pattern>${CONSOLE_LOG_PATTERN}</pattern>
<charset>utf8</charset>
</encoder>
</appender>
<!--logstash配置-->
<appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<destination>192.168.248.129:4567</destination>
<!-- 日志输出编码 -->
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
<providers>
<timestamp>
<timeZone>UTC</timeZone>
</timestamp>
<pattern>
<pattern>
{
"logLevel": "%level",
"app": "${appName}",
"pid": "${PID:-}",
"thread": "%thread",
"class": "%logger{40}",
"rest": "%message"
}
</pattern>
</pattern>
</providers>
</encoder>
<!--<encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder"/>-->
</appender>
<root level="INFO">
<appender-ref ref="LOGSTASH"/>
<appender-ref ref="CONSOLE"/>
</root>
</configuration>
Kibana is available at: http://192.168.248.129:5601/
Username / password: elastic / changeme
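Before the logs appear in Kibana's Discover view, create an index pattern matching springboot-logstash-* (under Management in Kibana 5.x) so that the daily springboot-logstash-YYYY.MM.dd indices written by logstash become searchable.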