Installation
es
Reference: https://www.cnblogs.com/balloon72/p/13177872.html
The localization setting in that article is wrong; it should be: i18n.locale: "zh-CN"
Create the data and config directories, and chmod the data directory to 777, as sketched below.
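A minimal sketch of the preparation, using the host paths that the docker run command below bind-mounts:
# host directories backing the container's config, data and plugins
mkdir -p /home/datasingle/docker/es/{config,data,plugins}
# put an elasticsearch.yml in config/ first (e.g. network.host: 0.0.0.0),
# since it is bind-mounted as a single file below
# the in-container elasticsearch user needs write access to data
chmod 777 /home/datasingle/docker/es/data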
docker run --name es -p 9200:9200 -p 9300:9300 \
-e "discovery.type=single-node" \
-e ES_JAVA_OPTS="-Xms64m -Xmx128m" \
--net docker-br0 --ip 172.172.0.3 \
-v /home/datasingle/docker/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
-v /home/datasingle/docker/es/data:/usr/share/elasticsearch/data \
-v /home/datasingle/docker/es/plugins:/usr/share/elasticsearch/plugins \
-d elasticsearch:7.6.2
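Once the container is up, a quick check from the host (adjust host/port if you query via the bridge IP 172.172.0.3 instead):
# should return a JSON banner with the cluster name and version 7.6.2
curl http://localhost:9200
# single-node cluster health should be green or yellow
curl "http://localhost:9200/_cluster/health?pretty"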
kibana
docker run --name kibana -e "I18N_LOCALE=zh-CN" --net docker-br0 --ip 172.172.0.4 --link es:elasticsearch -p 5601:5601 -d kibana:7.6.2
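Kibana takes a while to start; a quick way to poll it (the status API is a stock Kibana endpoint):
# repeat until this returns HTTP 200
curl -I http://localhost:5601/api/status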
logstash
Create the config and pipeline directories.
config/logstash.yml
http.host: "0.0.0.0"
path.config: /usr/share/logstash/pipeline
pipeline/logstash.conf
input {
  tcp {
    port => 10102
    codec => json_lines
  }
}

## Add your filters / logstash plugins configuration here

output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    index => "%{[appname]}-%{+YYYY.MM.dd}"
  }
}
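In the output index pattern, %{[appname]} is read from each JSON event; the logback encoder below injects it via customFields, so every application gets its own daily index.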
Start logstash
docker run -it -d -p 5044:5044 -p 5045:5045 -p 10102:10102 \
--name logstash \
--net docker-br0 --ip 172.172.0.5 \
--link es:elasticsearch \
-v /home/datasingle/docker/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro \
-v /home/datasingle/docker/logstash/pipeline:/usr/share/logstash/pipeline:ro \
-e LS_JAVA_OPTS="-Xmx256m -Xms256m" \
logstash:7.6.2
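With es and logstash both running, the pipeline can be smoke-tested by hand before wiring up logback. A sketch, assuming nc is available on the host; the appname value here is arbitrary:
# push one json_lines event into the tcp input
# (some nc variants need -q 0 or -N to exit after EOF)
echo '{"appname":"smoke-test","message":"hello logstash"}' | nc localhost 10102
# shortly afterwards the daily index should appear in es
curl "http://localhost:9200/_cat/indices?v" | grep smoke-test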
logback.xml
<?xml version="1.0" encoding="UTF-8"?>
<!-- Log levels from low to high: TRACE < DEBUG < INFO < WARN < ERROR < FATAL. If set to WARN, anything below WARN is not output. -->
<!-- scan: when true, the config file is reloaded whenever it changes; defaults to true -->
<!-- scanPeriod: interval for checking whether the config file has been modified; if no unit is given, the default is milliseconds. Only effective when scan is true; defaults to 1 minute. -->
<!-- debug: when true, logback prints its internal status messages so its runtime state can be inspected in real time; defaults to false -->
<configuration scan="true" scanPeriod="10 seconds">
    <!--<include resource="org/springframework/boot/logging/logback/base.xml" />-->
    <contextName>logback</contextName>

    <!-- name is the variable's name and value is the value it is defined with. The value is inserted into the logger context; once defined, the variable can be referenced with "${}". -->
    <!-- <property name="log.path" value="/home/laolang/gitosc/km-boot/km-boot/logs/km-bbs/dev"/>-->
    <springProperty name="LOG_PATH" source="logging.path" defaultValue="../log/shop-boot"/>
    <!-- <property name="LOG_FILE" value="shop-boot" /> -->

    <!-- Colored logs -->
    <!-- Converter classes that colored log output depends on -->
    <conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter"/>
    <conversionRule conversionWord="wex" converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter"/>
    <conversionRule conversionWord="wEx"
                    converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter"/>
    <!-- Colored log pattern -->
    <property name="CONSOLE_LOG_PATTERN"
              value="${CONSOLE_LOG_PATTERN:-%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>

    <!-- Console output -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <!-- This appender is meant for development, so only the lowest level is configured; the console shows log messages at or above this level -->
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <!-- <level>info</level>-->
            <level>debug</level>
        </filter>
        <encoder>
            <Pattern>${CONSOLE_LOG_PATTERN}</Pattern>
            <!-- Set the charset -->
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!-- Log output to a file -->
    <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Log file output pattern -->
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>UTF-8</charset> <!-- set the charset here -->
        </encoder>
        <!-- Rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_PATH}/%d{yyyy-MM-dd}/info-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>10MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <!-- Number of days' worth of log files to keep -->
            <maxHistory>150</maxHistory>
        </rollingPolicy>
    </appender>

    <!-- elk -->
    <appender name="logstash" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <!-- Simulate multiple logstash instances (extra destinations commented out) -->
        <destination>192.168.1.110:10102</destination>
        <!--<destination>192.168.1.121:4560</destination>-->
        <!--<destination>192.168.1.121:4560</destination>-->
        <encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder">
            <timeZone>UTC</timeZone>
            <customFields>{"appname":"shop-boot"}</customFields>
        </encoder>
        <!--<connectionStrategy>-->
        <!--<roundRobin>-->
        <!--<connectionTTL>5 minutes</connectionTTL>-->
        <!--</roundRobin>-->
        <!--</connectionStrategy>-->
    </appender>

    <!-- Development environment: log to the console -->
    <!--<springProfile name="dev">-->
    <!--<logger name="com.laolang" level="debug"/>-->
    <!--</springProfile>-->
    <root level="debug">
        <appender-ref ref="CONSOLE"/>
        <appender-ref ref="FILE"/>
        <appender-ref ref="logstash"/>
    </root>
</configuration>
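Note: LogstashTcpSocketAppender and LogstashEncoder come from the logstash-logback-encoder library (net.logstash.logback:logstash-logback-encoder), which must be on the application's classpath in addition to logback itself.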
Problems and solutions
If the following error appears:
cluster currently has [999]/[1000] maximum shards open
This happens because ES 7.x caps each node at 1000 open shards by default (cluster.max_shards_per_node); any of the following raises the limit:
Reference: https://blog.csdn.net/luoqinglong850102/article/details/106406699
- Kibana Dev Tools
PUT /_cluster/settings
{
  "persistent": {
    "cluster": {
      "max_shards_per_node": 10000
    }
  }
}
- Edit the config file
# vim elasticsearch.yml
cluster.max_shards_per_node: 10000
- shell
curl -X PUT "192.168.1.107:9200/_cluster/settings" -H 'Content-Type: application/json' -d'
{
  "persistent" : {
    "cluster.max_shards_per_node" : "5000"
  }
}
'
- persistent: permanent; survives a full cluster restart
- transient: temporary; reset after a full cluster restart
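The value actually in effect can be confirmed afterwards:
# shows the persistent and transient settings currently applied
curl "http://192.168.1.107:9200/_cluster/settings?pretty"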
es API
_cat
GET /_cat/nodes: list all nodes
GET /_cat/health: view es cluster health
GET /_cat/master: show the elected master node
GET /_cat/indices: list all indices
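These can be run in Kibana Dev Tools or with curl from the shell; the ?v parameter adds column headers, e.g.:
# list all indices with headers (health, status, index, docs.count, store.size, ...)
curl "http://localhost:9200/_cat/indices?v"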