Docker 安装ELK集群

目录

前言

版本与端口信息

[root@localhost ~]# docker ps
CONTAINER ID        IMAGE                     COMMAND                  CREATED             STATUS              PORTS                                                                                         NAMES
fe7a22c213e2        logstash:5.6.14           "/usr/local/bin/do..."   3 seconds ago       Up 2 seconds                                                                                                      logstash
1151a8e3389e        docker.io/kibana:5.6.14   "/docker-entrypoin..."   2 days ago          Up 38 minutes       0.0.0.0:5601->5601/tcp                                                                        kb
ea4a4e1869b4        elasticsearch:5.6.3       "/docker-entrypoin..."   2 days ago          Up 38 minutes       9200/tcp, 0.0.0.0:9203->9203/tcp, 9300/tcp, 0.0.0.0:9303->9303/tcp                            es03
b6509178471a        elasticsearch:5.6.3       "/docker-entrypoin..."   2 days ago          Up 38 minutes       9200/tcp, 0.0.0.0:9202->9202/tcp, 9300/tcp, 0.0.0.0:9302->9302/tcp                            es02
4a49b0e5bc26        elasticsearch:5.6.3       "/docker-entrypoin..."   2 days ago          Up 38 minutes       9200/tcp, 0.0.0.0:9201->9201/tcp, 9300/tcp, 0.0.0.0:9301->9301/tcp                            es01
554fb17a2947        elasticsearch-head:5        "/bin/sh -c 'grunt..."   2 days ago          Up 38 minutes       0.0.0.0:9100->9100/tcp                                                                        es-head
对应IP地址
#宿主机IP信息
IP:192.168.8.100

# 容器IP信息
es01: 172.50.0.101
es02: 172.50.0.102
es03: 172.50.0.103
head:172.50.0.100
kb:  172.50.0.99
logstash:与宿主机相同

Elasticsearch

docker 拉取镜像

#elasticsearch:5.6.3
docker pull docker.io/elasticsearch:5.6.3

#head:5
docker pull mobz/elasticsearch-head:5
docker tag docker.io/mobz/elasticsearch-head:5 elasticsearch-head
docker rmi docker.io/mobz/elasticsearch-head:5

#kibana:5.6.14
docker pull docker.io/kibana:5.6.14

#logstash:5.6.14 (https://www.docker.elastic.co/#)
docker pull docker.elastic.co/logstash/logstash:5.6.14
docker tag docker.elastic.co/logstash/logstash:5.6.14 logstash
docker rmi docker.elastic.co/logstash/logstash:5.6.14

在home下新建三个文件夹

[root@localhost es]# ls -ll
total 0
drwxr-xr-x. 3 root root 43 Jan 12 22:50 es01
drwxr-xr-x. 3 root root 43 Jan 12 22:52 es02
drwxr-xr-x. 3 root root 43 Jan 12 22:53 es03
drwxr-xr-x. 2 root root 40 Jan 12 12:31 head

es01 : elasticsearch.yml (node-1)

# Cluster membership: all three nodes must share this exact name to form one cluster.
cluster.name: elasticsearch-cluster
# Unique name of this node within the cluster.
node.name: node-1
# Listen on all container interfaces.
network.bind_host: 0.0.0.0
# Address advertised to other nodes: this container's fixed IP on the net50 network.
network.publish_host: 172.50.0.101
# HTTP (REST) port; mapped 1:1 to the host in the docker run command.
http.port: 9201
# Transport (node-to-node) port; also mapped 1:1 to the host.
transport.tcp.port: 9301
# CORS is required so the elasticsearch-head UI can query the node from the browser.
http.cors.enabled: true
http.cors.allow-origin: "*"
# This node is both master-eligible and a data node.
node.master: true
node.data: true
# Seed list of all three nodes' transport endpoints for zen unicast discovery.
discovery.zen.ping.unicast.hosts: ["172.50.0.101:9301","172.50.0.102:9302","172.50.0.103:9303"]
# Quorum of master-eligible nodes (3 masters -> 2) to prevent split-brain.
discovery.zen.minimum_master_nodes: 2

es02 : elasticsearch.yml (node-2)

cluster.name: elasticsearch-cluster
node.name: node-2
network.bind_host: 0.0.0.0
network.publish_host: 172.50.0.102
http.port: 9202
transport.tcp.port: 9302
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: true
node.data: true
discovery.zen.ping.unicast.hosts: ["172.50.0.101:9301","172.50.0.102:9302","172.50.0.103:9303"]
discovery.zen.minimum_master_nodes: 2

es03 : elasticsearch.yml (node-3)

cluster.name: elasticsearch-cluster
node.name: node-3
network.bind_host: 0.0.0.0
network.publish_host: 172.50.0.103
http.port: 9203
transport.tcp.port: 9303
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: true
node.data: true
discovery.zen.ping.unicast.hosts: ["172.50.0.101:9301","172.50.0.102:9302","172.50.0.103:9303"]
discovery.zen.minimum_master_nodes: 2

/home/es 目录,存放配置文件 (解决权限问题)

[root@localhost es]# pwd
/home/es
[root@localhost es]# cd ..
[root@localhost home]# chcon -Rt svirt_sandbox_file_t es/
[root@localhost es01]# chmod 777 data
[root@localhost es02]# chmod 777 data
[root@localhost es03]# chmod 777 data

docker创建网络

docker network create --subnet=172.50.0.0/24 net50

创建es节点容器,修改JVM (默认2G)

# node-1
docker run -d -e ES_JAVA_OPTS="-Xms256m -Xmx256m" --name=es01 -p 9201:9201 -p 9301:9301 --net=net50 --ip 172.50.0.101 -v /home/es/es01/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /home/es/es01/data:/usr/share/elasticsearch/data elasticsearch:5.6.3

# node-2
docker run -d -e ES_JAVA_OPTS="-Xms256m -Xmx256m" --name=es02 -p 9202:9202 -p 9302:9302 --net=net50 --ip 172.50.0.102 -v /home/es/es02/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /home/es/es02/data:/usr/share/elasticsearch/data elasticsearch:5.6.3

# node-3
docker run -d -e ES_JAVA_OPTS="-Xms256m -Xmx256m" --name=es03 -p 9203:9203 -p 9303:9303 --net=net50 --ip 172.50.0.103 -v /home/es/es03/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /home/es/es03/data:/usr/share/elasticsearch/data elasticsearch:5.6.3

防火墙放行

firewall-cmd --permanent --add-port=9201-9203/tcp
firewall-cmd --permanent --add-port=9301-9303/tcp
firewall-cmd --reload
firewall-cmd --permanent --list-port

查看es容器情况 (出现创建失败查看日志 docker logs ${name})

[root@localhost es01]# docker ps | grep es
ea4a4e1869b4        elasticsearch:5.6.3   "/docker-entrypoin..."   13 minutes ago      Up 6 minutes        9200/tcp, 0.0.0.0:9203->9203/tcp, 9300/tcp, 0.0.0.0:9303->9303/tcp   es03
b6509178471a        elasticsearch:5.6.3   "/docker-entrypoin..."   13 minutes ago      Up 6 minutes        9200/tcp, 0.0.0.0:9202->9202/tcp, 9300/tcp, 0.0.0.0:9302->9302/tcp   es02
4a49b0e5bc26        elasticsearch:5.6.3   "/docker-entrypoin..."   13 minutes ago      Up 6 minutes        9200/tcp, 0.0.0.0:9201->9201/tcp, 9300/tcp, 0.0.0.0:9301->9301/tcp   es01
554fb17a2947        elasticsearch-head    "/bin/sh -c 'grunt..."   10 hours ago        Up 6 minutes        0.0.0.0:9100->9100/tcp                                               es-head

创建es-head容器

docker run -d --name es-head -p 9100:9100 --net=net50 --ip 172.50.0.100 elasticsearch-head

进入es-head容器

docker exec -it es-head bash

# update system
apt-get update

# install vim
apt-get install vim -y

# port: 9100, 上面添加 hostname: '*',
cat -n Gruntfile.js | grep host
vim +93 Gruntfile.js 

# 进入 /usr/src/app/_site,检查 app.js 中 this.base_uri 是否为 http://localhost:9200,如是则改为集群地址(如 http://192.168.8.100:9201)
cat app.js -n | grep this.base_uri
vim +4328 app.js 

es and head 重启

docker restart es01 es02 es03 es-head

宿主机 查看es版本

[root@localhost head]# curl http://192.168.8.100:9201
{
  "name" : "node-2",
  "cluster_name" : "boss-es-cluster",
  "cluster_uuid" : "84yzys3DSAiG5wInGUiXXQ",
  "version" : {
    "number" : "5.6.3",
    "build_hash" : "1a2f265",
    "build_date" : "2017-10-06T20:33:39.012Z",
    "build_snapshot" : false,
    "lucene_version" : "6.6.1"
  },
  "tagline" : "You Know, for Search"
}

宿主机 查看集群情况

[root@localhost ~]# curl http://192.168.8.100:9201/_cat/health?v 
epoch      timestamp cluster               status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent
1547305499 15:04:59  elasticsearch-cluster green           3         3      0   0    0    0        0             0                  -                100.0%

宿主机 查看节点健康度

[root@localhost ~]# curl http://192.168.8.100:9201/_cat/nodes?v 
ip           heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
172.50.0.101           45          25   0    0.08    0.07     0.10 mdi       -      node-1
172.50.0.103           24          25   0    0.08    0.07     0.10 mdi       *      node-3
172.50.0.102           40          25   0    0.08    0.07     0.10 mdi       -      node-2

浏览打开 http://192.168.8.100:9100/

输入框: http://192.168.8.100:9201/  按:连接

Kibana

创建存放目录和 kibana.yml

[root@localhost kibana]# pwd
/home/es/kibana

[root@localhost kibana]# ls -ll
total 4
-rw-r--r--. 1 root root 112 Jan 13 00:59 kibana.yml

[root@localhost kibana]# cat kibana.yml 
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.url: "http://192.168.8.100:9201"
kibana.index: ".kibana"
安装 kibana
docker run -d -p 5601:5601 --name kb --net=net50 --ip 172.50.0.99 -v /home/es/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml docker.io/kibana:5.6.14
查看是否安装 kibana 成功
http://192.168.8.100:5601

Code

SpringBoot 新建一个测试方法

// Integration "test" that emits log events at every level in an endless loop,
// so that ingestion through Logstash into Elasticsearch/Kibana can be watched live.
// NOTE(review): runs forever by design — stop it manually when done observing.
@Slf4j
@RunWith(SpringRunner.class)
@SpringBootTest
public class EdServiceTest {

    // Randomizes both whether an extra burst is logged and the numeric payload.
    Random random = new Random();

    @Test
    public void testList() throws Exception {
        while (true) {
            // Throttle output: one iteration every 3 seconds.
            Thread.sleep(3000);
            log.info("Hello world form eddie");
            if (random.nextBoolean()) {
                // Emit one message per level; whether DEBUG reaches the sink
                // depends on the logger levels configured in logback-dev.xml.
                log.debug("eddie " + random.nextInt(1000));
                log.info("eddie " + random.nextInt(1000));
                log.warn("eddie " + random.nextInt(1000));
                log.error("eddie " + random.nextInt(1000));
            }
        }
    }

}

SpringBoot 配置 logback-dev.xml (新建 LOGSTASH )

<?xml version="1.0" encoding="UTF-8"?>
<!-- Logback configuration for the dev profile:
     console output + rolling INFO/ERROR files + JSON shipping to Logstash over TCP. -->
<configuration>
    <!-- Base directory for the rolling file appenders below. -->
    <property name="log.home" value="/var/jenkins_home/logback/dev/logs"/>

    <!-- Spring Boot converters used by CONSOLE_LOG_PATTERN (coloring / throwable formatting). -->
    <conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter"/>
    <conversionRule conversionWord="wex"
                    converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter"/>
    <conversionRule conversionWord="wEx"
                    converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter"/>

    <!-- Spring Boot's default colored console pattern; can be overridden via the
         CONSOLE_LOG_PATTERN environment/system property. -->
    <property name="CONSOLE_LOG_PATTERN"
              value="${CONSOLE_LOG_PATTERN:-%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>

    <!-- Plain console output for local debugging. -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
            <charset>utf8</charset>
        </encoder>
    </appender>

    <!-- Ships every event as one JSON line over TCP to the Logstash tcp/json_lines input. -->
    <appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <!-- Must match the host/port of the Logstash tcp input (port 4567). -->
        <destination>192.168.8.100:4567</destination>
        <!-- An encoder is mandatory here; several implementations are available. -->
        <encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder" >
            <!-- Custom field added to every JSON document. Logstash references it as
                 %{eddie} to build the index name, and it also appears as an extra
                 field in each indexed document. -->
            <customFields>{"eddie":"spring_dev"}</customFields>
        </encoder>
    </appender>

    <!-- Daily-rolling file that accepts ERROR events only. -->
    <appender name="ERROR-OUT" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%class:%line] - %m%n</pattern>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.home}/error/%d{yyyy-MM-dd}-error.log</fileNamePattern>
            <!-- Keep 30 days of history. -->
            <maxHistory>30</maxHistory>
        </rollingPolicy>

        <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
            <MaxFileSize>1024MB</MaxFileSize>
        </triggeringPolicy>
    </appender>

    <!-- Daily-rolling file that accepts INFO events only. -->
    <appender name="INFO-OUT" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%class:%line] - %m%n</pattern>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.home}/info/%d{yyyy-MM-dd}-info.log</fileNamePattern>
            <!-- Keep 30 days of history. -->
            <maxHistory>30</maxHistory>
        </rollingPolicy>

        <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
            <MaxFileSize>1024MB</MaxFileSize>
        </triggeringPolicy>
    </appender>

    <!-- Mapper layer logs at DEBUG (e.g. SQL statements). -->
    <logger name="com.test.dao.mapper" level="DEBUG"/>

    <root level="info">
        <appender-ref ref="LOGSTASH"/>
        <appender-ref ref="STDOUT"/>
        <appender-ref ref="INFO-OUT"/>
        <appender-ref ref="ERROR-OUT"/>
    </root>
</configuration>

SpringBoot 配置 application-dev.yml

logging:
  config: classpath:logback-dev.xml

Logstash

安装 logstash (使用 --net=host,容器端口与宿主机相同)

docker run -d --name logstash --net=host logstash:5.6.14

进入宿主机

docker exec -it --user root logstash bash

基础操作目录

[root@localhost logstash]# ls -ll config/
total 16
-rw-r--r--. 1 logstash logstash 1879 Jan 15 05:33 jvm.options
-rw-rw-r--. 1 logstash logstash  551 Dec  5 14:34 log4j2.properties
-rw-rw-r--. 1 logstash logstash  264 Jan 15 04:03 logstash.yml
-rw-r--r--. 1 logstash logstash 1702 Dec  5 22:26 startup.options

[root@localhost logstash]# ls -ll pipeline/
total 8
-rw-rw-r--. 1 logstash logstash  92 Dec  5 14:34 logstash.conf
-rw-r--r--. 1 root     root     560 Jan 15 07:22 logstash-simple.conf

[root@localhost logstash]# ls -ll bin/
total 48
-rwxr-xr-x. 1 logstash logstash  377 Dec  5 22:26 cpdump
-rwxr-xr-x. 1 logstash logstash 1010 Dec  5 22:26 dependencies-report
-rwxr-xr-x. 1 logstash logstash  155 Dec  5 22:26 ingest-convert.sh
-rwxr-xr-x. 1 logstash logstash 2511 Dec  5 22:26 logstash
-rw-r--r--. 1 logstash logstash 1449 Dec  5 22:26 logstash.bat
-rwxr-xr-x. 1 logstash logstash 5400 Dec  5 22:26 logstash.lib.sh
-rwxr-xr-x. 1 logstash logstash  448 Dec  5 22:26 logstash-plugin
-rw-r--r--. 1 logstash logstash  260 Dec  5 22:26 logstash-plugin.bat
-rwxr-xr-x. 1 logstash logstash  840 Dec  5 22:26 ruby
-rw-r--r--. 1 logstash logstash 1438 Dec  5 22:26 setup.bat
-rwxr-xr-x. 1 logstash logstash 3530 Dec  5 22:26 system-install

修改内存 (默认1g)

[root@localhost logstash]# cat -n config/jvm.options | grep 512m
     6  -Xms512m
     7  -Xmx512m

关闭 xpack = false

[root@localhost logstash]# cat -n config/logstash.yml
     1  http.host: "0.0.0.0"
     2  path.config: /usr/share/logstash/pipeline
     3  xpack.monitoring.enabled: false
     4  xpack.monitoring.elasticsearch.url: http://elasticsearch:9200
     5  xpack.monitoring.elasticsearch.username: logstash_system
     6  xpack.monitoring.elasticsearch.password: changeme

新建 logstash-simple.conf

[root@localhost logstash]# cat pipeline/logstash-simple.conf
# Pipeline: receive JSON log events from the logback TCP appender and index them into ES.
input {

    tcp {
        # Must match the <destination> port configured in logback's LOGSTASH appender.
        port => 4567
        # One JSON document per line, as emitted by LogstashEncoder.
        codec => json_lines
    }

}

output {
    elasticsearch {
        # Any HTTP endpoint of the cluster; sniffing (below) discovers the other nodes.
        hosts => ["192.168.8.100:9201"]
        # "eddie" is the custom field set via <customFields> in logback-dev.xml.
        index => "logstash-%{eddie}-%{+YYYY.MM.dd}"
        document_type => "%{eddie}"
        # Batch up to 20000 events, or flush after 10 seconds of inactivity.
        flush_size => 20000
        idle_flush_time => 10
        sniffing => true
        template_overwrite => true
        action => "index"
        }
        # Also echo each event to stdout for debugging.
        stdout { codec => rubydebug}
}

防火墙放行(宿主机)

[root@localhost logstash]# firewall-cmd --permanent --add-port=4567/tcp
[root@localhost logstash]# firewall-cmd --permanent --add-port=5044/tcp
[root@localhost logstash]# firewall-cmd --permanent --add-port=9600/tcp
[root@localhost logstash]# firewall-cmd --permanent --add-port=9601/tcp
[root@localhost logstash]# firewall-cmd --reload
[root@localhost logstash]# firewall-cmd --permanent --list-port

浏览器查看 logstash 是否正常

# 地址
http://192.168.8.100:9600/

# 输出
{"host":"localhost.localdomain","version":"5.6.14","http_address":"0.0.0.0:9600","id":"798e4ae9-b513-450b-9194-c277c0c6dd59","name":"localhost.localdomain","build_date":"2018-12-05T22:29:42+00:00","build_sha":"0af8a5b23d27a0d0abcf92ec564d61ae715e700c","build_snapshot":false}

容器运行

# 运行该指令
nohup bin/logstash -f /usr/share/logstash/pipeline/logstash-simple.conf --path.data /var/log/1 &

# 正常日志如下:
[2019-01-15T07:46:13,586][INFO ][logstash.outputs.elasticsearch] Installing elasticsearch template to _template/logstash
[2019-01-15T07:46:13,606][INFO ][logstash.outputs.elasticsearch] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["//192.168.8.100:9201"]}
[2019-01-15T07:46:13,607][INFO ][logstash.pipeline        ] Starting pipeline {"id"=>"main", "pipeline.workers"=>6, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>5, "pipeline.max_inflight"=>750}
[2019-01-15T07:46:13,706][INFO ][logstash.pipeline        ] Pipeline main started
[2019-01-15T07:46:13,727][INFO ][logstash.agent           ] Successfully started Logstash API endpoint {:port=>9601}
[2019-01-15T07:46:18,598][INFO ][logstash.outputs.elasticsearch] Elasticsearch pool URLs updated {:changes=>{:removed=>[http://192.168.8.100:9201/], :added=>[http://172.50.0.101:9201/, http://172.50.0.103:9203/, http://172.50.0.102:9202/]}}
[2019-01-15T07:46:18,599][INFO ][logstash.outputs.elasticsearch] Running health check to see if an Elasticsearch connection is working {:healthcheck_url=>http://172.50.0.101:9201/, :path=>"/"}
[2019-01-15T07:46:18,608][WARN ][logstash.outputs.elasticsearch] Restored connection to ES instance {:url=>"http://172.50.0.101:9201/"}
[2019-01-15T07:46:18,613][INFO ][logstash.outputs.elasticsearch] Running health check to see if an Elasticsearch connection is working {:healthcheck_url=>http://172.50.0.103:9203/, :path=>"/"}
[2019-01-15T07:46:18,628][WARN ][logstash.outputs.elasticsearch] Restored connection to ES instance {:url=>"http://172.50.0.103:9203/"}
[2019-01-15T07:46:18,634][INFO ][logstash.outputs.elasticsearch] Running health check to see if an Elasticsearch connection is working {:healthcheck_url=>http://172.50.0.102:9202/, :path=>"/"}
[2019-01-15T07:46:18,640][WARN ][logstash.outputs.elasticsearch] Restored connection to ES instance {:url=>"http://172.50.0.102:9202/"}


额外设置

Master节点 (主节点)
node.master: true 
node.data: false

#这样配置的节点为master节点。主节点的主要职责是和集群操作相关的内容,如创建或删除索引,跟踪哪些节点是群集的一部分,并决定哪些分片分配给相关的节点。稳定的主节点对集群的健康是非常重要的。

Data节点(数据节点)
node.master: false 
node.data: true

#数据节点主要是存储索引数据的节点,主要对文档进行增删改查操作,聚合操作等。数据节点对cpu,内存,io要求较高,在优化的时候需要监控数据节点的状态,当资源不够的时候,需要在集群中添加新的节点。

Client节点 (客户端节点)
node.master: false 
node.data: false

#当主节点和数据节点配置都设置为false的时候,该节点只能处理路由请求,处理搜索,分发索引操作等,从本质上来说该客户节点表现为智能负载平衡器。独立的客户端节点在一个比较大的集群中是非常有用的,他协调主节点和数据节点,客户端节点加入集群可以得到集群的状态,根据集群的状态可以直接路由请求。
#警告:添加太多的客户端节点对集群是一种负担,因为主节点必须等待每一个节点集群状态的更新确认!客户节点的作用不应被夸大,数据节点也可以起到类似的作用。

#注意: 要保持两个master候选节点 (与 discovery.zen.minimum_master_nodes: 2 对应)。如果配置为一个主节点、一个数据节点、一个客户端节点,是启动不了集群的。下面例子为 两个主节点 + 一个客户端节点:

Node1 例子:
[root@localhost es01]# cat ../es01/elasticsearch.yml  
cluster.name: elasticsearch-cluster
node.name: node-1
network.bind_host: 0.0.0.0
network.publish_host: 172.50.0.101
http.port: 9201
transport.tcp.port: 9301
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: true 
node.data: true  
discovery.zen.ping.unicast.hosts: ["172.50.0.101:9301","172.50.0.102:9302","172.50.0.103:9303"]
discovery.zen.minimum_master_nodes: 2

Node2 例子:
[root@localhost es01]# cat ../es02/elasticsearch.yml     
cluster.name: elasticsearch-cluster
node.name: node-2
network.bind_host: 0.0.0.0
network.publish_host: 172.50.0.102
http.port: 9202
transport.tcp.port: 9302
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: true
node.data: true
discovery.zen.ping.unicast.hosts: ["172.50.0.101:9301","172.50.0.102:9302","172.50.0.103:9303"]
discovery.zen.minimum_master_nodes: 2

Node3 例子 (客户端节点)
[root@localhost es01]# cat ../es03/elasticsearch.yml
cluster.name: elasticsearch-cluster
node.name: node-3
network.bind_host: 0.0.0.0
network.publish_host: 172.50.0.103
http.port: 9203
transport.tcp.port: 9303
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: false
node.data: false
discovery.zen.ping.unicast.hosts: ["172.50.0.101:9301","172.50.0.102:9302","172.50.0.103:9303"]
discovery.zen.minimum_master_nodes: 2
You have new mail in /var/spool/mail/root

logstash 配置或者日志:
# Alternative pipeline: tail a local log file instead of receiving events over TCP.
input {
    file {
        path => "/var/log/yum.log" # file to tail (yum log here; substitute e.g. an nginx access log)
        start_position => "beginning" # read from the start of the file; without this only newly appended lines are read, like tail -f
        }
}

output {
    elasticsearch {
        hosts => ["192.168.8.100:9201"]
        index => "eddie-%{+YYYY.MM.dd}" # one index per day
         }
        stdout { codec => rubydebug} # echo each event for debugging
}

kibana容器 x-pack-5.6.14 下载和安装

#进入 kibana 容器
[root@localhost ~]# docker exec -it kb bash  
root@1151a8e3389e:/# ls
bin  boot  dev  docker-entrypoint.sh  etc  home  lib  lib64  media  mnt  opt  proc  root  run  sbin  srv  sys  tmp  usr  var

#查询 kibana-plugin 文件
root@1151a8e3389e:/# find / -name kibana-plugin
find: `/proc/acpi': Permission denied
find: `/proc/scsi': Permission denied
find: `/sys/firmware': Permission denied
/usr/share/kibana/bin/kibana-plugin

#进入目录
root@1151a8e3389e:/# cd /usr/share/kibana/                 
root@1151a8e3389e:/usr/share/kibana# ls
LICENSE.txt  NOTICE.txt  README.txt  bin  config  node  node_modules  optimize  package.json  plugins  src  ui_framework  webpackShims

#安装 x-pack
root@1151a8e3389e:/usr/share/kibana# bin/kibana-plugin install x-pack
Attempting to transfer from x-pack
Attempting to transfer from https://artifacts.elastic.co/downloads/kibana-plugins/x-pack/x-pack-5.6.14.zip
Transferring 119232732 bytes.............
Transfer complete
Retrieving metadata from plugin archive
Error: end of central directory record signature not found
    at /usr/share/kibana/node_modules/yauzl/index.js:179:14
    at /usr/share/kibana/node_modules/yauzl/index.js:539:5
    at /usr/share/kibana/node_modules/fd-slicer/index.js:32:7
    at FSReqWrap.wrapper [as oncomplete] (fs.js:683:17)
Plugin installation was unsuccessful due to error "Error retrieving metadata from plugin archive"

#会出现下载很慢,失败在继续安装
root@1151a8e3389e:/usr/share/kibana# bin/kibana-plugin install x-pack
Attempting to transfer from x-pack
Attempting to transfer from https://artifacts.elastic.co/downloads/kibana-plugins/x-pack/x-pack-5.6.14.zip
Transferring 119232732 bytes....................
Transfer complete
Retrieving metadata from plugin archive
Extracting plugin archive
Extraction complete
Optimizing and caching browser bundles...
Plugin installation complete

#返回宿主机 添加 {xpack.security.enabled: "false"} 网上说
#但本人测试是报错的 { Error: EACCES: permission denied, open '/usr/share/kibana/optimize/bundles/graph.entry.js

[root@localhost ~]# cat /home/es/kibana/kibana.yml  
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.url: "http://192.168.8.100:9201"
kibana.index: ".kibana"
#xpack.security.enabled: "false"

重启 kibana 容器
[root@localhost ~]# docker restart kb

登录页面
默认分别是 elastic 和 changeme
http://192.168.8.100:5601  --> Monitoring模块查询节点曲线图
es容器 x-pack-5.6.14 下载和安装 (如果是集群,es01,es02都需要以下步骤)
#宿主机追加配置
[root@localhost ~]# cat /home/es/es01/elasticsearch.yml 
cluster.name: elasticsearch-cluster
node.name: node-1
network.bind_host: 0.0.0.0
network.publish_host: 172.50.0.101
http.port: 9201
transport.tcp.port: 9301
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: true 
node.data: true  
discovery.zen.ping.unicast.hosts: ["172.50.0.101:9301","172.50.0.102:9302","172.50.0.103:9303"]
discovery.zen.minimum_master_nodes: 2
xpack.security.enabled: "false"  # this
http.cors.allow-headers: "Authorization" # this

#进入容器
[root@localhost ~]# docker exec -it es03 bash
#运行安装
root@ea4a4e1869b4:/usr/share/elasticsearch# bin/elasticsearch-plugin install x-pack
-> Downloading x-pack from elastic
[=================================================] 100%   
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@     WARNING: plugin requires additional permissions     @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
* java.io.FilePermission \\.\pipe\* read,write
* java.lang.RuntimePermission accessClassInPackage.com.sun.activation.registries
* java.lang.RuntimePermission getClassLoader
* java.lang.RuntimePermission setContextClassLoader
* java.lang.RuntimePermission setFactory
* java.security.SecurityPermission createPolicy.JavaPolicy
* java.security.SecurityPermission getPolicy
* java.security.SecurityPermission putProviderProperty.BC
* java.security.SecurityPermission setPolicy
* java.util.PropertyPermission * read,write
* java.util.PropertyPermission sun.nio.ch.bugLevel write
* javax.net.ssl.SSLPermission setHostnameVerifier
See http://docs.oracle.com/javase/8/docs/technotes/guides/security/permissions.html
for descriptions of what these permissions allow and the associated risks.

Continue with installation? [y/N]y
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@        WARNING: plugin forks a native controller        @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
This plugin launches a native controller that is not subject to the Java
security manager nor to system call filters.

Continue with installation? [y/N]y
-> Installed x-pack

重启 es03 容器
[root@localhost ~]# docker restart es03
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
以下是使用Docker Compose建立ELK集群的步骤: 1.创建一个新的目录并在其中创建一个docker-compose.yml文件。 2.在docker-compose.yml文件中定义三个服务:Elasticsearch、Logstash和Kibana。 3.为每个服务定义容器映像,端口和其他配置选项。 4.使用Docker Compose命令启动ELK集群。 5.在Kibana中配置索引模式和可视化仪表板以查看和分析日志数据。 下面是一个示例docker-compose.yml文件,用于启动一个ELK集群: ```yaml version: '3' services: elasticsearch: image: docker.elastic.co/elasticsearch/elasticsearch:7.14.0 container_name: elasticsearch environment: - node.name=elasticsearch - discovery.seed_hosts=elasticsearch - cluster.initial_master_nodes=elasticsearch - bootstrap.memory_lock=true - "ES_JAVA_OPTS=-Xms512m -Xmx512m" ulimits: memlock: soft: -1 hard: -1 volumes: - esdata1:/usr/share/elasticsearch/data ports: - 9200:9200 networks: - elk logstash: image: docker.elastic.co/logstash/logstash:7.14.0 container_name: logstash volumes: - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml - ./logstash/pipeline:/usr/share/logstash/pipeline ports: - 5044:5044 networks: - elk kibana: image: docker.elastic.co/kibana/kibana:7.14.0 container_name: kibana environment: ELASTICSEARCH_URL: http://elasticsearch:9200 ports: - 5601:5601 networks: - elk volumes: esdata1: driver: local networks: elk: ``` 在上面的示例中,我们定义了三个服务:Elasticsearch、Logstash和Kibana。每个服务都有自己的容器映像,端口和其他配置选项。我们还定义了一个名为“elk”的网络,以便服务可以相互通信。 要启动ELK集群,请在包含docker-compose.yml文件的目录中运行以下命令: ``` docker-compose up ``` 这将启动Elasticsearch、Logstash和Kibana容器,并将它们连接到“elk”网络。一旦容器启动,您可以在浏览器中访问Kibana Web界面,该界面默认在端口5601上运行。 在Kibana中,您可以配置索引模式和可视化仪表板以查看和分析日志数据。要将日志数据发送到Logstash,请将日志发送到Logstash监听的端口5044。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

eddie_k2

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值