部署elk
elasticsearch docker-compose.yml 部署
version: '2.2'
services:
  es01:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.3.1
    container_name: es01
    environment:
      - TZ=Asia/Shanghai
      - node.name=es01
      - discovery.seed_hosts=es02
      - cluster.initial_master_nodes=es01,es02
      - cluster.name=docker-cluster
      # Lock the JVM heap in RAM; requires the unlimited memlock ulimit below.
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      # Host bind mounts for data and config.
      - ./esdata01/data:/usr/share/elasticsearch/data
      - ./config:/usr/share/elasticsearch/config
    ports:
      # Quote port mappings so YAML never misparses "HOST:CONTAINER" scalars.
      - "9200:9200"
    networks:
      - esnet
  es02:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.3.1
    container_name: es02
    environment:
      - TZ=Asia/Shanghai
      - node.name=es02
      - discovery.seed_hosts=es01
      - cluster.initial_master_nodes=es01,es02
      - cluster.name=docker-cluster
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - ./esdata02/data:/usr/share/elasticsearch/data
    networks:
      - esnet
volumes:
  # NOTE(review): these named volumes are declared but never referenced above —
  # both services use host bind mounts (./esdata01, ./esdata02). Keep them only
  # if you intend to switch the services to named volumes.
  esdata01:
    driver: local
  esdata02:
    driver: local
networks:
  esnet:
# elasticsearch.yml
# Node settings mounted into the containers via the ./config bind mount.
cluster.name: "docker-cluster"
# Bind on all interfaces so the node is reachable through the published port.
network.host: 0.0.0.0
# CORS must be enabled for browser tools (e.g. elasticsearch-head) to connect.
http.cors.enabled: true
http.cors.allow-origin: "*"
# kibana docker-compose.yml
version: '2'
services:
  kibana:
    image: docker.elastic.co/kibana/kibana:7.3.1
    volumes:
      # Override the container's default kibana.yml with the local one below.
      - ./kibana.yml:/usr/share/kibana/config/kibana.yml
    environment:
      TZ: Asia/Shanghai
      SERVER_NAME: kibana.proper
      # Elasticsearch endpoint published by the es01 service.
      ELASTICSEARCH_HOSTS: http://172.168.1.4:9200
    ports:
      # Quote port mappings so YAML never misparses "HOST:CONTAINER" scalars.
      - "5601:5601"
# kibana.yml
# Kibana server settings, bind-mounted into the container above.
server.port: 5601
# Listen on all interfaces so the container port mapping works.
server.host: "0.0.0.0"
# Use the Simplified Chinese UI.
i18n.locale: "zh-CN"
# logstash docker-compose.yml
version: '2'
services:
  logstash:
    image: logstash:7.3.1
    volumes:
      # Settings (logstash.yml etc.) and pipeline definitions from the host.
      - ./config/:/usr/share/logstash/config/
      - ./pipeline/:/usr/share/logstash/pipeline/
    environment:
      TZ: Asia/Shanghai
      SERVER_NAME: logstash.proper
    ports:
      # Beats input port used by the Filebeat agents below; quoted to avoid
      # YAML misparsing "HOST:CONTAINER" scalars.
      - "5044:5044"
# Deploy the Filebeat agent (Debian/Ubuntu host)
# Download the 7.3.1 .deb package — same version as the rest of the stack.
curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.3.1-amd64.deb
# Install the package (run as root).
dpkg -i filebeat-7.3.1-amd64.deb
# /etc/filebeat/filebeat.yml
#=========================== Filebeat inputs =============================
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      # Ship all nginx proxy logs from the bind-mounted log directory.
      - /opt/docker/nginx_proxy/log/*.log
      #- c:\programdata\elasticsearch\logs\*
#============================== Kibana =====================================
setup.kibana:
  host: "172.168.1.4:5601"
#-------------------------- Elasticsearch output ------------------------------
output.elasticsearch:
  hosts: ["172.168.1.4:9200"]
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"
# /etc/filebeat/modules.d/nginx.yml
- module: nginx
  # Access logs
  access:
    enabled: true
    var.paths: ["/opt/docker/nginx_proxy/log/req_stat.log"]
  # Error logs
  error:
    enabled: true
    var.paths: ["/opt/docker/nginx_proxy/log/error.log"]
# Start up
# Enable the nginx module configured above.
filebeat modules enable nginx
# Load the index template and Kibana dashboards into the configured outputs.
filebeat setup
# Start the Filebeat service.
service filebeat start
一个镜像集成版本
# Set virtual memory — Elasticsearch requires vm.max_map_count >= 262144.
vim /etc/sysctl.conf
# (line to add inside /etc/sysctl.conf, not a shell command:)
vm.max_map_count=262144
# Reload the settings so they take effect without a reboot.
sysctl -p /etc/sysctl.conf
# docker-compose (service fragment — nest under a top-level `services:` key)
elk:
  image: sebp/elk
  ports:
    # Kibana UI, Elasticsearch HTTP, Logstash Beats input.
    - "5601:5601"
    - "9200:9200"
    - "5044:5044"
$ sudo docker run -p 5601:5601 -p 9200:9200 -p 5044:5044 -it --name elk sebp/elk
Elasticsearch 5.0之后的安装elasticsearch-head插件
下载 elasticsearch-head
elasticsearch-head,可以直接下压缩包,也可以通过 git clone。
输入命令,等待下载完成:
git clone https://github.com/mobz/elasticsearch-head.git
安装 grunt-cli
npm install -g grunt-cli
安装 grunt
elasticsearch-head 下载完成后,进入 elasticsearch-head 文件夹,执行命令:
npm install grunt --save
安装依赖的 npm 包
npm install
修改启动文件
所有依赖包安装成功后,修改 elasticsearch-head 目录下的 Gruntfile.js 文件,在 options 属性内增加 hostname,设置为 0.0.0.0。
connect: {
server: {
options: {
hostname: '0.0.0.0',
port: 9100,
base: '.',
keepalive: true
}
}
}
修改 Elasticsearch 配置文件 config/elasticsearch.yml
在配置文件最后增加两个配置项,这样 elasticsearch-head 插件才可以访问 Elasticsearch 。
http.cors.enabled: true
http.cors.allow-origin: "*"
启动 elasticsearch-head
在 elasticsearch-head 目录下,执行命令:
grunt server
输出如下内容表示启动成功:
Running "connect:server" (connect) task
Waiting forever...
Started connect web server on http://localhost:9100
访问 http://localhost:9100 地址,就可以看到当前 Elasticsearch 集群信息。