docker-compose.yml
version: "3"
services:
  elasticsearch:  # service name
    image: docker.elastic.co/elasticsearch/elasticsearch:6.8.6  # pinned Elasticsearch version
    environment:  # container environment for Elasticsearch
      - node.name=es01
      - cluster.name=es-docker-cluster
      - discovery.type=single-node  # run as a single node
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms16g -Xmx16g"  # JVM heap size
    ulimits:  # only honored on single-host runs; ignored by swarm deploy
      memlock:
        soft: -1
        hard: -1
    volumes:  # persist data/logs/plugins on the host; mount the config file
      - ./esdata/data:/usr/share/elasticsearch/data
      # fixed typo: original mounted /user/share/... which is not where
      # Elasticsearch writes its logs, so the logs volume was never used
      - ./esdata/logs:/usr/share/elasticsearch/logs
      - ./esdata/plugins:/usr/share/elasticsearch/plugins
      - ./elk-config/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    ports:  # host:container mappings; quoted so YAML never mis-types them
      - "9200:9200"  # HTTP API
      - "9300:9300"  # transport
    networks:
      esnet:
        # NOTE(review): per the original author's comment, this static IP did
        # not take effect at runtime — confirm before relying on this address
        ipv4_address: 10.0.18.3
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.hostname==docker-worker02  # pin to this swarm node
      update_config:
        parallelism: 1
        delay: 10s
      restart_policy:
        condition: on-failure
  logstash:
    image: docker.elastic.co/logstash/logstash:6.8.6
    ports:
      - "9044:9044"
    volumes:
      - ./elk-config/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - ./elk-config/logstash/config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
      - ./logdata:/usr/share/logstash/data
    networks:
      - esnet
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.hostname==docker-worker02
  kibana:
    image: docker.elastic.co/kibana/kibana:6.8.6
    volumes:
      - ./elk-config/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      - "5601:5601"
    networks:
      - esnet
    depends_on:  # start ordering only; does not wait for ES to be ready
      - elasticsearch
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.hostname==docker-worker02
  headPlugin:
    image: mobz/elasticsearch-head:5
    container_name: head  # NOTE(review): container_name is ignored in swarm mode
    ports:
      - "9100:9100"
    networks:
      - esnet
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.hostname==docker-worker02
networks:
  esnet:
    ipam:
      config:
        - subnet: 10.0.18.0/24
elasticsearch.yml
---
## Default Elasticsearch configuration from Elasticsearch base image.
## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml
#
cluster.name: "docker-cluster"
network.host: 0.0.0.0
## X-Pack settings
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
#
xpack.license.self_generated.type: basic #change the 30-day trial license to the basic type — required
xpack.security.enabled: false #disable the paid security component
xpack.monitoring.collection.enabled: true
kibana.yml
---
## Default Kibana configuration from Kibana base image.
## https://github.com/elastic/kibana/blob/master/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.js
#
server.name: kibana
server.host: 0.0.0.0
#elasticsearch.ssl.verify: false
# NOTE(review): hard-coded Elasticsearch address — confirm it matches the
# address the elasticsearch service is actually reachable at
elasticsearch.hosts: [ "http://10.0.18.10:9200" ]
#elasticsearch_url: "http://172.16.10.12:9200"
#monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: zh-CN  # Chinese UI translation
## X-Pack security credentials
logstash.conf — used because the raw log line format is pipe-delimited: XXX|XXX|XXX|XXX|XXX|XXX|XXX|XXX|XXX
# Tail all log files matching the task* prefix.
input {
  file {
    path => "/home/logs/acceelog/task*"
    # "beginning" only applies the first time a file is seen; after that the
    # sincedb position is used, so already-read lines are not replayed
    start_position => "beginning"
  }
}
filter {
  # The raw line is pipe-delimited into 9 columns (indices 0-8).
  mutate {
    split => ["message", "|"]
  }
  # Promote each column to its own named field.
  # Fix: the original repeated the placeholder key "XXXX" eight times —
  # duplicate hash keys collapse (only one field survives), so every key must
  # be unique. Replace the placeholder names below with the real column names.
  mutate {
    add_field => {
      "XXIP"   => "%{[message][0]}"
      "field1" => "%{[message][1]}"
      "field2" => "%{[message][2]}"
      "field3" => "%{[message][3]}"
      "field4" => "%{[message][4]}"
      "field5" => "%{[message][5]}"
      "field6" => "%{[message][6]}"
      "field7" => "%{[message][7]}"
      "field8" => "%{[message][8]}"
    }
  }
  # Parse the timestamp column into @timestamp.
  # NOTE(review): no field named "时间" ("time") is created above — one of the
  # placeholder columns must be renamed to "时间" (or this match target
  # changed) for the date filter to ever fire.
  date {
    match => ["时间", "yyyyMMddHHmmss"]
    timezone => "Asia/Shanghai"
  }
  # cname holds a ";"-separated list; split it and expose the first three items.
  mutate {
    split => ["cname", ";"]
  }
  mutate {
    add_field => {
      "cname1" => "%{[cname][0]}"
      "cname2" => "%{[cname][1]}"
      "cname3" => "%{[cname][2]}"
    }
  }
}
# Ship parsed events to Elasticsearch with one index per day.
output {
  elasticsearch {
    hosts => ["172.16.10.12:9200"]
    index => "dnslog-%{+YYYY.MM.dd}"
  }
# NOTE(review): the closing "}" of the output block is not visible here —
# the file appears truncated; verify the config ends with a matching brace.