Deploying ELK with Docker


Architecture

Filebeat -> Kafka -> Logstash -> Elasticsearch

Filebeat ships logs into Kafka, which buffers them; Logstash consumes from Kafka, parses the events, and writes them to Elasticsearch.

Log collection: Filebeat

Create the data directories

mkdir -pv /data/filebeat/{config,data,logs}

Configuration file

The output goes to Kafka:

cat > /data/filebeat/config/filebeat.yml << 'EOF'
###################### Filebeat Configuration Example #########################
filebeat.name: web01
filebeat.idle_timeout: 5s
filebeat.spool_size: 2048

#----------------------------------input from nginx access_log--------------------------------#
filebeat.inputs:
- type: log
  enabled: true
  paths:
   - /data/nginx/logs/access.log.*
  fields:
    kafka_topic: topic-nginx-access
    server_ip: 192.168.2.180
  fields_under_root: true

  # multiline log handling: uncomment to merge continuation lines (e.g. stack traces)
  # multiline.pattern: '^\['
  # multiline.negate: true
  # multiline.match: after

  encoding: plain
  tail_files: false

  # how often to scan the configured paths for new or changed files
  scan_frequency: 3s
  # poll a file every 1s; after two unchanged checks, multiply the interval by backoff_factor, up to 5s
  backoff: 1s
  max_backoff: 5s
  backoff_factor: 2

#----------------------------------input from nginx error_log--------------------------------#
- type: log
  enabled: true
  paths:
   - /data/nginx/logs/error.log.*
  fields:
    kafka_topic: topic-nginx-error
    server_ip: 192.168.2.181
  fields_under_root: true

  # multiline log handling: uncomment to merge continuation lines (e.g. stack traces)
  # multiline.pattern: '^\['
  # multiline.negate: true
  # multiline.match: after

  encoding: plain
  tail_files: false

  # how often to scan the configured paths for new or changed files
  scan_frequency: 3s
  # poll a file every 1s; after two unchanged checks, multiply the interval by backoff_factor, up to 5s
  backoff: 1s
  max_backoff: 5s
  backoff_factor: 2

#----------------------------------Kafka output--------------------------------#
output.kafka:
  enabled: true
  hosts: ['192.168.2.60:9092','192.168.2.61:9092','192.168.2.62:9092']
  topic: '%{[kafka_topic]}'
  codec.format:
    string: '%{[message]} -- %{[server_ip]}'

EOF
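Before wiring this into Compose, it's worth checking that the config parses and that the brokers are reachable. A minimal sketch using a throwaway container (assumes the elastic/filebeat:7.11.1 image and the config path above; "test output" support for the Kafka output is an assumption for this 7.x release):

# validate the YAML and settings
docker run --rm \
  -v /data/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro \
  elastic/filebeat:7.11.1 test config
# additionally try to connect to the brokers listed in output.kafka
docker run --rm \
  -v /data/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro \
  elastic/filebeat:7.11.1 test output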

docker-compose file

mkdir -pv /data/docker-compose/filebeat
cat > /data/docker-compose/filebeat/docker-compose.yml << EOF
version: "3"
services:
  filebeat:
    container_name: filebeat
    image: elastic/filebeat:7.11.1
    user: root
    restart: always
    volumes:
      - /data/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml
      - /data/filebeat/data:/usr/share/filebeat/data/registry
      - /data/filebeat/logs:/usr/share/filebeat/logs
      - /data/nginx/logs:/data/nginx/logs
EOF

Start

docker-compose up -d
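To confirm events are flowing, consume a few messages from the topic. A sketch, assuming the stock Kafka console tools are available on one of the brokers:

kafka-console-consumer.sh --bootstrap-server 192.168.2.60:9092 \
  --topic topic-nginx-access --from-beginning --max-messages 5

Each message should end with " -- 192.168.2.180", appended by the codec.format string in filebeat.yml above.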

Elasticsearch deployment

Create the data directories

mkdir -pv /data/elasticsearch/{config,data,logs}
chown 1000 /data/elasticsearch/{data,logs}

Adjust host settings

vim /etc/sysctl.conf
Add:
vm.max_map_count=655360
sysctl -p

vim /etc/security/limits.conf
Add:
* soft memlock unlimited
* hard memlock unlimited
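A quick check that both settings took effect (the memlock limit only applies to new login sessions):

sysctl vm.max_map_count    # expect 655360
ulimit -l                  # expect "unlimited" after re-login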

Configuration file

cat > /data/elasticsearch/config/elasticsearch.yml << 'EOF'
cluster.name: git_es_cluster
node.name: node-3
network.host: 3.1.101.35
http.port: 9200
bootstrap.memory_lock: true

# allow cross-origin requests (needed by elasticsearch-head)
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-methods: OPTIONS, HEAD, GET, POST, PUT, DELETE
http.cors.allow-headers: "X-Requested-With, Content-Type, Content-Length, X-User"

# Cluster
node.master: true
node.data: true
transport.tcp.port: 9300
discovery.seed_hosts: ["192.168.2.151", "192.168.2.152", "192.168.2.153"]
cluster.initial_master_nodes: ["node-1","node-2","node-3"]

cluster.routing.allocation.same_shard.host: true
cluster.routing.allocation.node_initial_primaries_recoveries: 4
cluster.routing.allocation.node_concurrent_recoveries: 4
EOF

chown 1000 /data/elasticsearch/config/*
Generate passwords automatically:
elasticsearch-setup-passwords auto

Set passwords interactively:
elasticsearch-setup-passwords interactive
or
elasticsearch-setup-passwords interactive -EELASTIC_PASSWORD="123456"
Generate a CA certificate:
elasticsearch-certutil ca -out config/elastic-certificates.p12 -pass "123456"
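These commands run inside the Elasticsearch container, and the password setup only works once x-pack security is enabled in elasticsearch.yml (not shown in the config above). A sketch, assuming the Compose service name es used below:

docker-compose exec es bin/elasticsearch-setup-passwords interactive
docker-compose exec es bin/elasticsearch-certutil ca -out config/elastic-certificates.p12 -pass "123456"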

Logging in via es-head:
http://3.1.101.33:9200/?auth_user=elastic&auth_password=elastic123456

docker-compose file

mkdir -pv /data/docker-compose/elasticsearch/
cat > /data/docker-compose/elasticsearch/docker-compose.yml << EOF
version: "3"
services:
  es:
    container_name: es
    image: elasticsearch:7.11.1
    network_mode: host
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /data/elasticsearch/logs:/usr/share/elasticsearch/logs
      - /data/elasticsearch/data:/usr/share/elasticsearch/data
    environment:
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms2G -Xmx2G"
    ulimits:
      memlock:
        soft: -1
        hard: -1

  es-admin:
    container_name: es-admin
    image: mobz/elasticsearch-head:5
    ports:
      - "9201:9100"
EOF

Deploy head on node-1 only; node-2 and node-3 just run es.

1. Fixing the es-head cross-origin error (browser reports: Request header field Content-Type is not allowed by Access-Control-Allow-Headers)
Add to the es config file:
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-methods: OPTIONS, HEAD, GET, POST, PUT, DELETE
http.cors.allow-headers: "X-Requested-With, Content-Type, Content-Length, X-User"

2. Fixing the blank "Data Browse" tab in es-head (browser reports: 406 Not Acceptable)
Edit the es-head source file vendor.js, around line 6886:
contentType: "application/x-www-form-urlencoded" --> contentType: "application/json;charset=UTF-8"

Start

docker-compose up -d
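Once all three nodes are up, confirm they formed a single cluster:

curl -s http://192.168.2.151:9200/_cluster/health?pretty
# expect "number_of_nodes": 3 and "status": "green"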

Logstash deployment

Create the data directories

mkdir -pv /data/logstash/{config,data,pipeline.d,logs}

Configuration files

logstash.yml

cat > /data/logstash/config/logstash.yml << 'EOF'
node.name: logstash-node1
http.host: "0.0.0.0"
path.data: data
path.logs: /usr/share/logstash/logs
config.reload.automatic: true
config.reload.interval: 5s
config.test_and_exit: false
EOF

pipelines.yml

cat > /data/logstash/config/pipelines.yml << 'EOF'
- pipeline.id: access-pipeline
  path.config: "/usr/share/logstash/pipeline.d/nginx-access.conf"
- pipeline.id: error-pipeline
  path.config: "/usr/share/logstash/pipeline.d/nginx-error.conf"
EOF

chown 1000:1000 /data/logstash/{config,data,pipeline.d,logs} -R

The entries in pipelines.yml must start flush-left at the top of the file, otherwise they do not take effect.

nginx-access.conf

pipeline.d/nginx-access.conf

cat > /data/logstash/pipeline.d/nginx-access.conf << 'EOF'
input {
   kafka {
    topics_pattern => "topic-nginx-access"
    bootstrap_servers => "192.168.2.60:9092,192.168.2.61:9092,192.168.2.62:9092"
    consumer_threads => 1
    decorate_events => true
    group_id => "kafka-nginx"
    add_field => {"logstash-server" => "192.168.2.161"}
   }
}

filter {
  grok {
    match => { "message" => "%{IP:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time_local}\] (\"%{WORD:htttp_request} %{URIPATHPARAM:htttp_request_url} %{DATA:http_protocol}\"|\"%{DATA:http_request}\") %{INT:htttp_request_status} %{INT:body_bytes_sent} \"%{DATA:http_referer}\" \"%{DATA:http_user_agent}\" -- %{IP:filebeat-server}" }
  }

  geoip {
      source => "remote_addr"
      target => "geoip"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
      remove_field => [ "[geoip][latitude]", "[geoip][longitude]", "[geoip][country_code2]","[geoip][country_code3]", "[geoip][timezone]", "[geoip][continent_code]", "[geoip][dma_code]", "[geoip][region_code]" ]
  }

  mutate{
    enable_metric => "false"
    remove_field => ["message","@version"]
    convert => [ "body_bytes_sent", "integer" ]
    convert => [ "htttp_request_status}", "integer" ]
    #convert => [ "responsetime", "float" ]
    #convert => [ "upstreamtime", "float" ]
    convert => [ "[geoip][coordinates]", "float" ]
  }

  useragent {
    source => "http_user_agent"
    target => "http_user_agent"
    # drop user-agent sub-fields we don't need
    remove_field => [ "[http_user_agent][minor]","[http_user_agent][major]","[http_user_agent][build]","[http_user_agent][patch]","[http_user_agent][os_minor]","[http_user_agent][os_major]" ]
  }

  date {
    match => ["time_local","dd/MMM/YYYY:HH:mm:ss Z","yyyy-MM-dd HH:mm:ss"]
    target => "time_local"
  }

}

output {
  elasticsearch {
    hosts => ["192.168.2.151:9200","192.168.2.152:9200","192.168.2.153:9200"]
    index => "logstash-nginx-access_%{+YYYY-MM-dd}"
    sniffing => true
    template_overwrite => true
  }
}
EOF

To use the Kibana map features, the index name must start with logstash (the default logstash-* template is what maps the geoip coordinates as geo_point).
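Once the Logstash container is running (Compose file below), the pipeline syntax can be validated in place. A sketch; --path.data points at a scratch directory so the check does not clash with the running instance's lock file:

docker-compose exec logstash bin/logstash --path.data /tmp/logstash-test \
  -f /usr/share/logstash/pipeline.d/nginx-access.conf --config.test_and_exit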

nginx-error.conf

pipeline.d/nginx-error.conf

cat > /data/logstash/pipeline.d/nginx-error.conf << 'EOF'
input {
   kafka {
    bootstrap_servers => "192.168.2.60:9092,192.168.2.61:9092,192.168.2.62:9092"
    topics_pattern => "topic-nginx-error"
    group_id => "kafka-nginx"
    consumer_threads => 5
    decorate_events => true
    add_field => {"logstash-server" => "192.168.2.162"}
   }
}

filter {
  grok {
    match => { "message" => "%{DATA:detail} -- %{IP:filebeat-server}" }
  }

  mutate{
    enable_metric => "false"
    remove_field => ["message","@version"]
  }

}

output {
  elasticsearch {
    hosts => ["192.168.2.151:9200","192.168.2.152:9200","192.168.2.153:9200"]
    index => "logstash-nginx-error_%{+YYYY-MM-dd}"
    sniffing => true
    template_overwrite => true
  }
}
EOF

docker-compose file

mkdir -pv /data/docker-compose/logstash
cat > /data/docker-compose/logstash/docker-compose.yml << EOF
version: "3"
services:
  logstash:
    container_name: logstash
    user: root
    image: logstash:7.11.1
    restart: always
    volumes:
      - /data/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - /data/logstash/config/pipelines.yml:/usr/share/logstash/config/pipelines.yml
      - /data/logstash/data:/usr/share/logstash/data
      - /data/logstash/pipeline.d:/usr/share/logstash/pipeline.d
      - /data/logstash/logs:/usr/share/logstash/logs
    environment:
      LS_JAVA_OPTS: "-Xmx2G -Xms2G"
EOF

Start

docker-compose up -d
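With config.reload.automatic enabled, both pipelines should come up on their own. The node stats API (listening on 0.0.0.0:9600 per logstash.yml) shows per-pipeline event counts, the quickest way to confirm events are moving:

curl -s http://localhost:9600/_node/stats/pipelines?pretty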

Kibana deployment

Create the data directories

mkdir -pv /data/kibana/{config,logs}
chown 1000 /data/kibana/{config,logs}

Configuration file

cat > /data/kibana/config/kibana.yml << 'EOF'
#
# ** THIS IS AN AUTO-GENERATED FILE **
#

# Default Kibana configuration for docker target
server.name: kibana
server.port: 5601
server.host: "0"
elasticsearch.hosts: [ "http://192.168.2.151:9200" ]
monitoring.ui.container.elasticsearch.enabled: true

map.tilemap.url: 'http://webrd02.is.autonavi.com/appmaptile?lang=zh_cn&size=1&scale=1&style=7&x={x}&y={y}&z={z}'
i18n.locale: "zh-CN"
EOF

If Elasticsearch security (x-pack) is enabled, also add to kibana.yml:
elasticsearch.username: "elastic"
elasticsearch.password: "elastic"
xpack.security.enabled: true
xpack.security.encryptionKey: "4297f44b13955235245b2497399d7a93"

docker-compose file

mkdir -pv /data/docker-compose/kibana/
cat > /data/docker-compose/kibana/docker-compose.yml << EOF
version: "3"
services:
  kibana:
    container_name: kibana
    image: kibana:7.11.1
    restart: always
    ports:
      - "5601:5601"
    volumes:
      - /data/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
EOF

Start

docker-compose up -d
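A quick liveness check against Kibana's own status API:

curl -s http://localhost:5601/api/status
# look for an overall state of "green" in the JSON once ES is reachable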

Metric collection: Metricbeat

Create the data directories

mkdir -pv /data/metricbeat/{config,data,logs}

Configuration file

  • Example: collecting nginx and system metrics
cat > /data/metricbeat/config/metricbeat.yml << EOF
###################### Metricbeat Configuration Example #########################
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
setup.kibana:
  host: "192.168.2.171:5601"

metricbeat.modules:
#---------------------------------- Monitor Nginx --------------------------------#
- module: nginx
  metricsets:
    - stubstatus
  period: 10s
  hosts: ["http://192.168.2.180:8000"]
  server_status_path: "status"
  
# Module: system
# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.11/metricbeat-module-system.html

#---------------------------------- Monitor system --------------------------------#
- module: system
  period: 10s
  metricsets:
    - cpu
    - load
    - memory
    - network
    - process
    - process_summary
    - socket_summary
    #- entropy
    #- core
    #- diskio
    #- socket
    #- service
    #- users
  process.include_top_n:
    by_cpu: 5      # include top 5 processes by CPU
    by_memory: 5   # include top 5 processes by memory

- module: system
  period: 1m
  metricsets:
    - filesystem
    - fsstat
  processors:
  - drop_event.when.regexp:
      system.filesystem.mount_point: '^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)'

- module: system
  period: 15m
  metricsets:
    - uptime

#---------------------------------- Elasticsearch --------------------------------#
output.elasticsearch:
  hosts: ["192.168.2.151:9200","192.168.2.152:9200","192.168.2.153:9200"]

EOF
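The module configuration can be exercised before starting the service: metricbeat's test subcommand does a one-shot fetch. A sketch using a throwaway container (assumes the nginx status endpoint set up below is already reachable):

docker run --rm \
  -v /data/metricbeat/config/metricbeat.yml:/usr/share/metricbeat/metricbeat.yml:ro \
  elastic/metricbeat:7.11.1 test modules nginx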

Enable nginx stub_status

server {
    listen       8000;
    server_name  localhost;

    location /status {
        stub_status on;
        access_log  off;
    }
}
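Verify the endpoint responds before pointing Metricbeat at it:

curl http://192.168.2.180:8000/status
# expect the stub_status counters: Active connections, accepts, handled, requests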

docker-compose file

mkdir -pv /data/docker-compose/metricbeat
cat > /data/docker-compose/metricbeat/docker-compose.yml << EOF
version: "3"
services:
  metricbeat:
    container_name: metricbeat
    image: elastic/metricbeat:7.11.1
    privileged: true
    restart: always
    volumes:
      - /data/metricbeat/config/metricbeat.yml:/usr/share/metricbeat/metricbeat.yml
      - /var/run/docker.sock:/var/run/docker.sock
      - /sys/fs/cgroup:/hostfs/sys/fs/cgroup
      - /proc:/hostfs/proc
      - /:/hostfs
EOF

Start

docker-compose up -d

Install the Kibana dashboards

docker-compose exec metricbeat metricbeat setup --dashboards