Elastic Stack (ELK Stack)

Installation and Configuration

Download and install

Install jdk-14.0.1_linux-x64_bin.rpm from the Oracle website.
Server-side components (ports): Elasticsearch (9200, 9300), Kibana (5601), Logstash (5044)
https://www.elastic.co/downloads
systemctl enable elasticsearch
systemctl enable logstash
systemctl enable kibana

cat << EOF > /etc/yum.repos.d/elasticsearch-7.x.repo
[elasticsearch-7.x]
name=elasticsearch repository for 7.x packages
baseurl=https://artifacts.elastic.co/packages/7.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
EOF
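
With the repo file in place, the stack packages can be installed straight from it (a minimal sketch; these are the standard package names in the Elastic 7.x yum repo):

yum install -y elasticsearch logstash kibana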

ES6

vi /etc/elasticsearch/elasticsearch.yml
node.name: wl-es01
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 0.0.0.0
http.port: 9200
script.painless.regex.enabled: true
xpack.monitoring.enabled: false
bootstrap.memory_lock: true
http.cors.enabled: true
http.cors.allow-origin: "*"
http.max_content_length: 200mb
cluster.routing.allocation.disk.threshold_enabled: false
thread_pool:
    write:
        queue_size: 2000
    index:
        queue_size: 2000
    bulk:
        queue_size: 2000
    search:
        queue_size: 2000
    get:
        queue_size: 2000
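
Note that bootstrap.memory_lock: true only takes effect if the service is allowed to lock memory. Under systemd this is commonly granted with a drop-in override (a sketch of the standard approach):

mkdir -p /etc/systemd/system/elasticsearch.service.d
cat << EOF > /etc/systemd/system/elasticsearch.service.d/override.conf
[Service]
LimitMEMLOCK=infinity
EOF
systemctl daemon-reload
systemctl restart elasticsearch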

ES7 cluster tuning configuration

cluster.name: sw-es
node.name: es01
path.data: /opt/elasticsearch/data
path.logs: /var/log/elasticsearch
network.host: 0.0.0.0
cluster.initial_master_nodes: ["es01"]
discovery.seed_hosts: ["172.23.8.226:9300", "172.23.8.230:9300", "172.23.8.236:9300", "172.23.8.237:9300", "172.23.8.238:9300"]
cluster.max_shards_per_node: 90000
bootstrap.memory_lock: true
http.max_content_length: 300MB
http.max_initial_line_length: 20KB
http.max_header_size: 32KB
thread_pool.write.queue_size: 1000
thread_pool.search.queue_size: 2000
thread_pool.get.queue_size: 2000
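
Once all five nodes are up, cluster formation can be verified from any of them (a quick check; the IP is one of the seed hosts above):

curl 'http://172.23.8.226:9200/_cat/nodes?v'
curl 'http://172.23.8.226:9200/_cluster/health?pretty'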

kibana

vi /etc/kibana/kibana.yml
server.host: "0.0.0.0"
elasticsearch.url: "http://10.200.78.67:9200"    # renamed to elasticsearch.hosts in Kibana 7.x
elasticsearch.requestTimeout: 120000
#i18n.defaultLocale: "cn"
tilemap.url: 'http://webrd02.is.autonavi.com/appmaptile?lang=zh_cn&size=1&scale=1&style=7&x={x}&y={y}&z={z}'

xpack.apm.enabled: false
xpack.graph.enabled: false
xpack.ml.enabled: false
xpack.reporting.enabled: false
xpack.watcher.enabled: false
xpack.monitoring.enabled: false
xpack.monitoring.ui.enabled: false
xpack.infra.enabled: false
vi /usr/share/kibana/bin/kibana
# raise the Node.js heap (max_old_space_size, in MB) and the max HTTP header size:
NODE_ENV=production exec "${NODE}" --no-warnings --max-http-header-size=65536 $NODE_OPTIONS --max_old_space_size=10240 "${DIR}/src/cli" ${@}
Kibana scripted-field configuration
vi /etc/elasticsearch/elasticsearch.yml
script.painless.regex.enabled: true
// return the first word of ua.device.keyword (e.g. "iPhone 7" becomes "iPhone"), or "null" if it does not match
def m = /^(\w+)\ .*$/.matcher(doc['ua.device.keyword'].value);
if ( m.matches() ) {
   return m.group(1)
} else {
   return "null"
}
DSL examples
{
  "query": {
    "regexp": {
      "userID": "[0-9].+"
    }
  }
}
---
{
  "query": {
    "prefix": {
      "userID": "demo"
    }
  }
}
---
{
  "query": {
    "bool": {
      "should": [
        {
          "prefix": {
            "url.keyword": "/sellerMgr/"
          }
        },
        {
          "prefix": {
            "url.keyword": "/seller/"
          }
        }
      ]
    }
  }
}
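
Any of these DSL bodies can be run directly with curl (shown here with the third query against the nginx indices defined later in this document):

curl -H 'Content-Type: application/json' 'http://localhost:9200/nginx-*/_search?pretty' -d '{"query":{"prefix":{"url.keyword":"/seller/"}}}'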

logstash

vi /etc/logstash/logstash.yml 
xpack.management.enabled: false
xpack.monitoring.enabled: false

Plugin install path: /usr/share/logstash/vendor/bundle/jruby/2.3.0/gems
GeoIP database path: /usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-filter-geoip-5.0.3-java/vendor

/usr/share/logstash/bin/logstash-plugin update logstash-filter-grok
/usr/share/logstash/bin/logstash-plugin update logstash-filter-geoip
/usr/share/logstash/bin/logstash-plugin update logstash-filter-useragent
/usr/share/logstash/bin/logstash-plugin update logstash-filter-date
/usr/share/logstash/bin/logstash-plugin update logstash-filter-mutate
/usr/share/logstash/bin/logstash-plugin update logstash-filter-json

Logstash debug mode

vi /etc/logstash/conf.d/gameclient.conf
input {
  beats {
    port => 5044
  }
}
output {
  stdout {
    codec => rubydebug
  }
}

/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/gameclient.conf
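
Before starting a pipeline, the config can also be syntax-checked without running it (standard Logstash flag):

/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/gameclient.conf --config.test_and_exit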

Shipping MongoDB data to Elasticsearch via Logstash

./logstash-plugin install logstash-input-mongodb
./logstash-plugin install logstash-filter-xml

vi /etc/logstash/conf.d/mongodb
input {
  mongodb {
    uri => 'mongodb://test:123456@127.0.0.1:27017/test'
    placeholder_db_dir => '/opt/logstash-mongodb/'
    placeholder_db_name =>'test.db'
    collection => 'msgCont'
    batch_size => 1000
    generateId => true
  }
}

filter {

  mutate {
        rename => ["_id", "uid"]
    }

  xml {
       source => "msg"
       store_xml => false
       xpath => {"/message/*/*/text()" => "msg-content"}
   }
}

output {
  elasticsearch {
    hosts => ["http://localhost:9200"]
    index => "im_message"
  }
}
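
For reference, the xpath rule above collects the text of all grandchild nodes of <message> into msg-content; a document whose msg field contains, say, <message><body><text>hello</text></body></message> would yield msg-content = ["hello"] (a hypothetical message shape; the real schema depends on the msgCont collection).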

Logstash configuration for NGINX log filtering

vi /etc/logstash/conf.d/beats-nginx.conf
input {
  beats {
    port => 5044
  }
}
filter {
  grok {
    match => { "message" => "%{IPORHOST:remote_addr} - (%{USERNAME:remote_user}|-) \[%{HTTPDATE:time_local}\] (%{IPORHOST:http_host}|-) \"%{WORD:method} %{DATA:request_url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent} \"%{DATA:referrer}\" \"%{DATA:user_agent}\" \"%{IPORHOST:x_forwarded_for}\" “%{NUMBER:request_time}\"" }
    remove_field => "message"
  }
  date {
    match => [ "time_local", "dd/MMM/YYYY:HH:mm:ss Z" ]
    target => "@timestamp"
    timezone => "-04:00"
    }
  useragent {
    regexes => "/etc/logstash/regexes.yaml"
    target => "ua"
    source => "user_agent"
  }
  mutate {
    convert => { "response_code" => "integer" }
    convert => { "body_sent" => "integer" }
    convert => { "request_time" => "float" }
  }
  if [x_forwarded_for] !~ "^127\.|^192\.168\.|^172\.1[6-9]\.|^172\.2[0-9]\.|^172\.3[01]\.|^10\.|^100\.64\." {
    geoip {
      source => "x_forwarded_for"
      target => "geoip"
      fields => ["city_name","region_name","country_name","location"]
    }
    if ! [geoip][region_name] and ! [geoip][city_name] {
      mutate {
        add_field => { "client_addr" => "%{[geoip][country_name]}" }
      }
    }
    else if ! [geoip][city_name] {
      mutate {
        add_field => { "client_addr" => "%{[geoip][country_name]},%{[geoip][region_name]}" }
      }
    }
    else if ! [geoip][region_name] {
      mutate {
        add_field => { "client_addr" => "%{[geoip][country_name]},%{[geoip][city_name]}" }
      }
    }
    else {
      mutate {
        add_field => { "client_addr" => "%{[geoip][country_name]},%{[geoip][region_name]},%{[geoip][city_name]}" }
      }
    }
    mutate {
      remove_field => ["[geoip][country_name]","[geoip][region_name]","[geoip][city_name]"]
    }
  }
}
output {
  elasticsearch {
    hosts => "localhost:9200"
    manage_template => false
    index => "nginx-%{+YYYY.MM.dd}"
#    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
}
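
The grok pattern above assumes an nginx log_format with fields in this order (a sketch for cross-checking, not taken from the original nginx config):

log_format main '$remote_addr - $remote_user [$time_local] $host '
                '"$request" $status $body_bytes_sent '
                '"$http_referer" "$http_user_agent" '
                '"$http_x_forwarded_for" "$request_time"';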

User-agent device mapping configuration

vi /etc/logstash/regexes.yaml
https://github.com/ua-parser/uap-core/blob/master/regexes.yaml

  #######################
  - regex: 'iPhone'
    device_replacement: 'iPhone'
    brand_replacement: 'Apple'
    model_replacement: 'iPhone'
  - regex: 'Xiaomi_'
    device_replacement: 'XiaoMi'
    brand_replacement: 'XiaoMi'
    model_replacement: 'XiaoMi'
  #######################



  #######################
  - regex: 'Windows NT'
    device_replacement: 'PC'
    brand_replacement: 'PC'
    model_replacement: 'PC'
  - regex: 'Macintosh\;'
    device_replacement: 'MAC'
    brand_replacement: 'Apple'
    model_replacement: 'MAC'
  ######################

Logstash configuration for firewall (Juniper) log filtering

vi /etc/logstash/conf.d/syslog.conf
input {
  tcp {
    port => 10514
    type => "Juniper"
  }
  udp {
    port => 10514
    type => "Juniper"
  }
}
filter {
  grok {
    match => { "message" => "reason=(?<reason>([\s\S]*))" }
  }
  kv {
    source => "message"
    include_keys => [ "start_time", "src", "src_port", "dst", "dst_port", "sent", "rcvd", "duration", "session_id", "service" ]
#    remove_field => "message"
  }
  date {
    match => [ "start_time", "yyyy-MM-dd HH:mm:ss" ]
    target => "@timestamp"
    timezone => "+08:00"
  }
  mutate {
    convert => { "src_port" => "integer" }
    convert => { "dst_port" => "integer" }
    convert => { "sent" => "integer" }
    convert => { "rcvd" => "integer" }
    convert => { "duration" => "integer" }
    convert => { "session_id" => "integer" }
  }
  if [dst] !~ "^127\.|^192\.168\.|^172\.1[6-9]\.|^172\.2[0-9]\.|^172\.3[01]\.|^10\.|^100\.64\." {
    geoip {
      source => "dst"
      target => "dstgeoip"
      fields => ["city_name","region_name","country_name"]
    }
    if ! [dstgeoip][region_name] and ! [dstgeoip][city_name] {
      mutate {
        add_field => { "dstname" => "%{[dstgeoip][country_name]}" }
      }
    }
    else if ! [dstgeoip][city_name] {
      mutate {
        add_field => { "dstname" => "%{[dstgeoip][country_name]},%{[dstgeoip][region_name]}" }
      }
    }
    else if ! [dstgeoip][region_name] {
      mutate {
        add_field => { "dstname" => "%{[dstgeoip][country_name]},%{[dstgeoip][city_name]}" }
      }
    }
    else {
      mutate {
        add_field => { "dstname" => "%{[dstgeoip][country_name]},%{[dstgeoip][region_name]},%{[dstgeoip][city_name]}" }
      }
    }
    mutate {
      remove_field => "dstgeoip"
    }
  }
}
output{
    elasticsearch {
        hosts => "localhost:9200"
        document_type => "Juniper"
        index => "juniper-%{+YYYY.MM.dd}"
    }
#    stdout{
#        codec => rubydebug
#    }
}
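
A quick way to exercise this pipeline without a firewall on hand is to send a synthetic line to the listener with util-linux logger (hypothetical field values, matching the kv keys above):

logger -d -n 127.0.0.1 -P 10514 'reason=Creation start_time="2020-01-01 10:00:00" src=1.2.3.4 src_port=12345 dst=8.8.8.8 dst_port=53 sent=100 rcvd=200 duration=5 session_id=1001 service=dns'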

Logstash configuration for gameclient logs

input {
  beats {
    port => 5044
    type => "gameclient"
  }
}
filter {
     if [type] == "gameclient" {
          grok {
            match => { "message" => "%{TIMESTAMP_ISO8601:date} \[%{DATA:level}\] \<%{DATA}\>@%{DATA}\:%{DATA}\:%{NUMBER} (?<info>([\s\S]*))\,other\:(?<other>([\s\S]*))" }
          }
          kv {
            source => "info"
            prefix => "info_"
            field_split => ","
            value_split => ":"
          }
          kv {
            source => "other"
            prefix => "other_"
            remove_char_key => "\"|\{"
            remove_char_value => "\"|\}"
            field_split => ","
            value_split => ":"
          }
          date {
            match => [ "date", "yyyy-MM-dd HH:mm:ss.SSS" ]
            target => "@timestamp"
            timezone => "+08:00"
          }
     }
}
output{
     if [type] == "gameclient" {
        #  stdout{
        #    codec => rubydebug
        #  }
          elasticsearch {
            hosts => "localhost:9200"
            document_type => "gameclient"
            index => "gameclient-%{+YYYY.MM.dd}"
          }
     }
}
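
The grok above expects application lines shaped roughly like this (a hypothetical sample built to match the pattern's fields):

2018-03-08 12:00:01.123 [INFO] <main>@Game:login:42 uid:1001,ip:1.2.3.4,other:{"result":"ok"}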

Example: shipping RabbitMQ logs through Logstash

vi rabbitmq.conf 
input {
  beats {
    port => 5044
  }
}

filter {
        grok {
          match => { "message" => "(?m)%{TIMESTAMP_ISO8601:date} \[%{USERNAME:level}\] <%{DATA:tid}> %{GREEDYDATA:logs}" }
        }
        date {
          match => [ "date", "yyyy-MM-dd HH:mm:ss.SSS" ]
          target => "@timestamp"
          timezone => "-04:00"
        }
        mutate {
          remove_field => ["beat","host","prospector","tags","source","date","offset"]
        }
    }
output{
    elasticsearch {
      hosts => "10.200.77.48:9200"
      document_type => "doc"
      index => "new-fg-pro-mq-%{+YYYY.MM.dd}"
    }
}

Client installation: Filebeat

systemctl enable filebeat
vi /etc/filebeat/filebeat.yml

path.home: /usr/share/filebeat
path.config: /etc/filebeat
path.data: /var/lib/filebeat
path.logs: /var/log/filebeat
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
#output.logstash:
#  hosts: ["10.100.77.60:5044"]
output.elasticsearch:
  hosts: ["10.100.77.60:9200"]
setup.kibana:
  host: "10.100.77.60:5601"
cd /etc/filebeat
/usr/share/filebeat/bin/filebeat setup --template
./filebeat setup --template -E output.logstash.enabled=false -E 'output.elasticsearch.hosts=["localhost:9200"]'
/usr/share/filebeat/bin/filebeat setup --dashboards
/usr/share/filebeat/bin/filebeat modules enable nginx
vi nginx.yml
- module: nginx
  # Access logs
  access:
    enabled: true
    var.paths: ["/home/wwwlogs/static.log"]
  # Error logs
  error:
    enabled: false
    #var.paths:
vi /etc/filebeat/filebeat.yml
path.home: /usr/share/filebeat
path.config: /etc/filebeat
path.data: /var/lib/filebeat
path.logs: /var/log/filebeat
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
output.logstash:
  hosts: ["10.100.77.60:5044"]
#output.elasticsearch:
#  hosts: ["10.100.77.60:9200"]
setup.kibana:
  host: "10.100.77.60:5601"

Example

cat /etc/filebeat/filebeat.yml    
path.home: /usr/share/filebeat
path.config: /etc/filebeat
path.data: /var/lib/filebeat
path.logs: /var/log/filebeat
#filebeat.config.modules:
#  path: ${path.config}/modules.d/*.yml

filebeat.prospectors:
- type: log
  enabled: true
  paths:
    - /home/huangliang/19090/log/info*
  fields_under_root: true
  fields:
    type: sit

- type: log
  enabled: true
  paths:
    - /home/huangliang/19091/log/info*
#  tags: ["pro"]
  fields_under_root: true
  fields:
    type: pro
#    logs_env: PRO

output.logstash:
  hosts: ["127.0.0.1:5044"]
setup.kibana:
  host: "localhost:5601"

FG configuration example

path.home: /usr/share/filebeat
path.config: /etc/filebeat
path.data: /var/lib/filebeat
path.logs: /var/log/filebeat

filebeat.prospectors:
- type: log
  enabled: true
  paths:
    - /home/huangliang/19090/log/info*
  fields_under_root: true
  fields:
    type: sit

- type: log
  enabled: true
  paths:
    - /home/huangliang/19092/log/info*
  fields_under_root: true
  fields:
    type: ptsit

output.logstash:
  hosts: ["127.0.0.1:5044"]

Filebeat multiline log example

path.home: /usr/share/filebeat
path.config: /etc/filebeat
path.data: /var/lib/filebeat
path.logs: /var/log/filebeat

filebeat.prospectors:
- type: log
  enabled: true
  paths:
    - /var/log/rabbitmq/rabbit@fg-pro-rabbitmq*.log
  encoding: utf-8
  multiline.pattern: '^20\d{2}(\-|\/|\.)\d{1,2}(\-|\/|\.)\d{1,2}'
  multiline.negate: true
  multiline.match: after
output.logstash:
  hosts: ["10.240.3.55:5044"]

Elasticsearch template mapping the GeoIP location field as geo_point (for map visualizations)

vi /tmp/elasticsearch.template.nginx.json
{
  "index_patterns" : ["nginx*"],
  "mappings" : {
    "doc" : {
      "properties" : {
        "geoip" : {
          "properties" : {
            "location" : {
              "type" : "geo_point"
            }
          }
        }
      }
    }
  }
}

curl -XPUT -H 'Content-Type: application/json' 'http://10.100.77.60:9200/_template/nginx?pretty' -d@/tmp/elasticsearch.template.nginx.json

Switching to AutoNavi (Gaode) map tiles

Edit the Kibana config file kibana.yml and append at the end:

tilemap.url: 'http://webrd02.is.autonavi.com/appmaptile?lang=zh_cn&size=1&scale=1&style=7&x={x}&y={y}&z={z}'

Restart Kibana (systemctl restart kibana).

Elasticsearch operations:

curl localhost:9200/_cat/indices?v
curl 10.100.77.60:9200/_cat/templates
curl localhost:9200/filebeat-6.0.0-2017.12.01?pretty
curl 'http://localhost:9200/_template/nginx?pretty'
curl -XDELETE 'http://localhost:9200/filebeat-*'
curl localhost:9200/gameclient-2018.03.08/?pretty
curl http://10.100.77.60:9200/nginx-2017.12.01/_search?pretty
#check ES thread-pool usage
curl -XGET 'http://10.118.72.139:9200/_nodes/stats/thread_pool?pretty'
#cluster settings and most other config:
http://10.118.72.139:9200/_cluster/settings?include_defaults=true
#index settings
http://10.118.72.139:9200/_settings?include_defaults=true

#ES cluster status
GET _cluster/health?pretty (check the status field)
GET /_cluster/allocation/explain?pretty (shows why a shard is unassigned)

# when an index is red with unassigned shards, manually retry allocation:
POST /_cluster/reroute?retry_failed=true

#list running tasks
curl -u admin:123456 http://localhost:9200/_cat/tasks
GET /_tasks?nodes=nodeId1,nodeId2 
GET /_tasks?nodes=nodeId1,nodeId2&actions=cluster:* 
#query a specific task
GET /_tasks/taskId1
GET /_tasks/nodeId1:12345?wait_for_completion=true&timeout=10s
#cancel tasks
POST /_tasks/taskId1/_cancel
POST /_tasks/_cancel?node_id=nodeId1,nodeId2&actions=*reindex

curl -XPOST 'http://localhost:9200/_forcemerge?only_expunge_deletes=true&max_num_segments=1'
# compute the cutoff (3 days ago); the query below deletes all older documents
CLEAN_DAY=`date +%F -d -3day`
curl -XPOST "http://localhost:9200/_all/_delete_by_query" -H 'Content-Type: application/json' -d"
{
  \"query\": {
    \"range\": {
      \"@timestamp\": {
        \"lte\": \"${CLEAN_DAY}T00:00:00\"
      }
    }
  }
}"

#elasticsearch curl access with username and password:
curl --user admin:admin http://demo.xxx.com:9200
curl -u username:password -k 'https://demo.xxx.com:9200/_cat/indices'

#append the certificate (root-ca.pem) to /etc/pki/tls/certs/ca-bundle.crt
curl -k --header "Content-Type: application/json;charset=UTF-8" --user admin:admin -XPUT https://demo.xxx.com:9200/_template/history_dbl_template -d '{...}'

Import an index

curl -XPUT -H 'Content-Type: application/json' http://10.118.72.181:9200/corpuserinfodocument --data @./corpuser-index-config.json

Query

curl -H "Content-Type: application/json" -XGET localhost:9200/gameclient-2018.06.10/doc/_search -d '{"query":{"match":{"gameName":{"query":"2277"}}}}'

Bulk-replace a field value with a number

Note the quoting: the shell strips the single quotes around 6666, so Painless receives ctx._source.gameName=6666 (a numeric literal).
curl -H "Content-Type: application/json" -XPOST localhost:9200/gameclient-2018.06.10/doc/_update_by_query -d '{"query":{"match":{"gameName":{"query":"2277"}}},"script":{"inline":"ctx._source.gameName='6666'","lang":"painless"}}'

Bulk-replace a field value with a string

curl -H "Content-Type: application/json" -XPOST localhost:9200/gameclient-2018.06.10/doc/_update_by_query -d '{"query":{"match":{"gameName":{"query":"2277"}}},"script":{"inline":"ctx._source.gameName = params.last","params": {"last": "金球争霸"},"lang":"painless"}}'
cat ch.sh
#!/bin/bash
set -x
cat list.txt|while read line
do
  id=`echo $line|awk '{print $1}'`
  name=`echo $line|awk '{print $2}'`
  cat gamedate.txt|while read line2
  do
    # rewrite gameName from the numeric id to the readable name in index $line2
    curl -H "Content-Type: application/json" -XPOST 10.200.77.45:9200/$line2/doc/_update_by_query -d "{\"query\":{\"match\":{\"gameName\":{\"query\": \"$id\"}}},\"script\":{\"inline\":\"ctx._source.gameName = params.last\",\"params\": {\"last\": \"$name\"},\"lang\":\"painless\"}}"
  echo ""
  done
done
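
The script assumes two input files (hypothetical names taken from the script itself): list.txt with one "<gameId> <gameName>" pair per line, e.g. "2277 金球争霸", and gamedate.txt with one index name per line, e.g. "gameclient-2018.06.10".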

Python-to-Elasticsearch text import

#!/usr/local/bin/python3
# -*- coding:utf-8 -*-
import sys
from datetime import datetime
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk

def set_mapping(es, index_name="pointlogs", doc_type_name="point"):
    # full mapping used when creating the index
    my_mapping = {
        "mappings": {
            "point": {
                "properties": {
                    "@timestamp": {"type": "date"},
                    "x": {"type": "integer"},
                    "y": {"type": "float"},
                    "z": {"type": "text"}
                }
            }
        }
    }
    # bare properties for put_mapping on an existing index
    put_my_mapping = {
        "properties": {
            "@timestamp": {"type": "date"},
            "x": {"type": "integer"},
            "y": {"type": "float"},
            "z": {"type": "text"}
        }
    }
    create_index = es.indices.create(index=index_name, body=my_mapping)
    mapping_index = es.indices.put_mapping(index=index_name, doc_type=doc_type_name,
                                           body=put_my_mapping, ignore=400)
#    if create_index["acknowledged"] != True or mapping_index["acknowledged"] != True:
#        print ("Index creation failed...")

def set_data(es, input_file, index_name="pointlogs", doc_type_name="point"):
    # read one float per line and bulk-index in batches of 100000
    i = 0
    count = 0
    ACTIONS = []
    with open(input_file, 'r') as fd:
        for num, line in enumerate(fd):
            y = float(line)
            action = {
                "_index": index_name,
                "_type": doc_type_name,
                "_source": {
                    "@timestamp": datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f+0800"),
                    "x": num,
                    "y": y,
                    "z": "10w.txt",
                }
            }
            i += 1
            ACTIONS.append(action)
            if i == 100000:
                success, _ = bulk(es, ACTIONS, index=index_name, raise_on_error=True)
                count += success
                print("insert %s lines" % count)
                i = 0
                ACTIONS = []
        # flush the final partial batch
        success, _ = bulk(es, ACTIONS, index=index_name, raise_on_error=True)
        count += success
        print("ALL insert %s lines" % count)

if __name__ == '__main__':
    es = Elasticsearch(hosts=["127.0.0.1:9200"], timeout=5000)
    set_mapping(es)
    set_data(es, sys.argv[1])
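
Usage (the input file holds one float per line, as implied by float(line); the script filename here is hypothetical):

python3 import_points.py 10w.txt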

Elasticsearch export and import (elasticdump)

npm install elasticdump

Export the Kibana configuration

cd /root/node_modules/elasticdump/bin/
./elasticdump --input=http://localhost:9200/.kibana --output=kibana_mapping.json --type=mapping
./elasticdump --input=http://localhost:9200/.kibana --output=kibana.json --type=data

Import the Kibana configuration

cd /root/node_modules/elasticdump/bin/
./elasticdump --input=kibana_mapping.json --output=http://localhost:9200/.kibana --type=mapping
./elasticdump --input=kibana.json --output=http://localhost:9200/.kibana --type=data

Export index data

./elasticdump --input=http://localhost:9200/gameclient-2018.06.30 --output=gameclient-2018.06.30-2.json --type=data --limit 10000

Indices forced into read-only (index.blocks.read_only_allow_delete): this usually happens when the flood-stage disk watermark is exceeded. After freeing disk space, clear the block:

curl -XPUT -H "Content-Type: application/json" http://localhost:9200/_all/_settings -d '{"index.blocks.read_only_allow_delete": false}'

Adjusting the disk watermark thresholds

curl -XPUT -H 'Content-Type: application/json' 'http://10.1.217.216:9200/_cluster/settings' -d '{
 "persistent": {
  "cluster.routing.allocation.disk.watermark.low": "96%",
  "cluster.routing.allocation.disk.watermark.high": "97%",
  "cluster.routing.allocation.disk.watermark.flood_stage": "98%"
 }
}'

ES index cleanup (cron)

0 1 * * * /opt/es-index-clear.sh
vi /opt/es-index-clear.sh
#!/bin/bash
# get the Elasticsearch service address (running in Kubernetes)
ES_API_HOST=`kubectl get svc -n kube-system | grep elasticsearch-api | awk '{print $3}'`

# keep only the last 7 days of log indices
LAST_DATA=`date -d "-7 days" "+%Y.%m.%d"`

# delete the log indices dated 7 days ago (run daily from cron so the window stays at 7 days)
curl -XDELETE 'http://'$ES_API_HOST':9200/*-'${LAST_DATA}'*'

##
0 1 * * * /usr/bin/curl -XDELETE http://172.16.19.84:9200/*$(/usr/bin/date -d "-7 days" "+%Y%m%d")*

Removing a node from the cluster

curl -X PUT "localhost:9200/_cluster/settings" -H 'Content-Type: application/json' -d'
{
  "transient" : {
    "cluster.routing.allocation.exclude._ip" : "10.0.0.1"
  }
}
'

Setting max_result_window and the shard refresh interval

index.max_result_window: 10000
index.refresh_interval: 10s
curl -XPUT 'http://127.0.0.1:9200/_all/_settings' -H 'Content-Type: application/json' -d '{ "index.max_result_window" :"100000","index.refresh_interval" : "10s"}'
curl -XPUT 'http://localhost:9200/_all/_settings?preserve_existing=true' -H 'Content-Type: application/json' -d '{
  "index.max_result_window" : "100000",
  "index.refresh_interval" : "10s"
}'
PUT _all/_settings
{ "index" : { "max_result_window" : 1000000 } }

Persistently setting the cluster-wide shard limit

curl -X PUT "10.118.72.139:9200/_cluster/settings" -H 'Content-Type: application/json' -d'
{
    "persistent" : {
        "cluster.max_shards_per_node" : "30000"
    }
}'

Deleting by multiple conditions

curl -u admin:huawei@123 -X POST "http://10.1.250.192:9200/hwprod-java-file-20210302/_delete_by_query" -H 'Content-Type: application/json' -d'
{
    "query": {
        "bool": {
            "must": [
            { "match_phrase": { "namespace": "infra" } },
            { "match_phrase": { "appname": "pod-gateway-api" } },
            { "match_phrase": { "level": "ERROR" } }
        ]
    }
  }
}'

Deleting .monitoring-es indices (disable collection first)

curl -H 'Content-type: application/json' -XPUT 'http://10.1.67.180:9200/_cluster/settings' -d '{
 "transient": {
  "xpack.monitoring.collection.enabled": false
 }
}'

curl -XDELETE http://localhost:9200/.monitoring-es*

logstash hot_threads

curl -XGET 'logstashhost:9600/_node/hot_threads?threads=20&pretty'