ELK Log Collection

  • Install the ELK components
  1.  Upload the installation packages
      
  2.   Extract the archives
    tar -zxvf elasticsearch-7.9.3-linux-x86_64.tar.gz
    tar -zxvf kibana-7.9.3-linux-x86_64.tar.gz
    tar -zxvf logstash-7.9.3.tar.gz
    tar -zxvf filebeat-7.9.3-linux-x86_64.tar.gz
    tar -zxvf metricbeat-7.9.3-linux-x86_64.tar.gz
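    The configuration paths below assume all components live under /usr/local/elk; one way to lay that out (the target directory is an assumption based on those paths):
    // Move the extracted directories into /usr/local/elk
    mkdir -p /usr/local/elk
    mv elasticsearch-7.9.3 kibana-7.9.3-linux-x86_64 logstash-7.9.3 filebeat-7.9.3-linux-x86_64 metricbeat-7.9.3-linux-x86_64 /usr/local/elk/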
  3.   Create a non-root user and set system parameters
    // Create a group
    groupadd basic
    // Create the user; useradd -p expects an already-hashed password, so set it (e.g. 123456) interactively with passwd
    useradd elk -g basic
    passwd elk
    // Grant ownership of the installation directory
    chown -R elk:basic /usr/local/elk
    
     
    // Configure the following as the root user
    vim /etc/sysctl.conf
     
    ############## Add the following setting #####################
    vm.max_map_count=655360
    ############## Save and exit #########################
     
    // Apply the change, then edit the limits file
    sysctl -p
    vim /etc/security/limits.conf
    ########### Add the following lines; the leading '*' is required #########################
    * soft nofile 65536
    * hard nofile 131072
    * soft nproc 2048
    * hard nproc 4096
    ############## Save and exit ########################
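    An optional check that the settings took effect; the limits only apply to new login sessions:
    // Verify the kernel parameter
    sysctl vm.max_map_count
    // Verify the soft and hard file-descriptor limits in a fresh session of the elk user
    su - elk -c 'ulimit -Sn; ulimit -Hn'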
    
  4.   Configure Elasticsearch
    vim /usr/local/elk/elasticsearch-7.9.3/config/elasticsearch.yml
    ######################## Edit the configuration as follows #################
    
    cluster.name: es-cluster
    node.name: node-1
    network.host: 0.0.0.0
    http.port: 9200
    discovery.seed_hosts: ["192.168.2.122", "192.168.2.123","192.168.2.124"]
    cluster.initial_master_nodes: ["node-1"]
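    A minimal startup sketch, assuming the directory layout and elk user created above (Elasticsearch refuses to run as root):
    // Start as the elk user; -d runs Elasticsearch as a daemon
    su - elk
    cd /usr/local/elk/elasticsearch-7.9.3
    ./bin/elasticsearch -d
    // Confirm the node is up and check cluster health
    curl http://192.168.2.122:9200
    curl http://192.168.2.122:9200/_cluster/health?pretty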
    
  5.   Configure Kibana
    vim /usr/local/elk/kibana-7.9.3-linux-x86_64/config/kibana.yml
    ######################## Edit the content as follows #######################
    server.port: 5601
    server.host: "0.0.0.0"
    ## Multiple Elasticsearch hosts can be listed for a cluster
    elasticsearch.hosts: ["http://192.168.2.122:9200"]
    i18n.locale: "zh-CN"
    kibana.index: ".kibana"
    ## Log file path; the logs directory must be created beforehand
    logging.dest: /usr/local/elk/kibana-7.9.3-linux-x86_64/logs/kibana.log
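    A minimal startup sketch; Kibana should likewise run as the elk user:
    // Create the log directory referenced by logging.dest, then start Kibana in the background
    mkdir -p /usr/local/elk/kibana-7.9.3-linux-x86_64/logs
    cd /usr/local/elk/kibana-7.9.3-linux-x86_64
    nohup ./bin/kibana &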
    
    
    
  6.   Configure Logstash
      Basic configuration
    cp logstash-sample.conf logstash-es.conf
    vim logstash-es.conf
    ########################### Configure the content as follows ##############################
    input {
      file {
        path => "/var/log/messages"
      }
    }
    filter {
    
    }
    output {
       elasticsearch {
         hosts => ["192.168.2.123:9200"]
         index => "mytest-%{+YYYY.MM.dd}"
       }
    }
    
    Separating logs from different environments
    
    input {
      file {
        path => "/usr/local/elk/test.log"
        add_field => {
          "log_type" => "test"
        }
      }
      file {
        path => "/usr/local/elk/prod.log"
        add_field => {
          "log_type" => "prod"
        }
      }
    }
    filter {
      if [log_type] in ["test","dev"] {
        mutate {
          add_field => {
            "[@metadata][target_index]" => "test-%{+YYYY.MM}"
          }
        }
      } else if [log_type] == "prod" {
        mutate {
          add_field => {
            "[@metadata][target_index]" => "prod-%{+YYYY.MM.dd}"
          }
        }
      } else {
        mutate {
          add_field => {
            "[@metadata][target_index]" => "unknown-%{+YYYY}"
          }
        }
      }
    }
    output {
      elasticsearch {
        hosts => "192.168.2.122:9200"
        index => "%{[@metadata][target_index]}"
      }
    }
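    The pipeline syntax can be validated before starting (assuming the configuration was saved as config/logstash-es.conf as above):
    // Check the configuration and exit without starting the pipeline
    bin/logstash -f config/logstash-es.conf --config.test_and_exit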
    

    Startup commands
    // Start Logstash (using the logstash-es.conf created above)
    nohup bin/logstash -f config/logstash-es.conf &
    // Write test data
    echo 12345678911111111111 >> /var/log/messages

     Open Kibana at http://192.168.2.123:5601
                         Index Management --> Index Patterns --> create a mytest-* pattern
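    An optional check directly against Elasticsearch (any node of the cluster works):
    // List indices; a mytest-yyyy.MM.dd index should appear once test data has been written
    curl http://192.168.2.123:9200/_cat/indices?v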

  7.   Configure Filebeat
     1) Configure filebeat.yml
    filebeat.inputs:
    - type: log
      enabled: true
      paths:
        - /usr/local/elk/test.log
      tags: ["test"]
      fields_under_root: true
      fields:
        project: mytest
        app: mytest
    
    - type: log
      enabled: true
      paths:
        - /usr/local/elk/prod.log
      tags: ["prod"]
      fields_under_root: true
      fields:
        project: myprod
        app: myprod
    
    
    output.logstash:
      hosts: ["192.168.2.122:5044"]
    

     2) Configure logstash-beat.conf
    input {
      beats {
        port => 5044
      }
    }
    filter {
      if [app] == "mytest" {
        mutate {
          add_field => {
            "[@metadata][target_index]" => "mytest-%{+YYYY.MM}"
          }
        }
      } else if [app] == "myprod" {
        mutate {
          add_field => {
            "[@metadata][target_index]" => "myprod-%{+YYYY.MM.dd}"
          }
        }
      } else {
        mutate {
          add_field => {
            "[@metadata][target_index]" => "unknown-%{+YYYY}"
          }
        }
      }
    }
    output {
      elasticsearch {
        hosts => "192.168.2.122:9200"
        index => "%{[@metadata][target_index]}"
      }
    }
    

     3) Start Logstash and Filebeat
    // Start Filebeat
    ./filebeat &
    // View Filebeat logs (journalctl applies only when Filebeat runs as a systemd service;
    // started directly like this, it writes to the logs/ directory under its install path by default)
    journalctl -u filebeat
    // Start Logstash
    bin/logstash -f config/logstash-beat.conf
    4) Test
    echo aaaa >> test.log
    echo bbbb >> prod.log
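    An optional check that the events were routed to the expected indices:
    // The test line should land in a mytest-yyyy.MM index and the prod line in a myprod-yyyy.MM.dd index
    curl "http://192.168.2.122:9200/mytest-*/_search?q=message:aaaa&pretty"
    curl "http://192.168.2.122:9200/myprod-*/_search?q=message:bbbb&pretty"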

  8.   Collect Nginx logs
     1) Configure filebeat.yml
        Add the Nginx access log as a Filebeat input (see the sketch below). The following grok pattern matches the default Nginx access log format and is applied in the Logstash filter in step 2:
    %{IPV4:remote_addr} - (%{USERNAME:remote_user}|-) \[%{HTTPDATE:time_local}\] \"%{WORD:request_method} %{URIPATHPARAM:request_uri} HTTP/%{NUMBER:http_protocol}\" %{NUMBER:http_status} %{NUMBER:body_bytes_sent} \"%{GREEDYDATA:http_referer}\" \"%{GREEDYDATA:http_user_agent}\"
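    A sketch of the corresponding input, added under the existing filebeat.inputs list in filebeat.yml; the access log path is an assumption and should match the actual Nginx installation:
    - type: log
      enabled: true
      paths:
        - /usr/local/nginx/logs/access.log
      tags: ["nginx"]
      fields_under_root: true
      fields:
        project: nginx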
    
    

     2)  Configure logstash-beat.conf
        Add the following grok filter to the filter section of logstash-beat.conf:
    grok {
        match => {
          "message" => "%{IPV4:remote_addr} - (%{USERNAME:remote_user}|-) \[%{HTTPDATE:time_local}\] \"%{WORD:request_method} %{URIPATHPARAM:request_uri} HTTP/%{NUMBER:http_protocol}\" %{NUMBER:http_status} %{NUMBER:body_bytes_sent} \"%{GREEDYDATA:http_referer}\" \"%{GREEDYDATA:http_user_agent}\""
        }
      }
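    One way to apply this grok only to Nginx events is to wrap it in a conditional, assuming the Filebeat input above tags them with "nginx" (a sketch; the [app]-based conditionals from step 7 can stay alongside it):
    filter {
      if "nginx" in [tags] {
        grok {
          match => {
            "message" => "%{IPV4:remote_addr} - (%{USERNAME:remote_user}|-) \[%{HTTPDATE:time_local}\] \"%{WORD:request_method} %{URIPATHPARAM:request_uri} HTTP/%{NUMBER:http_protocol}\" %{NUMBER:http_status} %{NUMBER:body_bytes_sent} \"%{GREEDYDATA:http_referer}\" \"%{GREEDYDATA:http_user_agent}\""
          }
        }
      }
    }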
    

    3) Start
    // Start Filebeat
    ./filebeat &
    // View Filebeat logs (see the note in step 7: journalctl applies only to a systemd service)
    journalctl -u filebeat
    // Start Logstash
    bin/logstash -f config/logstash-beat.conf

     
  9.  Collect Java logs
     
    filebeat.inputs:
    - type: log
      enabled: true
      paths:
        - /home/aaa/logs/agent/agent.log
      tags: ["java"]
      fields_under_root: true
      fields:
        project: java
        app_server: 127.0.0.1
      multiline.pattern: '^\s'
      multiline.negate: false
      multiline.match: after
     
    
    - type: log
      enabled: true
      paths:
        - /home/aaa/server/nginx/logs/access.log
      tags: ["nginx"]
      fields_under_root: true
      fields:
        project: nginx
        app_server: 127.0.0.1
    
     
    output.logstash:
      hosts: ["127.0.0.1:5044"]
    

     
      