Elastic Stack -- EFK Architecture: Collecting Nginx and Tomcat Logs

 Preface: this blog is only a record of my own study. Some images come from the internet; if this infringes on your rights, please contact me for removal.

Notes from studying a Bilibili creator's tutorial series:

The latest self-study Elastic Stack video course (ELK from zero to mastery), a must for Linux ops -- Elasticsearch + Logstash + Kibana: https://www.bilibili.com/video/BV1VMW3e6Ezk/?spm_id_from=333.1007.tianma.1-1-1.click&vd_source=e539f90574cdb0bc2bc30a8b5cb3fc00

1. Deploy the Nginx and Tomcat services

Install the Nginx service on node1 and verify it

 # Configure the yum repository
 cat > /etc/yum.repos.d/nginx.repo << 'EOF'
 [nginx-stable]
 name=nginx stable repo
 baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
 gpgcheck=1
 enabled=1
 gpgkey=https://nginx.org/keys/nginx_signing.key
 module_hotfixes=true
 ​
 [nginx-mainline]
 name=nginx mainline repo
 baseurl=http://nginx.org/packages/mainline/centos/$releasever/$basearch/
 gpgcheck=1
 enabled=0
 gpgkey=https://nginx.org/keys/nginx_signing.key
 module_hotfixes=true
 EOF
 # Install and start Nginx
 yum -y install nginx
 systemctl start nginx
 # Test the connection
 curl 127.0.0.1
 # View the access log file
 cat /var/log/nginx/access.log

Deploy the Tomcat service

 # (1) Download the Tomcat package
 wget https://dlcdn.apache.org/tomcat/tomcat-10/v10.1.28/bin/apache-tomcat-10.1.28.tar.gz
 # (2) Unpack the Tomcat package
 tar xf apache-tomcat-10.1.28.tar.gz -C /root/software/
 # (3) Use an OpenJDK environment (here, the JDK bundled with Elasticsearch)
 [root@node1 ~]# cat > /etc/profile.d/jdk.sh << 'EOF'
 #!/bin/bash
 export JAVA_HOME=/usr/share/elasticsearch/jdk
 export PATH=$PATH:$JAVA_HOME/bin
 EOF
 # (4) Make the environment variables take effect
 [root@node1 ~]# source /etc/profile.d/jdk.sh
 [root@node1 ~]# java -version
 openjdk version "1.8.0_262"
 OpenJDK Runtime Environment (build 1.8.0_262-b10)
 OpenJDK 64-Bit Server VM (build 25.262-b10, mixed mode)
 # (5) Start Tomcat
 cd /root/software/apache-tomcat-10.1.28/bin/
 ./catalina.sh start
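
To sanity-check both services before wiring up Filebeat, a quick probe such as the following can help (it assumes Tomcat is listening on its default port 8080):

 # Nginx on port 80, Tomcat on port 8080
 curl -I 127.0.0.1
 curl -I 127.0.0.1:8080
 tail -5 /root/software/apache-tomcat-10.1.28/logs/catalina.out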

2.1. Collecting raw Nginx logs with the log input type

(1) Write the configuration file

 cat > /etc/filebeat/config/nginx-log-to-es.yml << 'EOF'
 filebeat.inputs:
 - type: log
   enabled: true
   paths:
     - /var/log/nginx/access.log*
   tags: ["access"]
 ​
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   index: "cluster-elk-nginx-%{+yyyy.MM.dd}"
  
 # Disable index lifecycle management
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk"      # name of the index template
 setup.template.pattern: "cluster-elk*"  # match pattern of the index template
 setup.template.overwrite: true          # overwrite any existing index template
 setup.template.settings:                # index template settings
   index.number_of_shards: 3 
   index.number_of_replicas: 0
 EOF

(2) Run Filebeat and check the results in the Kibana web UI

 rm -rf /var/lib/filebeat/*
 filebeat -e -c /etc/filebeat/config/nginx-log-to-es.yml
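
To confirm that events actually reached the cluster, one option is to query the cat indices API on any of the three Elasticsearch nodes listed in the config (the index name matches the pattern above; the date suffix will differ):

 # Generate a little more traffic, then list the matching indices
 curl 127.0.0.1
 curl -s 'http://192.168.1.10:9200/_cat/indices/cluster-elk-nginx-*?v'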

2.2. Collecting raw Tomcat logs with the log input type

(1) The YAML configuration file is as follows

 filebeat.inputs:
 - type: log
   enabled: true
   paths: 
     - /root/software/apache-tomcat-10.1.28/logs/*.txt
 ​
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   index: "cluster-elk-tomcat-access-%{+yyyy.MM.dd}"
  
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk"
 setup.template.pattern: "cluster-elk*"
 setup.template.overwrite: true
 setup.template.settings:
   index.number_of_shards: 3 
   index.number_of_replicas: 0
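
The original notes do not show a run command for this file; a minimal sketch, assuming the configuration above was saved as /etc/filebeat/config/tomcat-log-to-es.yml (the file name is illustrative):

 # Clear the registry so existing files are re-read, then start Filebeat in the foreground
 rm -rf /var/lib/filebeat/*
 filebeat -e -c /etc/filebeat/config/tomcat-log-to-es.yml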

3.1. Collecting Nginx JSON logs with the log input type

(1) Change the Nginx log format at the source

 # vim /etc/nginx/nginx.conf
 ...
 log_format cluster_nginx_json '{"@timestamp":"$time_iso8601",'
                                 '"host":"$server_addr",'
                                 '"clientip":"$remote_addr",'
                                 '"size":"$body_bytes_sent",'
                                 '"reponsetime":"$request_time",'
                                 '"upstreamtime":"$upstream_response_time",'
                                 '"upstreamhost":"$upstream_addr",'
                                 '"http_host":"$host",'
                                 '"uri":"$uri",'
                                 '"domain":"$host",'
                                 '"xff":"$http_x_forwarded_for",'
                                 '"referer":"$http_referer",'
                                 '"tcp_xff":"$proxy_protocol_addr",'
                                 '"http_user_agent":"$http_user_agent",'
                                 '"status":"$status"}';
 access_log /var/log/nginx/access.log  cluster_nginx_json;   
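
The new format only takes effect once Nginx reloads; a small sketch to validate and apply it:

 # Check the syntax, reload, and produce one JSON-formatted entry
 nginx -t && systemctl reload nginx
 curl 127.0.0.1
 tail -1 /var/log/nginx/access.log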

(2) Write the Filebeat log collection configuration file

 [root@node1 config]# cat > /etc/filebeat/config/nginx-log-to-es.yml << 'EOF' 
 filebeat.inputs:
 - type: log
   enabled: true
   paths:
     - /var/log/nginx/access.log*
   tags: ["access"]
   json.keys_under_root: true        # let Filebeat decode the JSON and place the keys at the top level
 ​
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   index: "cluster-elk-nginx-access-%{+yyyy.MM.dd}"
  
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk"
 setup.template.pattern: "cluster-elk*"
 setup.template.overwrite: true
 setup.template.settings:
   index.number_of_shards: 3 
   index.number_of_replicas: 0
 EOF

(3) Run the test

 rm -rf /var/lib/filebeat/*
 filebeat -e -c /etc/filebeat/config/nginx-log-to-es.yml

(4) Open Kibana; you can see that many custom fields have been parsed out.

3.2. Collecting Tomcat JSON logs with the log input type

(1) Change the Tomcat log format at the source

 # First back up the configuration file
 cp /root/software/apache-tomcat-10.1.28/conf/{server.xml,server.xml-`date +%F`}
 # Edit the configuration file
 # vim /root/software/apache-tomcat-10.1.28/conf/server.xml
 ... go near the end of the file, roughly between lines 133 and 149
 <Host name="tomcat.oldboyedu.com" appBase="webapps" unpackWARs="true" autoDeploy="true">
         <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
                prefix="access" suffix=".log"
                pattern="{&quot;requestTime&quot;:&quot;%t&quot;,&quot;clientIP&quot;:&quot;%h&quot;,&quot;threadID&quot;:&quot;%I&quot;,&quot;protocol&quot;:&quot;%H&quot;,&quot;requestMethod&quot;:&quot;%r&quot;,&quot;requestStatus&quot;:&quot;%s&quot;,&quot;sendBytes&quot;:&quot;%b&quot;,&quot;queryString&quot;:&quot;%q&quot;,&quot;responseTime&quot;:&quot;%Dms&quot;,&quot;partner&quot;:&quot;%{Referer}i&quot;,&quot;agentVersion&quot;:&quot;%{User-Agent}i&quot;}"
         />
 </Host>
  
 # Restart Tomcat
 /root/software/apache-tomcat-10.1.28/bin/catalina.sh stop
 rm -rf  /root/software/apache-tomcat-10.1.28/logs/*
 /root/software/apache-tomcat-10.1.28/bin/catalina.sh start
 # Remember to add the hostname resolution entries
 [root@master ~]# cat /etc/hosts
 ...
 192.168.1.10 master
 192.168.1.11 node1 tomcat.oldboyedu.com
 192.168.1.12 node2

Send a test request from another node and check the log file: the entries are now in JSON format.

 [root@node2 ~]# curl tomcat.oldboyedu.com:8080
 [root@node1 logs]# cat tomcat.oldboyedu.com_access_log.2024-08-20.txt 
 {"clientip":"192.168.1.12","ClientUser":"-","authenticated":"-","AccessTime":"[20/Aug/2024:15:47:26 +0800]","request":"GET / HTTP/1.1","status":"200","SendBytes":"11239","QueryString":"","partner":"???,"http_user_agent":"???}
 ...

(2) Edit the Filebeat log collection file:

 [root@node1 config]# cat 13-tomcat-to-es.yml 
 filebeat.inputs:
 - type: log
   enabled: true
   paths: 
     - /root/software/apache-tomcat-10.1.28/logs/*.txt
 ​
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   index: "cluster-elk-tomcat-access-%{+yyyy.MM.dd}"
  
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk"
 setup.template.pattern: "cluster-elk*"
 setup.template.overwrite: true
 setup.template.settings:
   index.number_of_shards: 3 
   index.number_of_replicas: 0

(3) Run and test: the JSON-formatted entries are collected, but each one is displayed as a single message. We can add a parameter so that Filebeat splits each log entry into separate fields.

 rm -rf /var/lib/filebeat/*
 filebeat -e -c /etc/filebeat/config/13-tomcat-to-es.yml 

(4) Add the parameter that lets Filebeat decode logs as JSON messages:

 [root@node1 config]# cat 13-tomcat-to-es.yml 
 filebeat.inputs:
 - type: log
   enabled: true
   paths: 
     - /root/software/apache-tomcat-10.1.28/logs/*.txt
   json.keys_under_root: true        # add the JSON decoding parameter
 ...
 # Clean up and restart again (a run sketch follows)
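
A minimal sketch of that clean-and-restart step, reusing the file name shown above:

 rm -rf /var/lib/filebeat/*
 filebeat -e -c /etc/filebeat/config/13-tomcat-to-es.yml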

4.1. Collecting Nginx logs with the module type

First restore the default Nginx log format:

 log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                   '$status $body_bytes_sent "$http_referer" '
                   '"$http_user_agent" "$http_x_forwarded_for"';
 ​
 access_log  /var/log/nginx/access.log  main;
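
A small sketch of applying the restored format and switching to module-based collection; it assumes the default module directory of the yum install, /etc/filebeat/modules.d:

 # Reload Nginx with the default log format, then enable the Filebeat nginx module
 nginx -t && systemctl reload nginx
 filebeat modules enable nginx
 filebeat modules list | head -5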

Write the Filebeat configuration file:

 filebeat.config.modules:
   # Path to the module configuration files. With a yum install of version 7.17.3, the default value below cannot be used:
   # path: ${path.config}/modules.d/*.yml
   path: /etc/filebeat/modules.d/*.yml
   reload.enabled: true
 ​
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   index: "cluster-elk-nginx-access-%{+yyyy.MM.dd}"
  
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk"
 setup.template.pattern: "cluster-elk*"
 setup.template.overwrite: true
 setup.template.settings:
   index.number_of_shards: 3 
   index.number_of_replicas: 0

You can also edit the module configuration file to control what gets collected:

 [root@node1 config]# cat /etc/filebeat/modules.d/nginx.yml
 - module: nginx
   access:
     enabled: true
     var.paths: ["/var/log/nginx/access.log*"]
   error:        # error logs are not collected here
     enabled: false
     var.paths: ["/var/log/nginx/error.log"]
   ingress_controller:
     enabled: false

Run Filebeat and send a few test requests; you will see many fields predefined by the module.
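
A minimal run sketch, assuming the module-based configuration above was saved as /etc/filebeat/config/11-nginx-to-es.yml (the file name is illustrative; it matches the one referenced in the next section):

 rm -rf /var/lib/filebeat/*
 filebeat -e -c /etc/filebeat/config/11-nginx-to-es.yml
 # Then send a few requests to Nginx so the module has events to parse
 curl 127.0.0.1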

4.2. Collecting Tomcat logs with the module type

Enable the Tomcat module

 # Enable the tomcat module
 filebeat -c /etc/filebeat/config/11-nginx-to-es.yml modules enable tomcat
 Enabled tomcat
 # Disable the nginx module
 filebeat -c /etc/filebeat/config/11-nginx-to-es.yml modules disable nginx
 
 # List the enabled and disabled modules
 # filebeat -c /etc/filebeat/config/11-nginx-to-es.yml modules list | head
 Enabled:
 tomcat
 ​
 Disabled:
 activemq
 apache
 ...
 ​
 # Edit the module file and specify the log paths to collect
 [root@node1 config]# cat /etc/filebeat/modules.d/tomcat.yml 
 - module: tomcat
   log:
     enabled: true
     var.input: file
     var.paths:
       - /root/software/apache-tomcat-10.1.28/logs/*.txt

Edit the Filebeat log collection configuration file

 [root@node1 config]# cat /etc/filebeat/config/12-tomcat-to-es.yml 
 filebeat.config.modules:
   # Path to the module configuration files. With a yum install of version 7.17.3, the default value below cannot be used:
   # path: ${path.config}/modules.d/*.yml
   path: /etc/filebeat/modules.d/*.yml
   reload.enabled: true
 ​
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   index: "cluster-elk-tomcat-access-%{+yyyy.MM.dd}"
  
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk"
 setup.template.pattern: "cluster-elk*"
 setup.template.overwrite: true
 setup.template.settings:
   index.number_of_shards: 3 
   index.number_of_replicas: 0

Run and test:

 rm -rf /var/lib/filebeat/*
 filebeat -e -c /etc/filebeat/config/12-tomcat-to-es.yml

5.1. Multiline matching: collecting Tomcat error logs

First stop Tomcat, make a deliberate mistake in its configuration file, and start it again; this produces an error entry in the log:

 cat /root/software/apache-tomcat-10.1.28/logs/catalina.out

Notice that a single error entry spans multiple lines; we want those lines grouped into one event. Now write the log collection configuration file:

 [root@node1 config]# cat 15-tomcat-to-es.yml 
 filebeat.inputs:
 - type: log
   enabled: true
   paths: 
     - /root/software/apache-tomcat-10.1.28/logs/*.out
   # Multiline match type; valid values are "pattern" and "count"
   multiline.type: pattern
   multiline.pattern: '^\d{2}'   # the match pattern
   multiline.negate: true        # see the diagram in the official docs
   multiline.match: after
   
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   index: "cluster-elk-tomcat-error-%{+yyyy.MM.dd}"
  
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk"
 setup.template.pattern: "cluster-elk*"
 setup.template.overwrite: true
 setup.template.settings:
   index.number_of_shards: 3 
   index.number_of_replicas: 0

Clear the old registry data and run the test (a minimal run sketch follows); the multi-line error entry is now collected as a single event.
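
A minimal sketch of that clean-and-run step, reusing the file name from above:

 rm -rf /var/lib/filebeat/*
 filebeat -e -c /etc/filebeat/config/15-tomcat-to-es.yml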

5.2. Multiline matching: collecting Elasticsearch error logs

(1) Inspect the Elasticsearch log file:

 [root@node1 config]# tail -100f /var/log/elasticsearch/master-elk.log 
 # These are Java-style logs as well

(2) Write the Filebeat log collection configuration file:

 [root@node1 config]# cat 16-eslog-to-es.yml 
 filebeat.inputs:
 - type: log
   enabled: true
   paths: 
     - /var/log/elasticsearch/master-elk.log*
   multiline.type: pattern
   multiline.pattern: '^\['
   multiline.negate: true 
   multiline.match: after
 ​
 ​
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   index: "cluster-elk-es-error-%{+yyyy.MM.dd}"
  
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk"
 setup.template.pattern: "cluster-elk*"
 setup.template.overwrite: true
 setup.template.settings:
   index.number_of_shards: 3 
   index.number_of_replicas: 0

(3) Start it and check the Kibana UI:
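
A minimal run sketch, reusing the configuration file name shown above:

 rm -rf /var/lib/filebeat/*
 filebeat -e -c /etc/filebeat/config/16-eslog-to-es.yml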

Error entries that originally spanned multiple lines are now displayed as a single event.

6. Log filtering

Use the include_lines and exclude_lines parameters to implement whitelists and blacklists.

 [root@node1 config]# cat 17-to-log-console.yml
 filebeat.inputs:
 - type: log
   enabled: true
   paths:
     - /tmp/test/*.log
   # Note: both lists support pattern matching; using them together is not recommended
   include_lines: ["^ERR","^WARN"]       # whitelist: only matching lines are collected (case-sensitive)
   exclude_lines: ["^DBG"]               # blacklist: matching lines are excluded
 output.console:
   pretty: true
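
To see the filtering in action, one can feed the input a few synthetic lines and watch the console output; only the ERR and WARN lines should be printed, since they are the only ones matching the whitelist (the file name demo.log is illustrative):

 mkdir -p /tmp/test
 echo 'ERR something broke'   >> /tmp/test/demo.log
 echo 'WARN low disk space'   >> /tmp/test/demo.log
 echo 'DBG noisy debug line'  >> /tmp/test/demo.log
 echo 'INFO ordinary line'    >> /tmp/test/demo.log
 filebeat -e -c /etc/filebeat/config/17-to-log-console.yml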

7. Filtering Nginx error logs

Send access logs and error logs to different indices:

 access logs ---> cluster-elk-web-nginx-access-{}
 error logs  ---> cluster-elk-web-nginx-error-{}

Remember to configure Nginx to log in JSON format, as in the earlier steps.

 [root@node1 config]# cat 18-nginx-to-es.yml
 filebeat.inputs:
 - type: log
   enabled: true
   paths:
     - /var/log/nginx/access.log*
   tags: ["access"]
   json.keys_under_root: true  
 ​
 - type: log
   enabled: true
   paths:
     - /var/log/nginx/error.log*
   tags: ["error"]
   include_lines: ['\[error\]']
 ​
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   indices:  
     - index: "cluster-elk-web-nginx-access-%{+yyyy.MM.dd}"
       when.contains:
         tags: "access"
     - index: "cluster-elk-web-nginx-error-%{+yyyy.MM.dd}"
       when.contains:
         tags: "error"
  
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk"
 setup.template.pattern: "cluster-elk*"
 setup.template.overwrite: true
 setup.template.settings:
   index.number_of_shards: 3 
   index.number_of_replicas: 0
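
A minimal test sketch: run Filebeat with the file above, generate one successful and one failing request, and confirm that two separate indices appear:

 rm -rf /var/lib/filebeat/*
 filebeat -e -c /etc/filebeat/config/18-nginx-to-es.yml
 # In another shell:
 curl 127.0.0.1 ; curl 127.0.0.1/no-such-page
 curl -s 'http://192.168.1.10:9200/_cat/indices/cluster-elk-web-nginx-*?v'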

8. Collecting Nginx and Tomcat logs at the same time

 [root@node1 config]# cat 18-nginx-to-es.yml
 filebeat.inputs:
 - type: log
   enabled: true
   paths:
     - /var/log/nginx/access.log*
   tags: ["nginx-access"]
   json.keys_under_root: true  
 ​
 - type: log
   enabled: true
   paths:
     - /var/log/nginx/error.log*
   tags: ["nginx-error"]
   include_lines: ['\[error\]']
 ​
 - type: log
   enabled: true
   paths: 
     - /root/software/apache-tomcat-10.1.28/logs/*.txt
   json.keys_under_root: true
   tags: ["tomcat-access"]
  
 - type: log
   enabled: true
   paths: 
     - /root/software/apache-tomcat-10.1.28/logs/*.out
   multiline.type: pattern
   multiline.pattern: '^\d{2}'   # the match pattern
   multiline.negate: true        # see the diagram in the official docs
   multiline.match: after
   tags: ["tomcat-error"]
 ​
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   indices:  
     - index: "cluster-elk-web-nginx-access-%{+yyyy.MM.dd}"
       when.contains:
         tags: "nginx-access"
     - index: "cluster-elk-web-nginx-error-%{+yyyy.MM.dd}"
       when.contains:
         tags: "nginx-error"
     - index: "cluster-elk-web-tomcat-access-%{+yyyy.MM.dd}"
       when.contains:
         tags: "tomcat-access"
     - index: "cluster-elk-web-tomcat-error-%{+yyyy.MM.dd}"
       when.contains:
         tags: "tomcat-error"
 ​
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk"
 setup.template.pattern: "cluster-elk*"
 setup.template.overwrite: true
 setup.template.settings:
   index.number_of_shards: 3 
   index.number_of_replicas: 0

9. Notes on switching from the log input type to the filestream type

9.1. JSON parsing configuration for the filestream type

 filebeat.inputs:
 - type: filestream
   enabled: true
   paths:
     - /var/log/nginx/access.log*
   tags: ["access"]
   parsers:
   - ndjson:
       keys_under_root: true
 ​
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   index: "cluster-elk-nginx-access-%{+yyyy.MM.dd}"
  
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk"
 setup.template.pattern: "cluster-elk*"
 setup.template.overwrite: true
 setup.template.settings:
   index.number_of_shards: 3 
   index.number_of_replicas: 0

9.2. Multiline matching for the filestream type

 filebeat.inputs:
 - type: filestream
   enabled: true
   paths:
     - /root/software/apache-tomcat-10.1.28/logs/*.txt
   tags: ["access"]
   parsers:                  # the parsers section replaces the json.* options of the log input
     - ndjson:
         keys_under_root: true
 ​
 - type: filestream
   enabled: true
   paths:
     - /root/software/apache-tomcat-10.1.28/logs/*.out
   tags: ["error"]
   parsers:                  # the parsers section replaces the multiline.* options of the log input
     - multiline:
         type: pattern
         pattern: '^\d{2}'
         negate: true
         match: after
 ​
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   indices:  
     - index: "cluster-elk-web-tomcat-access-%{+yyyy.MM.dd}"
       when.contains:
         tags: "access"
     - index: "cluster-elk-web-tomcat-error-%{+yyyy.MM.dd}"
       when.contains:
         tags: "error"
         
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk"
 setup.template.pattern: "cluster-elk*"
 setup.template.overwrite: true
 setup.template.settings:
   index.number_of_shards: 3 
   index.number_of_replicas: 0

10. Log aggregation: receiving logs over TCP

 filebeat.inputs:
 - type: tcp
   host: "0.0.0.0:9000"
   tags: ["aaa"]
   
 - type: tcp
   host: "0.0.0.0:8000"
   tags: ["bbb"]
 ​
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   indices:  
     - index: "cluster-linux-elk-aaa-%{+yyyy.MM.dd}"
       when.contains:
         tags: "aaa"
     - index: "cluster-linux-elk-bbb-%{+yyyy.MM.dd}"
       when.contains:
         tags: "bbb"     
         
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk-system-log"
 setup.template.pattern: "cluster-elk-system-log*"
 setup.template.overwrite: true
 setup.template.settings:
   index.number_of_shards: 3
   index.number_of_replicas: 0
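
A minimal test sketch, assuming the configuration above was saved as /etc/filebeat/config/tcp-aggregate-to-es.yml (the file name is illustrative) and that Filebeat runs on node1 (192.168.1.11):

 filebeat -e -c /etc/filebeat/config/tcp-aggregate-to-es.yml
 # From any machine that can reach node1, push one test line to each port
 echo "hello aaa" | nc 192.168.1.11 9000
 echo "hello bbb" | nc 192.168.1.11 8000
 curl -s 'http://192.168.1.10:9200/_cat/indices/cluster-linux-elk-*?v'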

11. Sending collected logs to Redis

(1) Deploy Redis on the master node

 yum -y install epel-release
 yum -y install redis

(2) Edit the Redis configuration file:

 vim /etc/redis.conf 
 ...
 bind 0.0.0.0
 requirepass cluster

(3) Start the Redis service

 systemctl start redis
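
A quick local check that the service is up and answering with the configured password:

 systemctl enable redis
 redis-cli -a cluster ping      # expected output: PONG
 ss -ntl | grep 6379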

(4) Connect from another node to confirm that Redis works:

 [root@node2 ~]# redis-cli -a cluster -h 192.168.1.10 -p 6379 --raw
 192.168.1.10:6379> keys *

(5) Write Filebeat data into Redis

 filebeat.inputs:
 - type: tcp
   max_message_size: 10MiB
   host: "0.0.0.0:9000"
 ​
 output.redis:
   hosts: ["192.168.1.10:6379"]
   password: cluster
   key: "cluster-linux-filebeat"
   db: 5
   timeout: 3
   
 # Start Filebeat
 filebeat -e -c /etc/filebeat/config/tcp-to-redis.yml

(6) Test

 # Write some data
 echo 12323432 | nc 192.168.1.10 9000
 # Check the data
 redis-cli -a cluster -h 192.168.1.10 -p 6379 --raw -n 5
 ...

12. Collecting system logs

 Use Filebeat to collect the following system logs:
 /var/log/secure
 /var/log/maillog
 /var/log/yum.log
 /var/log/firewalld
 /var/log/cron
 /var/log/messages
 
 A problem you may hit with version 7.17.13:
 (1) Once more than four input sources are configured, some data may fail to be written to ES.
     Solution 1: split the work across multiple Filebeat instances; when running several instances, each needs its own data path via "--path.data".
     Solution 2: solve it with a log aggregation approach:
         yum -y install rsyslog

Solution 1: split into multiple instances

 filebeat.inputs:
 - type: filestream
   enabled: true
   paths:
     - /var/log/secure
   tags: ["secure"]
 ​
 - type: filestream
   enabled: true
   paths:
     - /var/log/maillog
   tags: ["maillog"]
 ​
 - type: filestream
   enabled: true
   paths:
     - /var/log/yum.log
   tags: ["yum"]
 ​
 - type: filestream
   enabled: true
   paths:
     - /var/log/cron
   tags: ["cron"]
 ​
 ​
 ​
 output.elasticsearch:
   enabled: true
   hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
   indices:  
     - index: "cluster-elk-system-log-secure-%{+yyyy.MM.dd}"
       when.contains:
         tags: "secure"
     - index: "cluster-elk-system-log-maillog-%{+yyyy.MM.dd}"
       when.contains:
         tags: "maillog"
     - index: "cluster-elk-system-log-yum-%{+yyyy.MM.dd}"
       when.contains:
         tags: "yum"
     - index: "cluster-elk-system-log-firewalld-%{+yyyy.MM.dd}"
       when.contains:
         tags: "firewalld"
     - index: "cluster-elk-system-log-cron-%{+yyyy.MM.dd}"
       when.contains:
         tags: "cron"
     - index: "cluster-elk-system-log-messages-%{+yyyy.MM.dd}"
       when.contains:
         tags: "messages"
                 
 setup.ilm.enabled: false
 setup.template.name: "cluster-elk-system-log"
 setup.template.pattern: "cluster-elk-system-log*"
 setup.template.overwrite: true
 setup.template.settings:
   index.number_of_shards: 10
   index.number_of_replicas: 0

The second Filebeat instance collects /var/log/firewalld and /var/log/messages with its own configuration file:

filebeat.inputs:
- type: filestream
  enabled: true
  paths:
    - /var/log/firewalld
  tags: ["firewalld"]
  
- type: filestream
  enabled: true
  paths:
    - /var/log/messages
  tags: ["messages"]

output.elasticsearch:
  enabled: true
  hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
  indices:  
    - index: "cluster-elk-system-log-firewalld-%{+yyyy.MM.dd}"
      when.contains:
        tags: "firewalld"
    - index: "cluster-elk-system-log-messages-%{+yyyy.MM.dd}"
      when.contains:
        tags: "messages"
        
setup.ilm.enabled: false
setup.template.name: "cluster-elk-system-log"
setup.template.pattern: "cluster-elk-system-log*"
setup.template.overwrite: true
setup.template.settings:
  index.number_of_shards: 10
  index.number_of_replicas: 0
# Running both at the same time will error out, so give the second instance its own data path
filebeat -e -c /etc/filebeat/config/systemLog1-to-es.yml
filebeat -e -c /etc/filebeat/config/systemLog2-to-es.yml --path.data /tmp/filebeat

Solution 2: log aggregation

(1) Deploy the rsyslog service:

yum -y install rsyslog

(2) Edit the rsyslog configuration file:

vim /etc/rsyslog.conf
...
# Open the TCP listener
$ModLoad imtcp
$InputTCPServerRun 514
# Redirect all logs to one file; since the TCP port is open, they could also be forwarded to another host with @IP
*.* 		/var/log/cluster.log

(3) Restart the service and test

systemctl restart rsyslog
logger "test"
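
A small verification sketch: every message written through logger should now also land in the aggregated file:

logger "rsyslog aggregation test"
tail -3 /var/log/cluster.log
grep "rsyslog aggregation test" /var/log/cluster.log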

(4) Collect the logs with Filebeat

filebeat.inputs:
- type: filestream
  enabled: true
  paths:
    - /var/log/cluster.log
  tags: ["rsyslog"]

output.elasticsearch:
  enabled: true
  hosts: ["http://192.168.1.10:9200","http://192.168.1.11:9200","http://192.168.1.12:9200"]
  indices:  
    - index: "cluster-elk-system-log-rsyslog-%{+yyyy.MM.dd}"
      when.contains:
        tags: "rsyslog"
        
setup.ilm.enabled: false
setup.template.name: "cluster-elk-system-log"
setup.template.pattern: "cluster-elk-system-log*"
setup.template.overwrite: true
setup.template.settings:
  index.number_of_shards: 10
  index.number_of_replicas: 0
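
A minimal run sketch, assuming the configuration above was saved as /etc/filebeat/config/rsyslog-to-es.yml (the file name is illustrative):

filebeat -e -c /etc/filebeat/config/rsyslog-to-es.yml
curl -s 'http://192.168.1.10:9200/_cat/indices/cluster-elk-system-log-*?v'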