Elasticsearch

集群三种颜色:
#red:集群的部分主分片未正常工作
#yellow:主分片均正常,但部分副本分片未正常工作
#green:集群的主分片和副本分片均正常工作

主分片和副本分片的区别:
#   主分片可以读写(rw)
#   副本分片只能读(ro)
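
可以通过集群健康API快速查看当前的颜色(示例,假设ES监听在127.0.0.1:9200):
# 返回结果中的"status"字段即为 green / yellow / red
curl -s 127.0.0.1:9200/_cluster/health?pretty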

一 官网下载

https://www.elastic.co/cn/downloads/elasticsearch

version: Elasticsearch 7.17.13
cd /usr/local/src
#tar包:
[root@es1 ~]# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.17.13-linux-x86_64.tar.gz

#rpm包:
[root@es1 ~]# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.17.13-x86_64.rpm


kibana下载:

#tar包:
[root@es1 src]# wget https://artifacts.elastic.co/downloads/kibana/kibana-7.17.13-linux-x86_64.tar.gz
#rpm包:
[root@es1 src]# wget https://artifacts.elastic.co/downloads/kibana/kibana-7.17.13-x86_64.rpm

filebeat下载(在10.39.156.1测试机上进行):
[root@web-2 /usr/local/src]#wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.17.13-x86_64.rpm

二 部署方式

1,单点部署
#rpm包安装部署:
[root@es1 src]# yum -y install elasticsearch-7.17.13-x86_64.rpm

#配置文件
[root@es1 src]# rpm -ql elasticsearch | grep  -E *.yml
/etc/elasticsearch/elasticsearch-plugins.example.yml
/etc/elasticsearch/elasticsearch.yml
/etc/elasticsearch/role_mapping.yml
/etc/elasticsearch/roles.yml


#查看service文件
[root@es1 src]# systemctl cat elasticsearch.service

#查看home目录,内置jdk环境:
[root@es1 src]# ll /usr/share/elasticsearch
total 648
drwxr-xr-x  2 root root   4096 Sep 30 18:23 bin
drwxr-xr-x  8 root root     96 Sep 30 18:23 jdk
drwxr-xr-x  3 root root   4096 Sep 30 18:23 lib
-rw-r--r--  1 root root   3860 Sep  1 01:32 LICENSE.txt
drwxr-xr-x 62 root root   4096 Sep 30 18:23 modules
-rw-r--r--  1 root root 642830 Sep  1 01:34 NOTICE.txt
drwxr-xr-x  2 root root      6 Sep  1 01:38 plugins
-rw-r--r--  1 root root   2710 Sep  1 01:32 README.asciidoc

#查看java版本:
[root@es1 src]# /usr/share/elasticsearch/jdk/bin/java -version
openjdk version "20.0.2" 2023-07-18
OpenJDK Runtime Environment (build 20.0.2+9-78)
OpenJDK 64-Bit Server VM (build 20.0.2+9-78, mixed mode, sharing)

#查看状态并启动
[root@es1 src]# systemctl status elasticsearch.service
[root@es1 src]# systemctl start elasticsearch.service
[root@es1 src]# ss -tnl
LISTEN      0      128        [::ffff:127.0.0.1]:9200      [::]:*                  
LISTEN      0      128        [::1]:9200                   [::]:*                  
LISTEN      0      128        [::ffff:127.0.0.1]:9300      [::]:*                  
LISTEN      0      128        [::1]:9300                   [::]:* 

#注:9200为对外提供访问的端口,使用http协议;9300为集群各节点间通讯的端口,使用tcp协议

#访问测试:未更改配置文件之前,ES只监听本机,需在本机用curl测试访问;如果使用web页面访问,要关闭防火墙或在防火墙上放行相应端口
[root@es1 src]# curl 127.0.0.1:9200
{
  "name" : "es1.novalocal",
  "cluster_name" : "elasticsearch",
  "cluster_uuid" : "8RtFf3Q9R_yoD0aPhL1c5g",
  "version" : {
    "number" : "7.17.13",
    "build_flavor" : "default",
    "build_type" : "rpm",
    "build_hash" : "2b211dbb8bfdecaf7f5b44d356bdfe54b1050c13",
    "build_date" : "2023-08-31T17:33:19.958690787Z",
    "build_snapshot" : false,
    "lucene_version" : "8.11.1",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}

#单机版,修改配置文件:
[root@es1 elasticsearch]# grep -Env "^#|^$" elasticsearch.yml 
18:cluster.name: my-application
25:node.name: node-1
35:path.data: /var/lib/elasticsearch
39:path.logs: /var/log/elasticsearch
59:network.host: 0.0.0.0
74:discovery.seed_hosts: ["10.39.156.6"]

#重启并查看日志:
[root@es1 elasticsearch]# systemctl restart elasticsearch.service
[root@es1 elasticsearch]# ss -tnl
[root@es1 elasticsearch]# tail  -f  /var/log/elasticsearch/elasticsearch.log


#访问测试:
[root@es1 elasticsearch]# curl 10.39.156.6:9200
{
  "name" : "node-1",
  "cluster_name" : "my-application",
  "cluster_uuid" : "8RtFf3Q9R_yoD0aPhL1c5g",
  "version" : {
    "number" : "7.17.13",
    "build_flavor" : "default",
    "build_type" : "rpm",
    "build_hash" : "2b211dbb8bfdecaf7f5b44d356bdfe54b1050c13",
    "build_date" : "2023-08-31T17:33:19.958690787Z",
    "build_snapshot" : false,
    "lucene_version" : "8.11.1",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}
2,集群部署
在单点部署的基础上,部署集群:

#在另两台机器上rpm方式安装:
[root@es2 src]# yum -y install elasticsearch-7.17.13-x86_64.rpm

#修改配置文件,01节点:
[root@es1 elasticsearch]# grep -Env "^#|^$" elasticsearch.yml
18:cluster.name: my-application
25:node.name: node-1
35:path.data: /var/lib/elasticsearch
39:path.logs: /var/log/elasticsearch
59:network.host: 0.0.0.0
74:discovery.seed_hosts: ["10.39.156.6","10.39.156.7","10.39.156.8"]
79:cluster.initial_master_nodes: ["10.39.156.6","10.39.156.7","10.39.156.8"]
102:ingest.geoip.downloader.enabled: false


#02节点:
[root@es2 elasticsearch]# grep -Env "^#|^$" elasticsearch.yml
18:cluster.name: my-application
24:node.name: node-2
34:path.data: /var/lib/elasticsearch
38:path.logs: /var/log/elasticsearch
58:network.host: 0.0.0.0
73:discovery.seed_hosts: ["10.39.156.6","10.39.156.7","10.39.156.8"]
78:cluster.initial_master_nodes: ["10.39.156.6","10.39.156.7","10.39.156.8"]


#03节点:
[root@es3 elasticsearch]# grep -Env "^#|^$" elasticsearch.yml 
18:cluster.name: my-application
25:node.name: node-3
35:path.data: /var/lib/elasticsearch
39:path.logs: /var/log/elasticsearch
59:network.host: 0.0.0.0
74:discovery.seed_hosts: ["10.39.156.6","10.39.156.7","10.39.156.8"]
79:cluster.initial_master_nodes: ["10.39.156.6","10.39.156.7","10.39.156.8"]


#也可清除单点部署产生的目录文件(以及/tmp下与elasticsearch相关的文件),以免造成影响,之后再启动集群并查看状态:
rm -rf /var/{log,lib}/elasticsearch/*

#启动集群时查看集群日志:


#查看集群状态:在集群任意节点执行均可,成功时显示如下:
[root@es1 elasticsearch]# curl 10.39.156.8:9200/_cat/nodes
192.168.5.103 1 57 0 0.00 0.01 0.05 cdfhilmrstw - node-2
192.168.5.111 1 57 0 0.00 0.04 0.06 cdfhilmrstw - node-3
192.168.5.145 3 58 0 0.00 0.01 0.07 cdfhilmrstw * node-1
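
除节点列表外,也可在任意节点通过 _cat/health 确认整体集群状态(示例,端口同上):
# 所有主分片和副本分片分配完成后,status 应为 green
curl 10.39.156.8:9200/_cat/health?v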

三 kibana部署安装

#rpm 部署:安装在一个节点即可
[root@es1 src]# yum -y install kibana-7.17.13-x86_64.rpm

#修改配置文件:
[root@es1 kibana]# grep -Env "^#|^$" kibana.yml 
8:server.host: "0.0.0.0"
34:elasticsearch.hosts: ["http://10.39.156.6:9200","http://10.39.156.7:9200","http://10.39.156.8:9200"]
118:i18n.locale: "zh-CN"

server.name: "your-hostname" 可按需配置为一个描述性名称(例如es集群名称),也可不进行更改

#启动服务查看状态:
[root@es1 kibana]# systemctl start kibana.service
[root@es1 kibana]# systemctl status kibana.service   or   [root@es1 kibana]# journalctl -u kibana.service 

#查看日志:
[root@es1 kibana]# tail -100f /var/log/kibana/kibana.log 
#查看端口:
[root@es1 kibana]# ss -tnulp
tcp   LISTEN     0      128       *:5601         *:*                   users:(("node",pid=25130,fd=72))

#注意在防火墙上放行5601端口:
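以下是放行端口的一个最小示例,假设使用的是firewalld(请按实际防火墙环境调整):
firewall-cmd --permanent --add-port=5601/tcp
firewall-cmd --reload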


#web页面访问:
http://10.39.156.6:5601

四 filebeat部署

#filebeat下载于10.39.156.1测试机上,部署在需要采集日志的主机上:
[root@web-2 /usr/local/src]#wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.17.13-x86_64.rpm

#安装
[root@web-2 /usr/local/src]#yum -y install filebeat-7.17.13-x86_64.rpm

[root@web-2 /usr/local/src]#filebeat -h

#仓库(registry)目录,filebeat在此记录各文件的采集位移
[root@app-test-1 ~]# ll /var/lib/filebeat/registry/filebeat


#查看配置文件:
[root@web-2 /usr/local/src]#grep -Env "^\s*#|^$" /etc/filebeat/filebeat.yml
15:filebeat.inputs:
22:- type: filestream
25:  id: my-filestream-id
28:  enabled: false
31:  paths:
32:    - /var/log/*.log
55:filebeat.config.modules:
57:  path: ${path.config}/modules.d/*.yml
60:  reload.enabled: false
67:setup.template.settings:
68:  index.number_of_shards: 1
104:setup.kibana:
135:output.elasticsearch:
137:  hosts: ["localhost:9200"]
163:processors:
164:  - add_host_metadata:
165:      when.not.contains.tags: forwarded
166:  - add_cloud_metadata: ~
167:  - add_docker_metadata: ~
168:  - add_kubernetes_metadata: ~



#配置文件配置说明:

#测试1
stdin and stdout  #标准输入和标准输出

cat 01-stdin-to-console.yml

# 指定输入的类型
filebeat.inputs:
# 指定输入的类型为"stdin",表示标准输入
- type: stdin
# 指定输出的类型,此处为控制台 
output.console:
# 打印漂亮的格式
  pretty: true
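
可以在前台运行filebeat来验证该配置(示例,假设配置文件保存在当前目录):
# -e 表示把日志输出到标准错误,-c 指定配置文件;输入一行内容并回车,即可在控制台看到JSON格式的事件
filebeat -e -c 01-stdin-to-console.yml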





#测试2
输出到es

# 指定输入的类型
filebeat.inputs:
# 指定输入的类型为"stdin",表示标准输入
- type: stdin
# 指定输出的类型 
#output.console:
# 打印漂亮的格式
#  pretty: true
output.elasticsearch:
  hosts: ["http://es01 or IP:9200","http://es02 or IP:9200","http://es03 or IP:9200"]


#测试3
input log 类型

filebeat.inputs:
- type: log
  paths:
    - /tmp/test.log
output.console:
  pretty: true



#测试4
通配符log类型

filebeat.inputs:
- type: log
  paths:
    - /tmp/test.log
    - /tmp/*.txt
output.console:
  pretty: true



#测试5
添加fields和tags

filebeat.inputs:
- type: log
# 是否启动当前的输入类型,默认值为true 
  enabled: true
# 指定数据路径
  paths:
    - /tmp/test.log
    - /tmp/*.txt
# 给当前的输入类型打上标签
  tags: ["oldboyedu-linux80","容器运维","DBA运维","SRE运维工程师"]
# 自定义字段
  fields:
    school: "清华学院"
    class: "信息办"
- type: log
  enabled: true
  paths:
    - /tmp/test/*/*.log
  tags: ["oldboyedu-python","云原生开发"] 
  fields:
    name: "oldboy"
    hobby: "linux,抖音"

# 将自定义字段的key-value放到顶级字段.
# 默认值为false,会将数据放在一个叫"fields"字段的下面. 
  fields_under_root: true ##默认为false

output.console:
  pretty: true



#测试6
过滤字段

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /tmp/test/*.log
# 注意:黑白名单均支持通配符,生产环境中不建议同时使用
# 指定白名单,包含指定的内容才会采集,且区分大小写!
  include_lines: ['^ERR', '^WARN','oldboyedu']
# 指定黑名单,排除指定的内容(黑白名单同时命中时,exclude生效)
  exclude_lines: ['^DBG',"linux","oldboyedu"]
output.console:
  pretty: true



#测试7
将数据写入es

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /tmp/test.log
    - /tmp/*.txt
  tags: ["oldboyedu-linux80","容器运维","DBA运维","SRE运维工程师"] 
  fields:
    school: "北京昌平区沙河镇" 
    class: "linux80"
- type: log
  enabled: true
  paths:
    - /tmp/test/*/*.log
  tags: ["oldboyedu-python","云原生开发"] 
  fields:
    name: "oldboy"
    hobby: "linux,抖音" 
  fields_under_root: true

output.elasticsearch:
  hosts: ["http://es01 or IP:9200","http://es02 or IP:9200","http://es03 or IP:9200"]



#测试8
自定义索引名称

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /tmp/test.log
    - /tmp/*.txt
  tags: ["oldboyedu-linux80","容器运维","DBA运维","SRE运维工程师"] 
  fields:
    school: "北京昌平区沙河镇" 
    class: "linux80"
- type: log
  enabled: true
  paths:
    - /tmp/test/*/*.log
  tags: ["oldboyedu-python","云原生开发"] 
  fields:
    name: "oldboy"
    hobby: "linux,抖音" 
  fields_under_root: true

output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es03:9200"]
  index: "oldboyedu-linux-elk-%{+yyyy.MM.dd}"
  # 禁用索引生命周期管理
setup.ilm.enabled: false
  # 设置索引模板的名称
setup.template.name: "oldboyedu-linux"
  # 设置索引模板的匹配模式 
setup.template.pattern: "oldboyedu-linux*"



#测试9
多个索引写入案例

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /tmp/test.log
    - /tmp/*.txt
  tags: ["oldboyedu-linux80","容器运维","DBA运维","SRE运维工程师"] 
  fields:
    school: "北京昌平区沙河镇" 
    class: "linux80"
- type: log
  enabled: true
  paths:
    - /tmp/test/*/*.log
  tags: ["oldboyedu-python","云原生开发"] 
  fields:
    name: "oldboy"
    hobby: "linux,抖音" 
  fields_under_root: true

output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es03:9200"]
  indices:
    - index: "oldboyedu-linux-elk-%{+yyyy.MM.dd}" # 匹配指定字段包含的内容
      when.contains:
        tags: "oldboyedu-linux80"
    - index: "oldboyedu-linux-python-%{+yyyy.MM.dd}"
      when.contains:
        tags: "oldboyedu-python"
  # 禁用索引生命周期管理
setup.ilm.enabled: false
  # 设置索引模板的名称
setup.template.name: "oldboyedu-linux"
  # 设置索引模板的匹配模式 
setup.template.pattern: "oldboyedu-linux*"



#测试10
自定义分片和副本

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /tmp/test.log
    - /tmp/*.txt
  tags: ["oldboyedu-linux80","容器运维","DBA运维","SRE运维工程师"] 
  fields:
    school: "北京昌平区沙河镇" 
    class: "linux80"
- type: log
  enabled: true
  paths:
    - /tmp/test/*/*.log
  tags: ["oldboyedu-python","云原生开发"] 
  fields:
    name: "oldboy"
    hobby: "linux,抖音" 
  fields_under_root: true

output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es03:9200"]
  indices:
    - index: "oldboyedu-linux-elk-%{+yyyy.MM.dd}" # 匹配指定字段包含的内容
      when.contains:
        tags: "oldboyedu-linux80"
    - index: "oldboyedu-linux-python-%{+yyyy.MM.dd}"
      when.contains:
        tags: "oldboyedu-python"
  # 禁用索引生命周期管理
setup.ilm.enabled: false
  # 设置索引模板的名称
setup.template.name: "oldboyedu-linux"
  # 设置索引模板的匹配模式 
setup.template.pattern: "oldboyedu-linux*"

# 覆盖已有的索引模板 
setup.template.overwrite: true
# 配置索引模板
setup.template.settings:
# 设置分片数量 
  index.number_of_shards: 10
# 设置副本数量,要求小于集群的数量 
  index.number_of_replicas: 2



#测试11
实现日志聚合到本地

filebeat.inputs:
- type: tcp
  host: "0.0.0.0:9000"
output.file:
  path: "/tmp/filebeat"
  filename: oldboyedu-linux80
# 指定文件的滚动大小,默认值为20MB 
  rotate_every_kb: 102400
# 指定保存的文件个数,默认是7个,有效值为2-1024个 
  number_of_files: 300
# 指定文件的权限,默认权限是0600
  permissions: 0600



#测试12
tcp 收集数据

filebeat.inputs:
- type: tcp
  host: "0.0.0.0:9000"
  tags: ["aaa"]
- type: tcp
  host: "0.0.0.0:8000"
  tags: ["bbb"]
output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es03:9200"]
  indices:
    - index: "oldboyedu-linux80-elk-aaa-%{+yyyy.MM.dd}"
      when.contains:
        tags: "aaa"
    - index: "oldboyedu-linux80-elk-bbb-%{+yyyy.MM.dd}"
      when.contains:
        tags: "bbb"
setup.ilm.enabled: false
setup.template.name: "oldboyedu-linux80-elk"
setup.template.pattern: "oldboyedu-linux80-elk*"
setup.template.overwrite: true
setup.template.settings:
  index.number_of_shards: 3
  index.number_of_replicas: 0
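
filebeat以该配置启动后,可用nc向两个tcp端口写入数据并检查对应索引(示例,假设filebeat与es01均可从本机访问):
echo "hello-aaa" | nc 127.0.0.1 9000
echo "hello-bbb" | nc 127.0.0.1 8000
# 稍后应能看到两个按日期命名的索引
curl -s "http://es01:9200/_cat/indices/oldboyedu-linux80-elk-*?v"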

五 ELFK企业实战案例

部署nginx服务

# 配置nginx的软件源
cat > /etc/yum.repos.d/nginx.repo <<'EOF'
[nginx-stable]
name=nginx stable repo 
baseurl=http://nginx.org/packages/centos/$releasever/$basearch/ 
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
[nginx-mainline]
name=nginx mainline repo
baseurl=http://nginx.org/packages/mainline/centos/$releasever/$basearch/
gpgcheck=1
enabled=0
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
EOF

# 安装并启动nginx服务
yum -y install nginx
systemctl start nginx
# 访问
在浏览器中打开 
http://192.168.8.21/
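
也可以在本机用curl生成一些请求,让access日志先有内容可采集(最小示例):
for i in $(seq 1 10); do curl -s -o /dev/null http://127.0.0.1/; done
tail -3 /var/log/nginx/access.log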

基于log 收集nginx原生日志

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log*
  tags: ["access"]
output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es03:9200"]
  index: "oldboyedu-linux-nginx-%{+yyyy.MM.dd}"
# 禁用索引生命周期管理
setup.ilm.enabled: false
# 设置索引模板的名称
setup.template.name: "oldboyedu-linux"
# 设置索引模板的匹配模式
setup.template.pattern: "oldboyedu-linux*"
# 覆盖已有的索引模板,如果为true,则会直接覆盖现有的索引模板,如果为false则不覆盖!
setup.template.overwrite: true
# 配置索引模板
setup.template.settings:
# 设置分片数量
  index.number_of_shards: 3
# 设置副本数量,要求小于集群的数量
  index.number_of_replicas: 0

基于log类型收集nginx的json日志

1. 修改nginx 原生日志格式 
vi /etc/nginx/nginx.conf
...

#    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
#                      '$status $body_bytes_sent "$http_referer" '
#                      '"$http_user_agent" "$http_x_forwarded_for"';
#
#    access_log  /var/log/nginx/access.log  main;


     log_format oldboyedu_nginx_json '{"@timestamp":"$time_iso8601",'
                              '"host":"$server_addr",'
                              '"clientip":"$remote_addr",'
                              '"SendBytes":$body_bytes_sent,'
                              '"responsetime":$request_time,'
                              '"upstreamtime":"$upstream_response_time",'
                              '"upstreamhost":"$upstream_addr",'
                              '"http_host":"$host",'
                              '"uri":"$uri",'
                              '"domain":"$host",'
                              '"xff":"$http_x_forwarded_for",'
                              '"referer":"$http_referer",'
                              '"tcp_xff":"$proxy_protocol_addr",'
                              '"http_user_agent":"$http_user_agent",'
                              '"status":"$status"}';
    access_log  /var/log/nginx/access.log  oldboyedu_nginx_json;
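
采集前可先校验并重载nginx配置(示例):
nginx -t
systemctl reload nginx
# 此后access日志的每一行应为一个json对象
tail -1 /var/log/nginx/access.log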


2. 配置filebeat解析nginx的json格式日志,并输出到elasticsearch

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log*
  tags: ["access"]
# 以JSON格式解析message字段的内容(添加至顶级字段,实现日志拆分)
  json.keys_under_root: true

output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es03:9200"]
  index: "oldboyedu-linux-nginx-access-%{+yyyy.MM.dd}"
# 禁用索引生命周期管理
setup.ilm.enabled: false
# 设置索引模板的名称
setup.template.name: "oldboyedu-linux"
# 设置索引模板的匹配模式
setup.template.pattern: "oldboyedu-linux*"
# 覆盖已有的索引模板,如果为true,则会直接覆盖现有的索引模板,如果为false则不覆盖!
setup.template.overwrite: true
# 配置索引模板
setup.template.settings:
# 设置分片数量
  index.number_of_shards: 6
# 设置副本数量,要求小于集群的数量
  index.number_of_replicas: 1






基于module收集nginx日志文件

# 列出模块
filebeat modules list
# 启动模块
filebeat modules enable nginx
# 禁用模块
filebeat modules disable nginx

注:所有模块在这个目录下面
 /etc/filebeat/modules.d/
## filebeat 配置文件
filebeat.config.modules:
# 指定模块的配置文件路径;如果是yum方式安装,在7.17.3版本中不能使用如下的默认值,需改为绝对路径(见下文)
#  path: ${path.config}/modules.d/*.yml

###更改 vim /etc/filebeat/modules.d/nginx.yml
- module: nginx
  access:
    enabled: true
    var.paths: ["/var/log/nginx/access.log"]
  error:
    enabled: true
    var.paths: ["/var/log/nginx/error.log"]
  ingress_controller:  #k8s中使用
    enabled: false

###vim /etc/filebeat/filebeat.yml
filebeat.config.modules:
  path: /etc/filebeat/modules.d/*.yml
# 开启热加载功能
  reload.enabled: true

output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es03:9200"]
  index: "oldboyedu-linux-nginx-access-%{+yyyy.MM.dd}"
# 禁用索引生命周期管理
setup.ilm.enabled: false
# 设置索引模板的名称
setup.template.name: "oldboyedu-linux"
# 设置索引模板的匹配模式
setup.template.pattern: "oldboyedu-linux*"
# 覆盖已有的索引模板,如果为true,则会直接覆盖现有的索引模板,如果为false则不覆盖!
setup.template.overwrite: true
# 配置索引模板
setup.template.settings:
# 设置分片数量
  index.number_of_shards: 3
# 设置副本数量,要求小于集群的数量
  index.number_of_replicas: 0
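
修改模块及filebeat.yml后,可用filebeat自带的检测子命令做一次快速自检(示例):
filebeat test config -c /etc/filebeat/filebeat.yml
filebeat test output -c /etc/filebeat/filebeat.yml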



基于modules采集tomcat日志文件
部署tomcat服务

1.下载并解压软件包 
wget https://dlcdn.apache.org/tomcat/tomcat-10/v10.1.7/bin/apache-tomcat-10.1.7.tar.gz --no-check-certificate
tar xf apache-tomcat-10.1.7.tar.gz -C /lifei/soft/

2. 创建符号链接
cd /lifei/soft/
ln -sv apache-tomcat-10.1.7/ tomcat
3. 配置环境变量
[root@es02 soft]# cat /etc/profile.d/elk.sh
export JAVA_HOME=/usr/share/elasticsearch/jdk
export TOMCAT_HOME=/lifei/soft/tomcat
export PATH=$PATH:$TOMCAT_HOME/bin:$JAVA_HOME/bin
4. 使环境变量生效
source /etc/profile.d/elk.sh
5. 启动服务 
catalina.sh start
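
可先确认tomcat已启动并开始写access日志(示例,假设使用默认8080端口):
curl -I http://127.0.0.1:8080/
ls /lifei/soft/tomcat/logs/localhost_access_log*.txt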

修改模块内容


cat /etc/filebeat/modules.d/tomcat.yml #只显示未注释的内容
- module: tomcat
  log:
    enabled: true
    var.input: file
    var.paths:
      - /lifei/soft/tomcat/logs/localhost_access_log*.txt

启动模块

filebeat modules enable tomcat
filebeat配置文件


filebeat.config.modules:
# 指定模块的配置文件路径;如果是yum方式安装,在7.17.3版本中不能使用如下的默认值,需改为绝对路径
#  path: ${path.config}/modules.d/*.yml
  path: /etc/filebeat/modules.d/*.yml
# 开启热加载功能
  reload.enabled: true
output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es03:9200"]
  index: "oldboyedu-linux-tomcat-access-%{+yyyy.MM.dd}"
# 禁用索引生命周期管理
setup.ilm.enabled: false
# 设置索引模板的名称
setup.template.name: "oldboyedu-linux"
# 设置索引模板的匹配模式
setup.template.pattern: "oldboyedu-linux*"
# 覆盖已有的索引模板,如果为true,则会直接覆盖现有的索引模板,如果为false则不覆盖!
setup.template.overwrite: true
# 配置索引模板
setup.template.settings:
# 设置分片数量
  index.number_of_shards: 3
# 设置副本数量,要求小于集群的数量
  index.number_of_replicas: 0

tomcat 原生日志收集

cat /root/config-filebeat/19-tomcatyuansheng.yml


filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /lifei/soft/tomcat/logs/localhost_access_log*.txt
output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es01:9200"]
  index: "oldboyedu-linux-tomcat-access-%{+yyyy.MM.dd}"
# 禁用索引生命周期管理
setup.ilm.enabled: false
# 设置索引模板的名称
setup.template.name: "oldboyedu-linux"
# 设置索引模板的匹配模式
setup.template.pattern: "oldboyedu-linux*"
# 覆盖已有的索引模板,如果为true,则会直接覆盖现有的索引模板,如果为false则不覆盖!
setup.template.overwrite: true
# 配置索引模板
setup.template.settings:
# 设置分片数量
  index.number_of_shards: 3
# 设置副本数量,要求小于集群的数量
  index.number_of_replicas: 0

基于log类型收集tomcat json格式日志
备份原配置文件

cp /lifei/soft/tomcat/conf/{server.xml,server.xml-$(date +%F)}
修改配置文件

下面注释的为原配置,没有注释的为新增配置。实际操作时直接删除原配置即可,此处的"#"并不是合法的xml注释格式。

#     <Host name="localhost"  appBase="webapps"
#           unpackWARs="true" autoDeploy="true">

#       <!-- SingleSignOn valve, share authentication between web applications
#            Documentation at: /docs/config/valve.html -->
#       <!--
#       <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
#       -->

#       <!-- Access log processes all example.
#            Documentation at: /docs/config/valve.html
#            Note: The pattern used is equivalent to using pattern="common" -->
#       <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
#              prefix="localhost_access_log" suffix=".txt"
#              pattern="%h %l %u %t &quot;%r&quot; %s %b" />

#     </Host>
       <Host name="tomcat.oldboyedu.com"  appBase="webapps"
                unpackWARs="true" autoDeploy="true">
        <Valve className="org.apache.catalina.valves.AccessLogValve"
directory="logs"
            prefix="tomcat.oldboyedu.com_access_log" suffix=".txt"
pattern="
{&quot;clientip&quot;:&quot;%h&quot;,&quot;ClientUser&quot;:&quot;%l&quo
t;,&quot;authenticated&quot;:&quot;%u&quot;,&quot;AccessTime&quot;:&quot
;%t&quot;,&quot;request&quot;:&quot;%r&quot;,&quot;status&quot;:&quot;%s
&quot;,&quot;SendBytes&quot;:&quot;%b&quot;,&quot;Query?
string&quot;:&quot;%q&quot;,&quot;partner&quot;:&quot;%
{Referer}i&quot;,&quot;http_user_agent&quot;:&quot;%{User-
Agent}i&quot;}"/>
</Host>

重启tomcat

catalina.sh stop ; catalina.sh start
filebeat 配置文件

cat /root/config-filebeat/20-comcatjson.yml

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /lifei/soft/tomcat/logs/tomcat.oldboyedu.com_access_log.2023-03-23.txt
  # 解析message字段的json格式,并放在顶级字段中
  json.keys_under_root: true
output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es03:9200"]
  index: "oldboyedu-linux-tomcat-access-%{+yyyy.MM.dd}"
# 禁用索引生命周期管理
setup.ilm.enabled: false
# 设置索引模板的名称
setup.template.name: "oldboyedu-linux"
# 设置索引模板的匹配模式
setup.template.pattern: "oldboyedu-linux*"
# 覆盖已有的索引模板,如果为true,则会直接覆盖现有的索引模板,如果为false则不覆盖!
setup.template.overwrite: true
# 配置索引模板
setup.template.settings:
# 设置分片数量
  index.number_of_shards: 3
# 设置副本数量,要求小于集群的数量
  index.number_of_replicas: 1


#多行匹配收集tomcat的错误日志
cat config-filebeat/21-tomcat-multiline.yml

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /lifei/soft/tomcat/logs/*.out
# 指定多行匹配的类型,可选值为"pattern"、"count"
  multiline.type: pattern
# 指定匹配模式(此处匹配以两位数字开头的行,例如日期开头)
  multiline.pattern: '^\d{2}'
# 下面2个参数控制不匹配的行如何合并进事件,可参考官方文档的说明
  multiline.negate: true
  multiline.match: after
output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es03:9200"]
  index: "oldboyedu-linux-tomcat-error-%{+yyyy.MM.dd}"
# 禁用索引生命周期管理
setup.ilm.enabled: false
# 设置索引模板的名称
setup.template.name: "oldboyedu-linux"
# 设置索引模板的匹配模式
setup.template.pattern: "oldboyedu-linux*"
# 覆盖已有的索引模板,如果为true,则会直接覆盖现有的索引模板,如果为false则不覆盖!
setup.template.overwrite: true
# 配置索引模板
setup.template.settings:
# 设置分片数量
  index.number_of_shards: 3
# 设置副本数量,要求小于集群的数量
  index.number_of_replicas: 0

nginx 错误日志过滤

filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /var/log/nginx/access.log*
    tags: ["access"]
# 解析message字段的json格式,并放在顶级字段中
    json.keys_under_root: true
  - type: log
    enabled: true
    paths:
      - /var/log/nginx/error.log*
    tags: ["error"]
    include_lines: ['\[error\]']
output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es03:9200"]
  #
  indices:
    - index: "oldboyedu-linux-web-nginx-access-%{+yyyy.MM.dd}"
# 匹配指定字段包含的内容
      when.contains:
        tags: "access"
    - index: "oldboyedu-linux-web-nginx-error-%{+yyyy.MM.dd}"
      when.contains:
        tags: "error"
# 禁用索引生命周期管理
setup.ilm.enabled: false
# 设置索引模板的名称
setup.template.name: "oldboyedu-linux"
# 设置索引模板的匹配模式
setup.template.pattern: "oldboyedu-linux*"
# 覆盖已有的索引模板
setup.template.overwrite: true
# 配置索引模板
setup.template.settings:
# 设置分片数量
  index.number_of_shards: 3
# 设置副本数量,要求小于集群的数量
  index.number_of_replicas: 0

nginx和tomcat同时采集案例

filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /var/log/nginx/access.log*
    tags: ["nginx-access"]
    json.keys_under_root: true
  - type: log
    enabled: true
    paths:
      - /var/log/nginx/error.log*
    tags: ["nginx-error"]
    include_lines: ['\[error\]']
  - type: log
    enabled: true
    paths:
      - /lifei/soft/apache-tomcat-10.1.7/logs/*.txt
#    json.keys_under_root: true
    tags: ["tomcat-access"]
  - type: log
    enabled: true
    paths:
      - /lifei/soft/apache-tomcat-10.1.7/logs/*.out
    multiline.type: pattern
    multiline.pattern: '^\d{2}'
    multiline.negate: true
    multiline.match: after
    tags: ["tomcat-error"]
output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es03:9200"]
  indices:
    - index: "oldboyedu-linux-web-nginx-access-%{+yyyy.MM.dd}"
      when.contains:
        tags: "nginx-access"
    - index: "oldboyedu-linux-web-nginx-error-%{+yyyy.MM.dd}"
      when.contains:
        tags: "nginx-error"
    - index: "oldboyedu-linux-web-tomcat-access-%{+yyyy.MM.dd}"
      when.contains:
        tags: "tomcat-access"
    - index: "oldboyedu-linux-web-tomcat-error-%{+yyyy.MM.dd}"
      when.contains:
        tags: "tomcat-error"
# 禁用索引生命周期管理
setup.ilm.enabled: false
# 设置索引模板的名称
setup.template.name: "oldboyedu-linux"
# 设置索引模板的匹配模式
setup.template.pattern: "oldboyedu-linux*"
# 覆盖已有的索引模板
setup.template.overwrite: true
# 配置索引模板
setup.template.settings:
# 设置分片数量
  index.number_of_shards: 3
# 设置副本数量,要求小于集群的数量
  index.number_of_replicas: 0











# filestream 多行匹配及基于json格式解析

filebeat.inputs:
- type: filestream
  enabled: true
  paths:
    - /lifei/soft/tomcat/logs/*.txt
  tags: ["access"]
  parsers:
   - ndjson:
       keys_under_root: true
- type: filestream
  enabled: true
  paths:
    - /lifei/soft/tomcat/logs/*.out
  tags: ["error"]
  parsers:
   - multiline:
       type: pattern
       pattern: '^\d{2}'
       negate: true
       match: after
output.elasticsearch:
  enabled: true
  hosts: ["http://es01:9200","http://es02:9200","http://es03:9200"]
  indices:
    - index: "oldboyedu-linux-web-tomcat-access-%{+yyyy.MM.dd}"
      when.contains:
        tags: "access"
    - index: "oldboyedu-linux-web-tomcat-error-%{+yyyy.MM.dd}"
      when.contains:
        tags: "error"
setup.ilm.enabled: false
setup.template.name: "oldboyedu-linux"
setup.template.pattern: "oldboyedu-linux*"
setup.template.overwrite: true
setup.template.settings:
  index.number_of_shards: 3
  index.number_of_replicas: 0










收集日志到redis服务
部署redis服务

# 安装 
yum -y install epel-release
yum -y install redis

# 修改配置文件 

 vim  /etc/redis.conf
bind 0.0.0.0
requirepass lifei

# 启动 
systemctl start redis
# 连接
redis-cli -a lifei -h 192.168.8.21 -p 6379 --raw -n 5 #连接的5号数据库
192.168.8.21:6379[5]> KEYS *

192.168.8.21:6379[5]> set zhaobo lifei
OK
192.168.8.21:6379[5]> get zhaobo
lifei
192.168.8.21:6379[5]> keys *
zhaobo

filebeat配置文件

filebeat.inputs:
- type: tcp
  host: "0.0.0.0:9000"
output.redis:
# 写入redis的主机地址
  hosts: ["10.0.0.101:6379"]
# 指定redis的认证口令
  password: "oldboyedu"
# 指定连接数据库的编号
  db: 5
# 指定的key值
  key: "oldboyedu-linux80-filebeat"
# 规定超时时间
  timeout: 3

测试

# 写入数据:
echo 33333333333333333333| nc 10.0.0.102 9000
# 查看数据:
[root@elk103.oldboyedu.com ~]# redis-cli -a oldboyedu -h 10.0.0.101 -p 6379 --raw -n 5
.....
192.168.8.21:6379[5]> LRANGE oldboyedu-linux80-filebeat 0 -1
{"@timestamp":"2023-03-24T05:57:55.045Z","@metadata":{"beat":"filebeat","type":"_doc","version":"7.17.3"},"agent":{"version":"7.17.3","hostname":"es02","ephemeral_id":"2bda90b2-70d7-45ea-8c8d-9f9783ac7d7b","id":"ae8aa42f-2d2c-454d-a3b1-be6b98e25057","name":"es02","type":"filebeat"},"message":"hahahaha","log":{"source":{"address":"192.168.8.11:59878"}},"input":{"type":"tcp"},"ecs":{"version":"1.12.0"},"host":{"name":"es02"}}
{"@timestamp":"2023-03-24T05:59:38.315Z","@metadata":{"beat":"filebeat","type":"_doc","version":"7.17.3"},"input":{"type":"tcp"},"ecs":{"version":"1.12.0"},"host":{"name":"es02"},"agent":{"ephemeral_id":"2bda90b2-70d7-45ea-8c8d-9f9783ac7d7b","id":"ae8aa42f-2d2c-454d-a3b1-be6b98e25057","name":"es02","type":"filebeat","version":"7.17.3","hostname":"es02"},"log":{"source":{"address":"192.168.8.11:59880"}},"message":"hahahaha"}
{"@timestamp":"2023-03-24T05:59:39.238Z","@metadata":{"beat":"filebeat","type":"_doc","version":"7.17.3"},"input":{"type":"tcp"},"host":{"name":"es02"},"agent":{"hostname":"es02","ephemeral_id":"2bda90b2-70d7-45ea-8c8d-9f9783ac7d7b","id":"ae8aa42f-2d2c-454d-a3b1-be6b98e25057","name":"es02","type":"filebeat","version":"7.17.3"},"ecs":{"version":"1.12.0"},"message":"hahahaha","log":{"source":{"address":"192.168.8.11:59882"}}}
{"@timestamp":"2023-03-24T05:59:47.657Z","@metadata":{"beat":"filebeat","type":"_doc","version":"7.17.3"},"log":{"source":{"address":"192.168.8.11:59884"}},"input":{"type":"tcp"},"ecs":{"version":"1.12.0"},"host":{"name":"es02"},"agent":{"type":"filebeat","version":"7.17.3","hostname":"es02","ephemeral_id":"2bda90b2-70d7-45ea-8c8d-9f9783ac7d7b","id":"ae8aa42f-2d2c-454d-a3b1-be6b98e25057","name":"es02"},"message":"bbbbbbbbb"}

六 LOGSTASH环境及基础应用

安装

wget https://artifacts.elastic.co/downloads/logstash/logstash-7.17.3-x86_64.rpm
yum -y localinstall logstash-7.17.3-x86_64.rpm
ln -sv /usr/share/logstash/bin/logstash /usr/local/bin

常用命令

logstash -tf conf.d/01-stdin-to-stdout.conf    #检查配置文件语法(-t)

logstash -f conf.d/01-stdin-to-stdout.conf     #前台启动

logstash -rf conf.d/01-stdin-to-stdout.conf    #配置文件变更时自动重载(-r)

#如果要启动第二个实例,需要加--path.data指定单独的数据存放路径
logstash -f /root/config-logstash/01-stdin-to-stdout.conf --path.data /tmp/logstash
# 测试
stdin and stdout

input {
stdin {}
}
output {
  stdout {}
}








# logstash input基于文件

input {
   file {
# 指定收集的路径
     path => ["/tmp/test/*.txt"]
# 指定文件的读取位置,仅在".sincedb*"文件中没有记录的情况下生效!
     start_position => "beginning"
#
#     start_position => "end"
        }
      }
output {
  stdout {}
}






# logstash input基于tcp

input {
   tcp {
     port => 8888
   }

   tcp {
     port => 9999
   }
}

output {
  stdout {}
}
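
logstash以该配置运行后,可在另一个终端用nc向两个端口写入数据进行测试(示例,假设logstash运行在本机):
echo "tcp-8888-test" | nc 127.0.0.1 8888
echo "tcp-9999-test" | nc 127.0.0.1 9999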






# logstash input基于http

input {
  http {
    port => 8888
  }
  http {
     port => 9999
  }
}
output {
  stdout {}
}
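
http输入可直接接收POST请求,用curl即可快速测试(示例,针对8888端口):
curl -s -XPOST http://127.0.0.1:8888 -d 'hello from curl'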
1,input插件基于redis 案例
filebeat 配置文件

filebeat.inputs:
- type: tcp
  host: "0.0.0.0:9000"
output.redis:
# 写入redis的主机地址
  hosts: ["192.168.8.21:6379"]
# 指定redis的认证口令
  password: "lifei"
# 指定连接数据库的编号
  db: 5
# 指定的key值
  key: "oldboyedu-linux80-filebeat" # 规定超时时间.
  timeout: 3




logstash 配置文件

input {
  redis {
# 指定的是REDIS的键(key)的类型
    data_type => "list"
# 指定数据库的编号,默认值是0号数据库
    db => 5
# 指定数据库的ip地址,默认值是localhost
    host => "192.168.8.21"
# 指定数据库的端口号,默认值为6379
    port => 6379
# 指定redis的认证密码
    password => "lifei"
# 指定从redis的哪个key取数据
    key => "oldboyedu-linux80-filebeat"
  }
}
output {
 # stdout {}
  elasticsearch {
    hosts => ["http://es01:9200","http://es02:9200","http://es03:9200"]
    index => "redis-%{+YYYY.MM.dd}"
  }
}



logstash input插件基于beats案例
filebeat配置文件

filebeat.inputs:
- type: tcp
  host: "0.0.0.0:9000"
output.logstash:
  hosts: ["192.168.8.31:5044"]




logstash 配置文件

input {
  beats {
     port => 5044
  }
}

output {
 # stdout {}
  elasticsearch {
    hosts => ["http://es01:9200","http://es02:9200","http://es03:9200"]
    index => "lifeitest-%{+YYYY.MM.dd}"
  }
}





output插件基于redis案例

input {
  tcp {
    port => 9999
  }
}
output {
  redis {
# 指定redis的主机地址
    host => "192.168.8.21"
# 指定redis的端口号
    port => "6379"
# 指定redis数据库编号
    db => 10
# 指定redis的密码
    password => "lifei"
# 指定写入数据的key类型
    data_type => "list"
# 指定的写入的key名称
    key => "oldboyedu-linux80-logstash"
  }
}









logstash 综合案例

input {
  tcp {
     type => "oldboyedu-tcp"
     port => 6666
  }
  beats {
     type => "oldboyedu-beat"
     port => 7777
  }
  redis {
     type => "oldboyedu-redis"
     data_type => "list"
     db => 5
     host => "192.168.8.21"
     port => 6379
     password => "lifei"
     key => "oldboyedu-linux80-filebeat"
  }
}



output {
  if [type] == "oldboyedu-tcp" {
     elasticsearch {
        hosts => ["es01:9200","es02:9200","es03:9200"]
        index => "oldboyedu-linux80-tcp-%{+YYYY.MM.dd}"
     }
  } else if [type] == "oldboyedu-beat" {
      elasticsearch {
        hosts => ["es01:9200","es02:9200","es03:9200"]
        index => "oldboyedu-linux80-beat-%{+YYYY.MM.dd}"
    }
  } else if [type] == "oldboyedu-redis" {
    elasticsearch {
      hosts => ["es01:9200","es02:9200","es03:9200"]
      index => "oldboyedu-linux80-redis-%{+YYYY.MM.dd}"
    }
  } else {
    elasticsearch {
      hosts => ["es01:9200","es02:9200","es03:9200"]
      index => "oldboyedu-linux80-other-%{+YYYY.MM.dd}"
    }} 
}
2,企业级插件案例
grok 插件

grok插件:Grok是将非结构化日志数据解析为结构化、可查询数据的好方法,底层原理是基于正则匹配任意文本格式。该工具非常适合syslog日志、apache和其他web服务器日志、mysql日志,以及通常为人类阅读而非计算机处理而编写的任何日志格式。内置约120种匹配模式,当然也可以自定义匹配模式:
        https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns

案例一 分析nginx的access 日志文件

input {
  beats {
    port => 8888
  }
}
filter {
  grok {
    match => {
# "message" => "%{COMBINEDAPACHELOG}"
# 上面的"COMBINEDAPACHELOG"变量在官方github上已经废弃,建议使用下面的匹配模式
# https://github.com/logstash-plugins/logstash-patterns-core/blob/main/patterns/legacy/httpd
       "message" => "%{HTTPD_COMMONLOG}"
    }
  }
}
output {
  stdout {}
  elasticsearch {
    hosts => ["es01:9200","es02:9200","es03:9200"]
    index => "oldboyedu-linux80-logstash-%{+YYYY.MM.dd}"
  }
}


案例二 分析自定义格式的测试数据(stdin输入)

input {
  stdin {}
}
filter {
  grok {
    match => {
      "message" => "%{IP:oldboyedu-client} %{WORD:oldboyedu-method} %{URIPATHPARAM:oldboyedu-request} %{NUMBER:oldboyedu-bytes} %{NUMBER:oldboyedu-duration}"
             }
        }
}
output {
  stdout {}
}

注:输入下列测试数据即可看到解析结果:
55.3.244.1 GET /index.html 15824 0.043
10.0.0.103 POST /oldboyedu.html 888888 5.20

使用GROK 自定义的正则案例

参考链接
https://www.elastic.co/guide/en/logstash/7.17/plugins-filters-grok.html

input {
  stdin {}
}
filter {
  grok {
    patterns_dir => ["./patterns"]

    #match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
    #POSTFIX_QUEUEID [0-9A-F]{10,11}
    #OLDBOYEDU_LINUX80 [\d]{3}
    #match => { "message" => "%{POSTFIX_QUEUEID:oldboyedu_queue_id} ---> %{OLDBOYEDU_LINUX80:oldboyedu_linux80_elk}" }
    match => { "message" => "(?<oldboyedu_queue_id>[0-9A-F]{10,11}) ---> %{OLDBOYEDU_LINUX80:oldboyedu_linux80_elk}" }
    #match => { "message" => "(?<oldboyedu_queue_id>[0-9A-F]{10,11}) ---> (?<oldboyedu_linux80_elk>[\d]{3})" }
  }
}
output {
  stdout {}
}
测试数据:输入 AAAABBB5678910 ---> 333FGHIJK 即可看到结果

使用filter插件的公用字段案例

input {
  beats {
    port => 8888 
  }
}
filter {
  grok {
    match => {
# "message" => "%{COMBINEDAPACHELOG}"
# 上面的"COMBINEDAPACHELOG"变量在官方github上已经废弃,建议使用下面的匹配模式
# https://github.com/logstash-plugins/logstash-patterns-core/blob/main/patterns/legacy/httpd
      "message" => "%{HTTPD_COMMONLOG}"
  }
# 移除指定的字段
     remove_field => [  "host", "@version", "ecs","tags","agent","input", "log" ]
# 添加指定的字段 
    add_field => {
      "school" => "北京市昌平区沙河镇老男孩IT教育"
      "oldboyedu-clientip" => "clientip ---> %{clientip}"
    }
# 添加tag
    add_tag => [ "linux80","zookeeper","kafka","elk" ]
# 移除tag
    remove_tag => [ "zookeeper", "kafka" ]
# 创建插件的唯一ID,如果不创建则系统默认生成,例如 id => "nginx"
    } 
  }
output {
  stdout {}
  elasticsearch {
        hosts => ["es01:9200","es02:9200","es03:9200"]
       index => "oldboyedu-linux80-logstash-%{+YYYY.MM.dd}"
  }
}

date插件

input {
  beats {
    port => 8888
  }
}
filter {
  grok {
    match => {
# "message" => "%{COMBINEDAPACHELOG}"
# 上面的"COMBINEDAPACHELOG"变量在官方github上已经废弃,建议使用下面的匹配模式
# https://github.com/logstash-plugins/logstash-patterns-core/blob/main/patterns/legacy/httpd
      "message" => "%{HTTPD_COMMONLOG}"
  }
# 移除指定的字段
     remove_field => ["httpversion","host", "@version", "ecs","tags","agent","input", "log" ]
# 添加指定的字段
    add_field => {
      "school" => "北京市昌平区沙河镇老男孩IT教育"
      "oldboyedu-clientip" => "clientip ---> %{clientip}"
    }
# 添加tag
    add_tag => [ "linux80","zookeeper","kafka","elk" ]
# 移除tag
    remove_tag => [ "zookeeper", "kafka" ]
# 创建插件的唯一ID,如果不创建则系统默认生成 id => "nginx"
    }
  date {
# 匹配时间字段并解析。注意:logstash的输出时间可能会差8小时,但写入es的数据是准确的!
# 示例值 "13/May/2022:15:47:24 +0800",以下2种match写法均可:
#   match => ["timestamp","dd/MMM/yyyy:HH:mm:ss Z"]
    match => ["timestamp","dd/MMM/yyyy:HH:mm:ss +0800"]
# 若将时区设置为"UTC",写入ES的数据时间是不准确的;建议设置为"Asia/Shanghai",写入ES的数据才是准确的
#   timezone => "UTC"
    timezone => "Asia/Shanghai"
# 将匹配到的时间字段解析后存储到目标字段,若不指定,则默认为"@timestamp"字段
    target => "oldboyedu-linux80-nginx-access-time"
  }

  }

output {
  stdout {}
  elasticsearch {
       hosts => ["es01:9200","es02:9200","es03:9200"]
       index => "oldboyedu-linux80-logstash-%{+YYYY.MM.dd}"
  }
}






收集useragent

input {
  beats {
     port => 8888
  }
}
filter {
  date {
     match => ["timestamp","dd/MMM/yyyy:HH:mm:ss Z"]
     timezone => "Asia/Shanghai"
     target => "oldboyedu-linux80-nginx-access-time"
  }
  mutate {
     add_field => {
     "school" => "北京市昌平区沙河镇老男孩IT教育"
     }
     remove_field => [  "agent", "host", "@version", "ecs","tags","input", "log" ]
  }
  geoip {
       source => "clientip"
       fields => ["city_name","country_name","ip"]
       target => "oldboyedu-linux80-geoip"
  }
  useragent {
# 指定客户端的设备相关信息的字段
       source => "http_user_agent"
# 将分析的数据存储在一个指定的字段中,若不指定,则默认存储在target字段中。
       target => "oldboyedu-linux80-useragent"
  }
}
output {
  stdout {}
 }


注:此处nginx使用的是前文配置的json格式日志。

logstash mutate 组件常用字段案例

产生数据python脚本

#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# @author : oldboyedu-linux80
import datetime
import random
import logging
import time
import sys
LOG_FORMAT = "%(levelname)s %(asctime)s [com.oldboyedu.%(module)s] - %(message)s "
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
# 配置root的logging.Logger实例的基本配置
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, datefmt=DATE_FORMAT, filename=sys.argv[1], filemode='a',)
actions = ["浏览页面", "评论商品", "加入收藏", "加入购物车", "提交订单", "使用优惠券", "领取优惠券","搜索", "查看订单", "付款", "清空购物车"]
while True:
  time.sleep(random.randint(1, 5))
  user_id = random.randint(1, 10000)
# 对生成的浮点数保留2位小数.
  price = round(random.uniform(15000, 30000),2)
  action = random.choice(actions)
  svip = random.choice([0,1])
  logging.info("DAU|{0}|{1}|{2}|{3}".format(user_id,action,svip,price))
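
该脚本以第一个参数作为日志输出文件;使用示例如下(假设脚本保存为/tmp/gen_app_log.py,文件名仅为示例):
python /tmp/gen_app_log.py /tmp/app.log &
tail -f /tmp/app.log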

filebeat配置

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /tmp/app.log
  tags: ["access"]
  #json.keys_under_root: true
output.logstash:
  hosts: ["192.168.8.31:8888"]

logstash配置

input {
  beats {
     port => 8888
  }
}
filter {
  mutate {
     add_field => {
        "school" => "北京市昌平区沙河镇老男孩IT教育"
     }
     remove_field => [ "@timestamp", "agent", "host", "@version", "ecs","tags","input", "log" ]
  }
  mutate {
# 对"message"字段内容使用"|"进行切分。
    split => {
        "message" => "|"
    }
  }
  mutate {
# 添加字段,其中引用到了变量
    add_field => {
         "user_id" => "%{[message][1]}"
         "action" => "%{[message][2]}"
         "svip" => "%{[message][3]}"
         "price" => "%{[message][4]}"
    }
  }
  mutate {
      strip => ["svip"]
}
    mutate {
# 将指定字段转换成相应对数据类型.
      convert => {
       "user_id" => "integer"
       "svip" => "boolean"
       "price" => "float"
      }
    }
    mutate {
# 将"price"字段拷贝到"oldboyedu-linux80-price"字段中.
      copy => { "price" => "oldboyedu-linux80-price" }
    }
    mutate {
# 修改字段的名称
     rename => { "svip" => "oldboyedu-ssvip" }
   }
    mutate {
# 替换字段的内容
      replace => { "message" => "%{message}: My new message" }
    }
    mutate {
# 将指定字段的字母全部大写
     uppercase => [ "message" ]
    }
  }
output {
  stdout {}
#  elasticsearch {
#     hosts => ["es01:9200","es02:9200","es03:9200"]
#     index => "oldboyedu-linux80-logstash-%{+YYYY.MM.dd}"
#}
}

logstash 多if 分支案例

input {
  beats {
    type => "oldboyedu-beats"
    port => 8888
  }
  tcp {
     type => "oldboyedu-tcp"
     port => 9999
  }
  tcp {
     type => "oldboyedu-tcp-new"
     port => 7777
  }
  http {
     type => "oldboyedu-http"
     port => 6666
  }
  file {
     type => "oldboyedu-file"
     path => "/tmp/apps.log"
  }
}
filter {
  mutate {
    add_field => {
      "school" => "北京市昌平区沙河镇老男孩IT教育"
      #"lifei" => "shi shenjingbing"
    }
  }
  if [type] in ["oldboyedu-beats","oldboyedu-tcp-new","oldboyedu-http"]
  #if [type] == "oldboyedu-tcp-new"
  {
    mutate {
      remove_field => [ "agent", "host", "@version", "ecs","tags","input", "log" ]
    }
    geoip {
       source => "clientip"
       target => "oldboyedu-linux80-geoip"
    }
    useragent {
       source => "http_user_agent"
       target => "oldboyedu-linux80-useragent"
    }
    }else if [type] == "oldboyedu-file" {
    mutate {
      add_field => {
        "class" => "oldboyedu-linux80"
        "address" => "北京昌平区沙河镇老男孩IT教育"
        "hobby" => ["LOL","王者荣耀"]
      }
      remove_field => ["host","@version","school"]
    }
  } else {
      mutate {
         remove_field => ["port","@version","host"]
      }
      mutate {
         split => {
            "message" => "|"
         }
         add_field => {
             "user_id" => "%{[message][1]}"
             "action" => "%{[message][2]}"
             "svip" => "%{[message][3]}"
             "price" => "%{[message][4]}"
         }
# 利用完message字段后,再删除是可以的!注意代码的执行顺序!
#        remove_field => ["message"]
         strip => ["svip"]
      }
      mutate {
        convert => {
           "user_id" => "integer"
           "svip" => "boolean"
           "price" => "float"
        }
      }
  }
}
output {
  stdout {}
  if [type] == "oldboyedu-beats"{
    elasticsearch {
      hosts =>["es01:9200","es02:9200","es03:9200"]
      user => "logstash_system1"
      password => "m1rHKcad3eVngG3tR5av"
      index => "oldboyedu-linux80-logstash-beats"
    }
  }else {
      elasticsearch {
      hosts =>["es01:9200","es02:9200","es03:9200"]
      user => "logstash_system1"
      password => "m1rHKcad3eVngG3tR5av"
      index => "oldboyedu-linux80-logstash-tcp"
    }
  }
}
