sixteenth work

总结ELK 各组件的主要功能

elasticsearch:负责数据存储及检索
logstash:负责日志收集、日志处理并发送至elasticsearch
kibana:负责从ES读取数据进行可视化展示及数据管理

部署ES cluster,并实现XPACK认证

服务器信息

IP:10.0.0.180 主机名:es-node1.wang.com
IP:10.0.0.181 主机名:es-node2.wang.com
IP:10.0.0.182 主机名:es-node3.wang.com

内核参数优化

#每个es节点修改内核参数vm.max_map_count,需要大于或者等于262144,不然es启动不了
vim /etc/sysctl.conf 
vm.max_map_count=262144
sysctl -p

主机名解析

如果配置文件里面指定了主机名,就需要配置,如果没有,就不需要配置

# tail /etc/hosts

10.0.0.180 es-node1.wang.com
10.0.0.181 es-node2.wang.com
10.0.0.182 es-node3.wang.com

资源limit优化

#每个es节点,需要修改
vim /etc/security/limits.conf
root soft core unlimited
root hard core unlimited
root soft nproc 1000000
root hard nproc 1000000
root soft nofile 1000000
root hard nofile 1000000
root soft memlock 32000
root hard memlock 32000
root soft msgqueue 8192000
root hard msgqueue 8192000
* soft core unlimited
* hard core unlimited
* soft nproc 1000000
* hard nproc 1000000
* soft nofile 1000000
* hard nofile 1000000
* soft memlock 32000
* hard memlock 32000
* soft msgqueue 8192000
* hard msgqueue 8192000

创建普通用户运行环境

#每个es节点创建普通用户运行环境。如果用root账号启动,es也启动不起来
groupadd -g 2888 elasticsearch && useradd -u 2888 -g 2888 -r -m -s /bin/bash elasticsearch
#设置密码
root@ubuntu:~# passwd elasticsearch
New password: 
Retype new password: 
passwd: password updated successfully

#创建项目目录和es数据目录
mkdir /data/esdata /data/eslogs /apps -pv

#给目录赋权elasticsearch
chown elasticsearch.elasticsearch /data /apps/ -R

部署es

es各个节点一样配置

#进入工作目录
cd /apps
#下载es二进制包
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-8.5.1-linux-x86_64.tar.gz
#解压二进制包
tar xvf elasticsearch-8.5.1-linux-x86_64.tar.gz
#创建软连接
ln -sv /apps/elasticsearch-8.5.1 /apps/elasticsearch

#各个服务器再次赋权
chown elasticsearch.elasticsearch /data /apps/ -R
#重启各个服务器,使刚刚的系统配置生效
reboot

xpack认证签发环境

生产环境一般不用设置证书认证
可以开也可以不开,要不要给es提供登录认证
xpack就是让你花钱,来扩展你的ES的功能的。它里面有很多插件,需要收费

签发证书在es其中一个节点签发就行

#签发证书在es其中一个节点签发就行
#切换到elasticsearch账号
su - elasticsearch
#进入到/apps/elasticsearch目录
cd /apps/elasticsearch

#编辑es节点服务器信息文件;现在使用的3个节点,考虑到以后的扩展,可以把ip地址先规划好,这里也添加规划好的
vim instances.yml
instances:
  - name: "es-node1.wang.com"
    ip:
      - "10.0.0.180"
  - name: "es-node2.wang.com"
    ip:
      - "10.0.0.181"
  - name: "es-node3.wang.com"
    ip:
      - "10.0.0.182"
  - name: "es-node4.wang.com"
    ip:
      - "10.0.0.183"
  - name: "es-node5.wang.com"
    ip:
      - "10.0.0.184"

#生成CA私钥,默认名字为elastic-stack-ca.p12
elasticsearch@es-node1:/apps/elasticsearch$ bin/elasticsearch-certutil ca
This tool assists you in the generation of X.509 certificates and certificate
signing requests for use with SSL/TLS in the Elastic stack.

The 'ca' mode generates a new 'certificate authority'
This will create a new X.509 certificate and private key that can be used
to sign certificate when running in 'cert' mode.

Use the 'ca-dn' option if you wish to configure the 'distinguished name'
of the certificate authority

By default the 'ca' mode produces a single PKCS#12 output file which holds:
    * The CA certificate
    * The CA's private key

If you elect to generate PEM format certificates (the -pem option), then the output will
be a zip file containing individual files for the CA certificate and private key

 #指定输出文件,默认是elastic-stack-ca.p12,直接回车默认就行
Please enter the desired output file [elastic-stack-ca.p12]: 
#设置elastic-stack-ca.p12密码;
#后期我们会通过这个ca去颁发证书,如果这里指定了密码,到时候就要指定这个ca的密码。所以默认不要密码就行,直接回车
Enter password for elastic-stack-ca.p12 : 
elasticsearch@es-node1:/apps/elasticsearch$

#生成CA公钥,默认名称为elastic-certificates.p12
elasticsearch@es-node1:/apps/elasticsearch$ bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12
...
#这里输入CA私钥的密码,我们刚刚没有设置密码,所以直接回车
Enter password for CA (elastic-stack-ca.p12) : 
#输入CA公钥的文件名,默认就行,直接回车
Please enter the desired output file [elastic-certificates.p12]: 
#设置CA公钥的密码,我们同样不设置,直接回车
Enter password for elastic-certificates.p12 : 

#查看当前目录;公钥和私钥已创建好
elasticsearch@es-node1:/apps/elasticsearch$ ll
total 2244
drwxr-xr-x  9 elasticsearch elasticsearch    4096 Nov 15 06:59 ./
drwxr-xr-x  3 elasticsearch elasticsearch    4096 Nov 15 06:38 ../
-rw-r--r--  1 elasticsearch elasticsearch    3860 Nov  9  2022 LICENSE.txt
-rw-r--r--  1 elasticsearch elasticsearch 2235851 Nov  9  2022 NOTICE.txt
-rw-r--r--  1 elasticsearch elasticsearch    8107 Nov  9  2022 README.asciidoc
drwxr-xr-x  2 elasticsearch elasticsearch    4096 Nov  9  2022 bin/
drwxr-xr-x  3 elasticsearch elasticsearch    4096 Nov 15 06:37 config/
-rw-------  1 elasticsearch elasticsearch    3596 Nov 15 06:59 elastic-certificates.p12
-rw-------  1 elasticsearch elasticsearch    2672 Nov 15 06:51 elastic-stack-ca.p12
-rw-rw-r--  1 elasticsearch elasticsearch     307 Nov 15 06:49 instances.yml
drwxr-xr-x  8 elasticsearch elasticsearch    4096 Nov  9  2022 jdk/
drwxr-xr-x  5 elasticsearch elasticsearch    4096 Nov  9  2022 lib/
drwxr-xr-x  2 elasticsearch elasticsearch    4096 Nov  9  2022 logs/
drwxr-xr-x 67 elasticsearch elasticsearch    4096 Nov  9  2022 modules/
drwxr-xr-x  2 elasticsearch elasticsearch    4096 Nov  9  2022 plugins/

#签发elasticsearch集群主机证书
#--pass magedu123 指定证书的密码
elasticsearch@es-node1:/apps/elasticsearch$ bin/elasticsearch-certutil cert --silent --in instances.yml --out certs.zip --pass magedu123 --ca elastic-stack-ca.p12
Enter password for CA (elastic-stack-ca.p12) :  #刚刚没有设置ca私钥密码,所以直接回车
elasticsearch@es-node1:/apps/elasticsearch$ 

#证书分发
elasticsearch@es-node1:/apps/elasticsearch$ unzip certs.zip
#各个节点创建证书目录;注意切换到elasticsearch用户创建
mkdir /apps/elasticsearch/config/certs
#将每个节点对应的证书放入到/apps/elasticsearch/config/certs目录下
cp es-node1.wang.com/es-node1.wang.com.p12 config/certs/
scp es-node2.wang.com/es-node2.wang.com.p12 10.0.0.181:/apps/elasticsearch/config/certs/
scp es-node3.wang.com/es-node3.wang.com.p12 10.0.0.182:/apps/elasticsearch/config/certs/

#生成 keystore 文件(keystore是保存了证书密码 magedu123 的认证文件)
elasticsearch@es-node1:/apps/elasticsearch$ bin/elasticsearch-keystore create
Created elasticsearch keystore in /apps/elasticsearch/config/elasticsearch.keystore

elasticsearch@es-node1:/apps/elasticsearch$ bin/elasticsearch-keystore add xpack.security.transport.ssl.keystore.secure_password
Enter value for xpack.security.transport.ssl.keystore.secure_password: #magedu123
elasticsearch@es-node1:/apps/elasticsearch$ bin/elasticsearch-keystore add xpack.security.transport.ssl.truststore.secure_password
Enter value for xpack.security.transport.ssl.truststore.secure_password: #magedu123

#分发密码认证文件
scp /apps/elasticsearch/config/elasticsearch.keystore 10.0.0.181:/apps/elasticsearch/config/elasticsearch.keystore
scp /apps/elasticsearch/config/elasticsearch.keystore 10.0.0.182:/apps/elasticsearch/config/elasticsearch.keystore

编辑配置文件

vim /apps/elasticsearch/config/elasticsearch.yml
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
#       Before you set out to tweak and tune the configuration, make sure you
#       understand what are you trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please consult the documentation for further information on configuration options:
# https://www.elastic.co/guide/en/elasticsearch/reference/index.html
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#集群名称,一个集群中所有节点,集群名称相同
cluster.name: my-es-application
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#节点名称,一个集群中,每个节点节点名称不相同
node.name: es-node1
#
# Add custom attributes to the node:
#自定义的属性信息,一般不用加
#node.attr.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#es的数据目录
path.data: /data/esdata
#
# Path to log files:
#es的日志目录
path.logs: /data/eslogs
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#内存配置,当你启动es的时候要不要直接给es分配内存;这个内存大小设置在/apps/elasticsearch/config/jvm.options中设置
#/apps/elasticsearch/config/jvm.options中设置内存,默认为1G;官方说最好设置为物理内存的一半;但是不要超过30G
#/apps/elasticsearch/config/jvm.options中;-Xms4g 最小内存;-Xmx4g最大内存
#bootstrap.memory_lock: true
#
# Make sure that the heap size is set to about half the memory available
# on the system and that the owner of the process is allowed to use this
# limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# By default Elasticsearch is only accessible on localhost. Set a different
# address here to expose this node on the network:
#当前主机绑定ip;可以设置为0.0.0.0
network.host: 0.0.0.0
#
# By default Elasticsearch listens for HTTP traffic on the first free port it
# finds starting at 9200. Set a specific HTTP port here:
#端口
http.port: 9200
#
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when this node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#发现配置,集群发现,向哪些主机通告我的状态,哪些节点想把它选举为master,就要加上
discovery.seed_hosts: ["10.0.0.180", "10.0.0.181", "10.0.0.182"]
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
#哪些主机可以被选举为master节点
cluster.initial_master_nodes: ["10.0.0.180", "10.0.0.181", "10.0.0.182"]
#
# For more information, consult the discovery and cluster formation module documentation.
#
# --------------------------------- Readiness ----------------------------------
#
# Enable an unauthenticated TCP readiness endpoint on localhost
#容器里面的探针检查,如果要跑在容器里,就需要开启
#readiness.port: 9399
#
# ---------------------------------- Various -----------------------------------
#
# Allow wildcard deletion of indices:
#通过api删除es数据的时候是否需要精确匹配,一般我们设置为true;模糊匹配就需要正则表达式,写错了,容易出错
action.destructive_requires_name: true
#开启xpack认证
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.keystore.path: /apps/elasticsearch/config/certs/es-node1.wang.com.p12
xpack.security.transport.ssl.truststore.path: /apps/elasticsearch/config/certs/es-node1.wang.com.p12
#es-node1配置
elasticsearch@es-node1:/apps/elasticsearch/config$ grep -Ev '^($|#)' /apps/elasticsearch/config/elasticsearch.yml
cluster.name: my-es-application
node.name: es-node1
path.data: /data/esdata
path.logs: /data/eslogs
network.host: 0.0.0.0
http.port: 9200
discovery.seed_hosts: ["10.0.0.180", "10.0.0.181", "10.0.0.182"]
cluster.initial_master_nodes: ["10.0.0.180", "10.0.0.181", "10.0.0.182"]
action.destructive_requires_name: true
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.keystore.path: /apps/elasticsearch/config/certs/es-node1.wang.com.p12
xpack.security.transport.ssl.truststore.path: /apps/elasticsearch/config/certs/es-node1.wang.com.p12

#es-node2配置
elasticsearch@es-node2:~$ grep -Ev '^($|#)' /apps/elasticsearch/config/elasticsearch.yml
cluster.name: my-es-application
node.name: es-node2
path.data: /data/esdata
path.logs: /data/eslogs
network.host: 0.0.0.0
http.port: 9200
discovery.seed_hosts: ["10.0.0.180", "10.0.0.181", "10.0.0.182"]
cluster.initial_master_nodes: ["10.0.0.180", "10.0.0.181", "10.0.0.182"]
action.destructive_requires_name: true
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.keystore.path: /apps/elasticsearch/config/certs/es-node2.wang.com.p12
xpack.security.transport.ssl.truststore.path: /apps/elasticsearch/config/certs/es-node2.wang.com.p12

#es-node3配置
elasticsearch@es-node3:~$ grep -Ev '^($|#)' /apps/elasticsearch/config/elasticsearch.yml 
cluster.name: my-es-application
node.name: es-node3
path.data: /data/esdata
path.logs: /data/eslogs
network.host: 0.0.0.0
http.port: 9200
discovery.seed_hosts: ["10.0.0.180", "10.0.0.181", "10.0.0.182"]
cluster.initial_master_nodes: ["10.0.0.180", "10.0.0.181", "10.0.0.182"]
action.destructive_requires_name: true
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.keystore.path: /apps/elasticsearch/config/certs/es-node3.wang.com.p12
xpack.security.transport.ssl.truststore.path: /apps/elasticsearch/config/certs/es-node3.wang.com.p12

各node节点配置service文件并启动集群

#各个节点切换到root账号创建
vim  /lib/systemd/system/elasticsearch.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch
Environment=ES_HOME=/apps/elasticsearch
Environment=ES_PATH_CONF=/apps/elasticsearch/config
Environment=PID_DIR=/apps/elasticsearch
WorkingDirectory=/apps/elasticsearch
User=elasticsearch
Group=elasticsearch
ExecStart=/apps/elasticsearch/bin/elasticsearch --quiet
# StandardOutput is configured to redirect to journalctl since
# some error messages may be logged in standard output before
# elasticsearch logging system is initialized. Elasticsearch
# stores its logs in /var/log/elasticsearch and does not use
# journalctl by default. If you also want to enable journalctl
# logging, you can simply remove the "quiet" option from ExecStart.
StandardOutput=journal
StandardError=inherit
# Specifies the maximum file descriptor number that can be opened by this process
LimitNOFILE=65536
# Specifies the maximum number of processes
LimitNPROC=4096
# Specifies the maximum size of virtual memory
LimitAS=infinity
# Specifies the maximum file size
LimitFSIZE=infinity
# Disable timeout logic and wait until process is stopped
TimeoutStopSec=0
# SIGTERM signal is used to stop the Java process
KillSignal=SIGTERM
# Send the signal only to the JVM rather than its control group
KillMode=process
# Java process is never killed
SendSIGKILL=no
# When a JVM receives a SIGTERM signal it exits with code 143
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target

 #再次给各个节点赋权
 chown -R elasticsearch:elasticsearch /data/ /apps/

#设置开机启动,并启动。各个节点尽量同时启动
systemctl daemon-reload && systemctl start elasticsearch.service && systemctl enable elasticsearch.service

#查看日志有没有报错
tail -f /data/eslogs/my-es-application.log

掌握Logstash部署、配置文件编写及收集多个日志文件,并写入到ES不同的index中

logstash安装及环境测试

#下载logstash8.5.1 deb包
wget https://artifacts.elastic.co/downloads/logstash/logstash-8.5.1-amd64.deb
#安装
dpkg -i logstash-8.5.1-amd64.deb

#直接启动logstash测试标准输入和输出
/usr/share/logstash/bin/logstash -e 'input { stdin{} } output { stdout{ codec => rubydebug }}'

hello world
{
      "@version" => "1",
       "message" => "hello world",
    "@timestamp" => 2023-11-17T02:50:58.408347220Z,
          "host" => {
        "hostname" => "ubuntu"
    },
         "event" => {
        "original" => "hello world"
    }
}

#测试输出到文件
/usr/share/logstash/bin/logstash -e 'input { stdin{} } output { file { path => "/tmp/logstash-test-%{+YYYY.MM.dd}.txt"}}'
#查看输出的文件
root@ubuntu:~# tail -f /tmp/logstash-test-2023.11.17.txt 
{"@timestamp":"2023-11-17T02:52:53.908079058Z","@version":"1","message":"hello","event":{"original":"hello"},"host":{"hostname":"ubuntu"}}
{"@timestamp":"2023-11-17T02:52:59.932950720Z","@version":"1","message":"ni hao","event":{"original":"ni hao"},"host":{"hostname":"ubuntu"}}
{"@timestamp":"2023-11-17T02:53:03.054339005Z","@version":"1","message":"yes","event":{"original":"yes"},"host":{"hostname":"ubuntu"}}

logstash多日志收集案例

#主要是用type进行if语句判断。
vim /etc/logstash/conf.d/syslog-to-es.conf 
input {
  file {
    path => "/var/log/dpkg.log"
    start_position => "beginning"
    stat_interval => "1"
    type => "systemlog"
 }
  file {
    path => "/var/log/syslog"
    start_position => "beginning"
    stat_interval => "1"
    type => "syslog"
 }
}

output {
  if [type] == "systemlog" {
    elasticsearch {
      hosts => ["10.0.0.180:9200","10.0.0.181:9200","10.0.0.182:9200"]
      index => "logstash-server1-systemlog-%{+YYYY.MM.dd}"
      user => "magedu"
      password => "123456"
  }
 }
  if [type] == "syslog" {
    elasticsearch {
      hosts => ["10.0.0.180:9200","10.0.0.181:9200","10.0.0.182:9200"]
      index => "logstash-server1-syslog-%{+YYYY.MM.dd}"
      user => "magedu"
      password => "123456"
  }
 }

}

#检查文件有没有错误
 /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/syslog-to-es.conf -t

Kibana的安装及使用、heartbeat和metricbeat的安装使用

Kibana的安装及使用

kibana版本要和ES版本一致
下载地址:https://www.elastic.co/cn/downloads/past-releases#kibana

#由于我们ES使用的8.5.1;这里我们下载8.5.1的版本
wget https://artifacts.elastic.co/downloads/kibana/kibana-8.5.1-amd64.deb
#安装
dpkg -i kibana-8.5.1-amd64.deb
#修改配置文件
vim /etc/kibana/kibana.yml
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://10.0.0.180:9200"]
elasticsearch.username: "kibana_system"
elasticsearch.password: "123456"
i18n.locale: "zh-CN"

#重启kibana
systemctl restart kibana
#查看日志是否有报错
tail -f /var/log/kibana/kibana.log

登录kibana页面
10.0.0.180:5601
账号密码就是ES的账号密码
在这里插入图片描述
这里选择”自己浏览“,不添加集成
在这里插入图片描述
Stack Management–>数据视图–>创建数据视图
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述

heartbeat和metricbeat的安装使用

Metricbeat安装及配置:https://www.elastic.co/cn/beats/metricbeat
下载地址:https://www.elastic.co/cn/downloads/beats
版本应该和ES保持一致

#安装
dpkg -i metricbeat-8.5.1-amd64.deb
#编辑配置文件
vim /etc/metricbeat/metricbeat.yml
setup.kibana:

  host: "10.0.0.180:5601"
  setup_kibana_username: "magedu"
  setup_kibana_password: "123456"

output.elasticsearch:
  
  hosts: ["10.0.0.180:9200"]
  username: "magedu"
  password: "123456"

#重启metricbeat
systemctl restart metricbeat

kibana查看
在这里插入图片描述
Heartbeat安装及配置: https://www.elastic.co/cn/beats/heartbeat
下载地址:https://www.elastic.co/cn/downloads/beats
版本应该和ES保持一致

#安装
dpkg -i heartbeat-8.5.1-amd64.deb
#编辑配置文件
vim /etc/heartbeat/heartbeat.yml
heartbeat.config.monitors:
  path: ${path.config}/monitors.d/*.yml
  reload.enabled: false
  reload.period: 5s
heartbeat.monitors:
- type: http
  enabled: true
  id: http-monitor
  name: http-deamon-monitor
  urls: ["http://www.baidu.com","http://www.magedu.com"]
  schedule: '@every 10s'
- type: icmp
  enabled: true
  id: icmp-monitor
  name: icmp-deamon-monitor
  schedule: '*/5 * * * * * *'
  hosts: ["10.0.0.180","10.0.0.181"]
setup.template.settings:
  index.number_of_shards: 1
  index.codec: best_compression
setup.kibana:
  host: "10.0.0.180:5601"
  setup_kibana_username: "magedu"
  setup_kibana_password: "123456"
output.elasticsearch:
  hosts: ["10.0.0.180:9200"]
  username: "magedu"
  password: "123456"
processors:
  - add_observer_metadata:

#重启heartbeat
systemctl restart heartbeat-elastic.service

kibana验证
在这里插入图片描述

基于Logstash Filter总结,并基于Gork解析Nginx默认格式的访问日志及错误日志为JSON格式、并写入Elasticsearch并在Kibana展示

filter 插件可实现从input方向进入的event按照指定的条件进行数据解析、字段删除、数据类型转换等操作,然后再从output方向发送到elasticsearch等目的server进行存储及展示,filter 阶段主要基于不同的插件实现不同的功能,官方链接:
https://www.elastic.co/guide/en/logstash/current/plugins-filters-date.html
 aggregate:同一个事件的多行日志聚合功能,https://www.elastic.co/guide/en/logstash/current/plugins-filters-aggregate.html
 bytes: 将存储单位MB、GB、TB等转换为字节,https://www.elastic.co/guide/en/logstash/current/plugins-filters-bytes.html
 date:从事件中解析日期,然后作为logstash的时间戳,https://www.elastic.co/guide/en/logstash/current/plugins-filters-date.html
 …
 geoip:对IP进行地理信息识别并添加到事件中,https://www.elastic.co/guide/en/logstash/current/plugins-filters-geoip.html
 grok:基于正则表达式对事件进行匹配并以json格式输出,grok经常用于对系统errlog、mysql及zookeeper等中间件服务、网络设备日志等进行重新结构化处理(将非json格式日志转换为json格式),然后将转换后的日志重新输出到elasticsearch进行存储、在通过kibana进行绘图展示,https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
/usr/share/logstash/vendor/bundle/jruby/2.6.0/gems/logstash-patterns-core-4.3.4/patterns/legacy/grok-patterns #内置正则
 mutate: 对事件中的字段进行重命名、删除、修改等操作,https://www.elastic.co/guide/en/logstash/current/plugins-filters-mutate.html

基于logstash收集Nginx日志

#下载nginx源码
wget https://nginx.org/download/nginx-1.24.0.tar.gz
tar xvf nginx-1.24.0.tar.gz
cd nginx-1.24.0
#安装nginx需要的基础环境
apt install iproute2 ntpdate tcpdump telnet traceroute nfs-kernel-server nfs-common lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip make

#编译
./configure --prefix=/apps/nginx \
--with-http_ssl_module \
--with-http_v2_module \
--with-http_realip_module \
--with-http_stub_status_module \
--with-http_gzip_static_module \
--with-pcre \
--with-file-aio \
--with-stream \
--with-stream_ssl_module \
--with-stream_realip_module

#安装
make && make install

#编辑nginx配置文件
vim /apps/nginx/conf/nginx.conf
 server {
        listen       80;
        server_name  www.wang.com;

访问nginx页面生成日志
http://www.wang.com
在这里插入图片描述
配置logstash配置文件

vim /etc/logstash/conf.d/filter-nginx-log-to-es.conf
input {
  file {
    path => "/apps/nginx/logs/access.log"
    type => "nginx-accesslog"
    stat_interval => "1"
    start_position => "beginning"
  }

  file {
    path => "/apps/nginx/logs/error.log"
    type => "nginx-errorlog"
    stat_interval => "1"
    start_position => "beginning"
  }

}

filter {
  if [type] == "nginx-accesslog" {
  grok {
    match => { "message" => ["%{IPORHOST:clientip} - %{DATA:username} \[%{HTTPDATE:request-time}\] \"%{WORD:request-method} %{DATA:request-uri} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent_bytes} \"%{DATA:referrer}\" \"%{DATA:useragent}\""] }
    remove_field => "message"
    add_field => { "project" => "magedu"}
  }
  mutate {
    convert => [ "[response_code]", "integer"]
    }
  }
  if [type] == "nginx-errorlog" {
    grok {
      match => { "message" => ["(?<timestamp>%{YEAR}[./]%{MONTHNUM}[./]%{MONTHDAY} %{TIME}) \[%{LOGLEVEL:loglevel}\] %{POSINT:pid}#%{NUMBER:threadid}\: \*%{NUMBER:connectionid} %{GREEDYDATA:message}, client: %{IPV4:clientip}, server: %{GREEDYDATA:server}, request: \"(?:%{WORD:request-method} %{NOTSPACE:request-uri}(?: HTTP/%{NUMBER:httpversion}))\", host: %{GREEDYDATA:domainname}"]}
      remove_field => "message"
    }
  }
}

output {
  if [type] == "nginx-accesslog" {
    elasticsearch {
      hosts => ["172.31.2.101:9200"]
      index => "magedu-nginx-accesslog-%{+yyyy.MM.dd}"
      user => "magedu"
      password => "123456"
  }}

  if [type] == "nginx-errorlog" {
    elasticsearch {
      hosts => ["172.31.2.101:9200"]
      index => "magedu-nginx-errorlog-%{+yyyy.MM.dd}"
      user => "magedu"
      password => "123456"
  }}

}

#检查文件是否有错误
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/filter-nginx-log-to-es.conf -t

#重启logstash
systemctl restart logstash

es界面验证
在这里插入图片描述
kibana界面验证
在这里插入图片描述

基于logstash收集Nginx JSON格式访问日志

现在大多数服务都支持json格式日志,可以让开发把日志弄成json的格式

#nginx配置json格式的日志
    log_format access_json '{"@timestamp":"$time_iso8601",'
                           '"host":"$server_addr",'
                           '"clientip":"$remote_addr",'
                           '"size":$body_bytes_sent,'
                           '"responsetime":$request_time,'
                           '"upstreamtime":"$upstream_response_time",'
                           '"upstreamhost":"$upstream_addr",'
                           '"http_host":"$host",'
                           '"uri":"$uri",'
                           '"domain":"$host",'
                           '"xff":"$http_x_forwarded_for",'
                           '"referer":"$http_referer",'
                           '"tcp_xff":"$proxy_protocol_addr",'
                           '"http_user_agent":"$http_user_agent",'
                           '"status":"$status"}';
    access_log /apps/nginx/logs/json-ccess.log access_json;

访问web界面生成日志

#查看nginx日志
root@ubuntu:~/nginx-1.24.0# tail -f /apps/nginx/logs/json-ccess.log 
{"@timestamp":"2023-11-17T06:57:23+00:00","host":"10.0.0.183","clientip":"10.0.0.1","size":0,"responsetime":0.000,"upstreamtime":"-","upstreamhost":"-","http_host":"www.wang.com","uri":"/index.html","domain":"www.wang.com","xff":"-","referer":"-","tcp_xff":"-","http_user_agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36","status":"304"}
{"@timestamp":"2023-11-17T06:57:23+00:00","host":"10.0.0.183","clientip":"10.0.0.1","size":0,"responsetime":0.000,"upstreamtime":"-","upstreamhost":"-","http_host":"www.wang.com","uri":"/index.html","domain":"www.wang.com","xff":"-","referer":"-","tcp_xff":"-","http_user_agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36","status":"304"}

编辑logstash配置文件

vim /etc/logstash/conf.d/nginx-jsonlog-to-es.conf
input {
  file {
    path => "/apps/nginx/logs/json-ccess.log"
    start_position => "end"
    type => "nginx-json-accesslog"
    stat_interval => "1"
    codec => json
  }
}


output {
  if [type] == "nginx-json-accesslog" {
    elasticsearch {
      hosts => ["10.0.0.180:9200"]
      index => "nginx-json-accesslog-%{+YYYY.MM.dd}"
      user => "magedu"
      password => "123456"
  }}
}

#重启logstash
systemctl restart logstash

kibana界面验证
在这里插入图片描述

  • 23
    点赞
  • 22
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值