ELK Log Monitoring Deployment (Part 3)

一、Elasticsearch plugins

  • Past releases of every ELK component can be downloaded from https://www.elastic.co/downloads/past-releases
    1. Plugins that can be installed directly with the bin/elasticsearch-plugin command (tested on ES 5.3.1)
#after installation, a directory for each plugin is created under elasticsearch-5.3.1/plugins/
bin/elasticsearch-plugin install analysis-icu
bin/elasticsearch-plugin install analysis-kuromoji
bin/elasticsearch-plugin install analysis-phonetic
bin/elasticsearch-plugin install analysis-smartcn
bin/elasticsearch-plugin install analysis-stempel
bin/elasticsearch-plugin install analysis-ukrainian
bin/elasticsearch-plugin install discovery-file
bin/elasticsearch-plugin install ingest-attachment
bin/elasticsearch-plugin install ingest-geoip
bin/elasticsearch-plugin install ingest-user-agent
bin/elasticsearch-plugin install mapper-attachments
bin/elasticsearch-plugin install mapper-size
bin/elasticsearch-plugin install mapper-murmur3
bin/elasticsearch-plugin install lang-javascript
bin/elasticsearch-plugin install lang-python
bin/elasticsearch-plugin install repository-hdfs
bin/elasticsearch-plugin install repository-s3
bin/elasticsearch-plugin install repository-azure
bin/elasticsearch-plugin install repository-gcs
bin/elasticsearch-plugin install store-smb
bin/elasticsearch-plugin install discovery-ec2
bin/elasticsearch-plugin install discovery-azure-classic
bin/elasticsearch-plugin install discovery-gce
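
The same tool can also list the installed plugins or remove one again:
bin/elasticsearch-plugin list                  #show installed plugins
bin/elasticsearch-plugin remove analysis-icu   #remove a plugin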

--------------------------------------------

1.1 Query the list of monitoring (_cat) endpoints

curl -XGET 'http://172.18.68.11:9200/_cat?pretty'  
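The _cat root only lists the available endpoints; each endpoint can then be queried on its own, for example:
curl -XGET 'http://172.18.68.11:9200/_cat/health?v'     #cluster health
curl -XGET 'http://172.18.68.11:9200/_cat/nodes?v'      #node list
curl -XGET 'http://172.18.68.11:9200/_cat/indices?v'    #all indices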

2. Installing the head plugin (tested on ES 5.3.1)

(1) Install Node.js first, because the head plugin depends on it
# curl -sL https://rpm.nodesource.com/setup_8.x | bash -
# yum install -y nodejs

(2) Verify the Node.js installation
[root@localhost soft]# node -v
v8.15.0
[root@localhost soft]# npm -v
6.4.1

(3) Download the head plugin and upload it to the Linux host. Do not put it inside the ES directory; placing it alongside the ES directory is fine.
https://github.com/mobz/elasticsearch-head

(4) cd into the elasticsearch-head-master directory and install grunt; the Taobao npm mirror (cnpm) is installed first
# cd /application/elasticsearch-head-master/
# npm install -g cnpm --registry=https://registry.npm.taobao.org     #install the Taobao npm mirror (cnpm)
# cnpm install -g grunt-cli       #install grunt-cli
# cnpm install grunt --save-dev   #install grunt as a dev dependency
# cnpm install           #install the head plugin's dependencies
# grunt -V               #check the grunt installation
grunt-cli v1.3.2
grunt v1.0.3

(5) In the elasticsearch-head-master directory, edit Gruntfile.js: add a hostname property set to *
# cd /application/elasticsearch-head-master/
# vim Gruntfile.js   #find the connect section and add hostname: '*',

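After the edit, the connect block should look roughly like this (a sketch based on the stock elasticsearch-head Gruntfile; only the hostname line is new):

connect: {
        server: {
                options: {
                        hostname: '*',
                        port: 9100,
                        base: '.',
                        keepalive: true
                }
        }
}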

(6) In the elasticsearch-head-master directory, edit the _site/app.js file: change the address head connects to, replacing the IP with the IP of the server running ES
# cd /application/elasticsearch-head-master/
# vim _site/app.js   #find the app.App section and change the IP address

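The line to change is the default base_uri near the app.App definition; replace localhost with the IP of the ES server (a sketch, the exact line may differ slightly between head versions):

this.base_uri = this.config.base_uri || this.prefs.get("app-base_uri") || "http://172.16.12.33:9200";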

(7) Edit elasticsearch.yml and add the following two lines so the head plugin is allowed to access Elasticsearch (restart ES afterwards)
# vi /application/elasticsearch-5.3.1/config/elasticsearch.yml 
http.cors.enabled: true  
http.cors.allow-origin: "*"
    
(8) In the elasticsearch-head-master directory, start grunt
# grunt server     
Running "connect:server" (connect) task
Waiting forever...
Started connect web server on http://localhost:9100

(9) Open the head plugin at http://172.16.12.33:9100/

3. Installing the bigdesk plugin on ES 5.3.1

(1) Add the following two lines to the ES configuration file, then restart the ES service
http.cors.enabled: true  
http.cors.allow-origin: "*"
    
(2) Download the bigdesk plugin; place it alongside the ES directory, not inside it.
git clone https://github.com/hlstudio/bigdesk
cd bigdesk/_site/
python -m SimpleHTTPServer     #start bigdesk (serves on port 8000 by default)
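If the host only has Python 3, the equivalent built-in server is:
python3 -m http.server 8000      #same effect as SimpleHTTPServer on Python 3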

(3) Open the bigdesk plugin
http://172.16.12.33:8000

二、Common ELK errors

1. Timelion queries in Kibana return an error
The error means this limit exists to keep a single query from hitting a huge number of shards and starving CPU and memory. To lift the limit, raise the action.search.shard_count.limit setting.
2. The fix is to change action.search.shard_count.limit according to the error message. Here the error says the query hits 2301 shards but the limit is 1000, so setting the value above 2301 is enough.

(1) Raise the Elasticsearch query limit to 3500; if no username and password are configured, the -u option can be left out
curl -u admin:admin -XPUT 'https://localhost:9200/_cluster/settings' -H 'Content-Type: application/json' -d' 
{
    "persistent" : {
        "action.search.shard_count.limit" : "3500"
    }
}
'
(2) Check whether the setting was changed
[root@localhost ~]# curl   http://172.16.12.33:9200/_cluster/settings
{"persistent":{"action":{"search":{"shard_count":{"limit":"3500"}}}},"transient":{}}

(3) Reload Kibana (kill -1 sends SIGHUP, which has the effect of a reload)
# kill -1 pid
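If the Kibana PID is not known, it can be looked up from the listening port first (5601 is Kibana's default port):
# netstat -npult | grep 5601     #the last column shows pid/program name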
(4) Query Timelion in Kibana again; if it still errors, keep raising the value (at the cost of higher CPU and memory usage)

3. Discover: Request Timeout after 30000ms

#increase the JVM (Java Virtual Machine) heap size
# vim /application/elasticsearch-5.3.1/config/jvm.options
#set it to 4 GB
-Xms4g
-Xmx4g
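
If the timeout persists, the 30000 ms limit itself can also be raised on the Kibana side; elasticsearch.requestTimeout in kibana.yml controls it (value in milliseconds, 90000 below is just an example, path as used elsewhere in this setup):
# vim /application/kibana-5.3.1-linux-x86_64/config/kibana.yml
elasticsearch.requestTimeout: 90000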

4. Sometimes an index never appears because nothing has been written to the monitored log; you can write a line manually to test. Another possible cause is a previous Filebeat installation that was not removed cleanly.
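A quick way to confirm the first cause is to append a test line to the monitored file (the path below is only an illustration):
# echo "test entry $(date)" >> /var/log/access.log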

三、Logstash usage in detail

1. The basic Logstash plugins explained

(1) Input plugins
input{                                       #the input section
    stdin{                                   #an empty stdin block reads from the keyboard (standard input)
    }

    stdin{                                   #standard input again, this time with options
        add_field => {"name" => "elk"}       #add a fixed field [optional]
        codec => "plain"                     #codec [optional]
        tags => ["test"]                     #add a tag to every event [optional]
    }
    file{                                    #read from a file; the file plugin tails the file in real time, like tail -f
        type => "log"                        #custom type used to classify events
        path => "/var/log/access.log"        #where to read the log; multiple files are supported: path => ["/xxx/xxx","/xxx/xxx"]
        start_position => "beginning"        #where to start reading; beginning = start of the file, the default is end (the current position)
    }
    elasticsearch {                          #read from Elasticsearch
        index => "nginx_error_10.1.1.10-%{+yyyy.MM.dd}"         #index name; use this name when creating the index pattern in Kibana
        hosts => ["10.1.2.200:9200"]                            #Elasticsearch address
    }
}

(2) Filter plugins
filter{
    grok{                                            #grok filter: extracts the fields you need; requires regular-expression matching
    }
    mutate{                                          #another filter: converts/transforms the fields extracted by grok
    }
    split{                                           #another filter: splits data apart
    }

    date {                                                      #timestamp handling
          match => ["time", "yyyy-MM-dd HH:mm:ss,SSS"]          #parse the time field into Logstash's own @timestamp
    }
}


(3) Output plugins
output{                                      #the output section
    stdout{                                  #print to the screen
        codec => rubydebug                   #useful for debugging
    }
    elasticsearch {                          #write to Elasticsearch
        index => "nginx_error_10.1.1.10-%{+yyyy.MM.dd}"         #index name; use this name when creating the index pattern in Kibana
        hosts => ["10.1.2.200:9200"]                            #Elasticsearch address
    }
    file{                                                       #write to a file
        path => "/var/log/test.txt"
    }
}

2. Using if statements in Logstash

(1) Example 1
#read the system log and the security log separately
input{
    file{
        path => "/var/log/messages"
        type => "system-log"
    }
    
    file{
        path => "/var/log/secure"
        type => "secure-log"
    }
}

filter{
}

#check the type set in input and write each log to its own index
output{
    if [type] == "system-log" {
        elasticsearch {
            hosts => ["172.16.12.33:9200"]
            index => "system-log-%{+YYYY.MM.dd}"    #the system log goes to this index
        }
    }

    if [type] == "secure-log" {
        elasticsearch {
            hosts => ["172.16.12.33:9200"]
            index => "secure-log-%{+YYYY.MM.dd}"     #the security log goes to this index
        }
    }
}
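Before starting Logstash with a configuration like this, the syntax can be checked first; the file name below is whatever you saved the configuration as:
# /application/logstash-5.3.1/bin/logstash -f /application/logstash-5.3.1/config/if.conf --config.test_and_exit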

(2) Example 2
input{
        file{                                        #collect the Apache log
                type => "apache-access"
                path => "/etc/http/logs/access.log"
                start_position => "beginning"
        }

        file {                                       #collect the nginx log
                type => "nginx-access"
                path => "/etc/nginx/logs/access.log"
                start_position => "beginning"
        }
}


filter {
    if [type] == "apache-access" {
        grok {
            match => { "message" => "%{COMBINEDAPACHELOG}" }
        }    
    }
}


output{
       if [type] == "apache-access" {             #把apache日志发送到elk
            elasticsearch {
                index => "messsage-access-172.16.12.33-%{+yyyy.MM.dd}"
                hosts => ["172.16.12.33:9200"]
            }
        }
    
       if [type] == "nginx-access" {             #把nginx日志发送到redis
            redis {
                host => "172.16.12.33"
                port => "6379"
                db => "6"
                data_type => "list"
                key => "nginx-log"
            }
       }
}

3. In Kibana each log entry is called an event; for example, the screenshot below shows three events
(screenshot: three events shown in Kibana)
3.1 Events can be customized; for example, whenever a line in the log starts with a [ character, a new event is started

(1) Custom Logstash configuration file
[root@localhost shell]# cat /application/logstash-5.3.1/config/codec.conf 
input{
    stdin {                               #read input from the keyboard
        codec => multiline{               #codec is an encoding plugin; multiline merges lines into one event
            pattern => "^\["              #match lines that start with [
            negate => true                #lines that do NOT match the pattern...
            what => "previous"            #...are merged into the previous event, so each [ line starts a new event
        }
    }
}

filter{
}


output{
    stdout {                               #print the collected events to the screen
        codec => rubydebug
    }
}

(2) Verification
# ./bin/logstash -f /application/logstash-5.3.1/config/codec.conf   #start Logstash
hello world                     #typed "hello world"; no event is printed
hi Mr.wen                       #typed "hi Mr.wen"; still no event
[                               #only after typing [ is a new event generated
{
    "@timestamp" => 2019-01-23T09:45:43.409Z,
      "@version" => "1",
          "host" => "localhost.localdomain",
       "message" => "hello world\nhi Mr.wen",   #并且将之前的数据写入到新的事件当中
          "tags" => [
        [0] "multiline"
    ]
}
hello world2        #typed "hello world2"
[                   #the next [ produces another new event, with hello world2 written into it
{
    "@timestamp" => 2019-01-23T09:46:20.719Z,
      "@version" => "1",
          "host" => "localhost.localdomain",
       "message" => "[\nhello world2",     
          "tags" => [
        [0] "multiline"
    ]
}
    
(3) When is this codec useful? Some applications print a single logical log entry across several lines, while Logstash reads and records events line by line, which makes the log very awkward to browse in Kibana. Take Kibana's own log as an example: the entry below is really one log record, but Logstash would display it as multiple events. The multiline codec handles this: since each record starts with { and ends with }, it is enough to match lines beginning with { and fold everything else into the previous event.
codec syntax:
input{
    file{
        path => "/application/kibana-5.3.1-linux-x86_64/logs/kibana.log"
        type => "kibana-log"
        start_position => "beginning"

        codec => multiline{            #use the multiline codec to define events
            pattern => "^\{"           #match lines that start with {
            negate => true             #lines that do NOT match the pattern...
            what => "previous"         #...are appended to the previous event
        }
    }
}

Sample log entry:
{"type":"ops","@timestamp":"2019-01-24T06:26:27Z","tags":[],"pid":5284,"os":{"load":[0.07666015625,0.337890625,0.2900390625],"mem":{"total":3472035840,"free":160493568},"uptime":16644},"proc":{"uptime":131.896,"mem":{"rss":80441344,"heapTotal":65286144,"heapUsed":50450224,"external":27834},"delay":0.4665459990501404},"load":{"requests":{},"concurrents":{"5601":0},"responseTimes":{},"sockets":{"http":{"total":0},"https":{"total":0}}},"message":"memory: 48.1MB uptime: 0:02:12 load: [0.08 0.34 0.29] delay: 0.467"}

4. Converting the nginx log to JSON (JSON keeps each nginx log field separate)

(1) Edit the nginx configuration file and add the log format below (the keys are custom names, the values are nginx log variables)
log_format json '{ "@timestamp": "$time_iso8601", '         #timestamp used by ELK
                         '"time": "$time_iso8601", '        #log time
                         '"remote_addr": "$remote_addr", '  #client IP (may be a proxy server's IP)
                         '"remote_user": "$remote_user", '  #client user name
                         '"body_bytes_sent": "$body_bytes_sent", '   #bytes of body sent to the client
                         '"request_time": "$request_time", '         #time taken to serve the request
                         '"status": "$status", '                     #HTTP status code
                         '"host": "$host", '                         #host name or address the client typed in the browser
                         '"request": "$request", '                   #the request line
                         '"request_method": "$request_method", '     #request method (GET, POST, HEAD, etc.)
                         '"uri": "$uri", '
                         '"http_referrer": "$http_referer", '        #referrer URL
                         '"body_bytes_sent": "$body_bytes_sent", '   #bytes of body sent to the client (duplicate of the field above)
                         '"http_x_forwarded_for": "$http_x_forwarded_for", '  #the client's real IP (first public egress IP) when behind a proxy
                         '"http_user_agent": "$http_user_agent" '    #visitor's operating system and browser
                    '}';
      access_log  /var/log/nginx/json_access.log  json; #then add a JSON-format access log
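Before restarting, nginx can check the new format block for syntax errors:
# nginx -t     #should report that the configuration file test is successful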
    
(2) Restart nginx, generate some requests, and check the log output
# /etc/init.d/nginx restart
# ab -n 1000 -c 1 http://127.0.0.1/
# tail -f /var/log/nginx/json_access.log    #the log now looks like this
--------------------------------------------------------
{ "@timestamp": "2019-01-29T10:40:02+08:00", "time": "2019-01-29T10:40:02+08:00", "remote_addr": "172.16.12.33", "remote_user": "-", "body_bytes_sent": "612", "request_time": "0.000", "status": "200", "host": "172.16.12.33", "request": "GET / HTTP/1.0", "request_method": "GET", "uri": "/index.html", "http_referrer": "-", "body_bytes_sent":"612", "http_x_forwarded_for": "-", "http_user_agent": "ApacheBench/2.3" }
----------------------------------------------------------

(3) Configure Logstash
input{
    file{
        path => "/var/log/nginx/json_access.log"           #这里调用nginx日志
        type => "access-log"
        codec => "json"                #这里用来指定json格式的nginx日志
    }

}

filter{
}

output{
    stdout {                               #输出到屏幕便于调试
        codec => rubydebug
    }
}

(4) Check the Logstash output; the fields are now separated.
# ab -n 10 -c 1 http://172.16.12.33/      #use ab to write 10 test requests into the nginx log first

#Logstash output on the screen
{
                 "request" => "GET / HTTP/1.0",
         "body_bytes_sent" => "612",
          "request_method" => "GET",
                    "type" => "access-log",
                     "uri" => "/index.html",
         "http_user_agent" => "ApacheBench/2.3",
             "remote_user" => "-",
                    "path" => "/var/log/nginx/access.log",
              "@timestamp" => 2019-01-29T06:39:35.000Z,
            "request_time" => "0.000",
                    "host" => "172.16.12.33",
    "http_x_forwarded_for" => "-",
                "@version" => "1",
           "http_referrer" => "-",
                    "time" => "2019-01-29T14:39:35+08:00",
              "remote_add" => "172.16.12.33",
                  "status" => "200"
}

(5) Then the events can be written into Elasticsearch
input{
    file{
        path => "/var/log/nginx/access.log"
        type => "access-log"
        codec => "json"                   #这里用来指定json格式的nginx日志
    }

}

filter{
}

output{
    stdout {
        codec => rubydebug
    }

    elasticsearch {
        index => "json_access_172.16.12.33-%{+yyyy.MM.dd}"
        hosts => ["172.16.12.33:9200"]
    }
}
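
Once Logstash is running, a quick query against Elasticsearch confirms that the index was created:
# curl 'http://172.16.12.33:9200/_cat/indices?v' | grep json_access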

How to convert Apache and Tomcat logs to JSON format: https://blog.csdn.net/zypzilong/article/details/77763520

5. Listening for and collecting system logs with Logstash

(1) Configure Logstash
[root@localhost config]# cat syslog.conf 
input{
    syslog {
        type => "system-syslog"
        port => "514"                    #定义syslog的端口号
    }
}

output{
    stdout {
        codec => rubydebug
    }
}

(2) Start Logstash and confirm that port 514 is listening
# ./bin/logstash -f /application/logstash-5.3.1/config/syslog.conf
# netstat -npult|grep java
tcp        0      0 ::ffff:127.0.0.1:9600       :::*                        LISTEN      2991/java           
tcp        0      0 :::514                      :::*                        LISTEN      2991/java           
udp        0      0 :::514                      :::*                                    2991/java 


(3) Edit the system log configuration file rsyslog.conf (from CentOS 6.4 on, syslog.conf was renamed to rsyslog.conf)
# tail -1  /etc/rsyslog.conf 
*.* @@172.16.12.33:514        #forward all system log messages to Logstash (@@ is TCP; a single @ would be UDP)
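rsyslog has to be restarted for the forwarding rule to take effect (CentOS 6 style init script shown):
# /etc/init.d/rsyslog restart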

(4) Logstash now prints the system log messages to the screen
{
          "severity" => 6,
           "program" => "kernel",
           "message" => "imklog 5.8.10, log source = /proc/kmsg started.\n",
              "type" => "system-syslog",
          "priority" => 6,
         "logsource" => "localhost",
        "@timestamp" => 2019-01-31T07:26:32.000Z,
          "@version" => "1",
              "host" => "172.16.12.33",
          "facility" => 0,
    "severity_label" => "Informational",
         "timestamp" => "Jan 31 15:26:32",
    "facility_label" => "kernel"
}
{
          "severity" => 6,
           "program" => "rsyslogd",
           "message" => "[origin software=\"rsyslogd\" swVersion=\"5.8.10\" x-pid=\"3106\" x-info=\"http://www.rsyslog.com\"] start\n",
              "type" => "system-syslog",
          "priority" => 46,
         "logsource" => "localhost",
        "@timestamp" => 2019-01-31T07:26:32.000Z,
          "@version" => "1",
              "host" => "172.16.12.33",
          "facility" => 5,
    "severity_label" => "Informational",
         "timestamp" => "Jan 31 15:26:32",
    "facility_label" => "syslogd"
}

(5) The logger command can be used to generate a system log entry for testing
# logger hehe
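logger also accepts a tag and a priority, which makes it easier to see how the facility and severity fields come through in Logstash (these are standard logger options):
# logger -t mytest -p local0.info "test message from logger"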

6. To push specific lines into an index, or to backfill logs that went missing, the tcp input plugin can be used

(1) Create tcp.conf
# vi /application/logstash-5.3.1/config/tcp.conf
input{
    tcp {
        type => "tcp"          #event type
        port => "6666"         #listening port
        mode => "server"       #this host acts as the server; other machines push data to it
    }
}

output{
    stdout {
        codec => rubydebug     #print to the screen first; switch to elasticsearch once it looks right
    }
}

(2) Start Logstash and check the listening port
# /application/logstash-5.3.1/bin/logstash -f /application/logstash-5.3.1/config/tcp.conf 
# netstat -npult|grep java          
tcp        0      0 :::6666       :::*        LISTEN      4601/java 

(3) Then, from any server, use nc to submit the log lines you want to add
# echo "hehe" |nc 172.16.12.33 6666
# nc 172.16.12.33 6666 < /etc/resolv.conf   #or like this

(4) Check the result
{
    "@timestamp" => 2019-02-12T03:15:55.996Z,
          "port" => 47497,
      "@version" => "1",
          "host" => "172.16.12.33",
       "message" => "hehe",                         #这里
          "type" => "tcp"
}

{
    "@timestamp" => 2019-02-12T03:17:00.092Z,
          "port" => 47498,
      "@version" => "1",
          "host" => "172.16.12.33",
       "message" => "nameserver 202.106.0.20",      #这里
          "type" => "tcp"
}

四、Using grok: the Apache log example

Logstash filter plugin: grok
Introduction: grok uses regular expressions to extract custom fields from log lines. Unlike nginx, Apache cannot simply log in JSON, so grok is used to pull fields out of the Apache log.
Note: grok is usually avoided in production, mainly because:
1. grok has a significant performance cost
2. It is inflexible unless you know Ruby well
Typical production pipeline (a sketch of the first hop follows):
logstash [collects the data and hands it to] redis [which hands it to] python [which does the filtering instead of grok and writes the result to] elasticsearch
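A minimal sketch of that first hop, with Logstash pushing raw events into a Redis list for an external consumer (such as a Python script) to process. The host, key, and log path below are placeholders:

input{
    file{
        path => "/var/log/nginx/access.log"       #placeholder log file
        type => "nginx-access"
    }
}

output{
    redis {
        host => "172.16.12.33"         #Redis server (placeholder)
        port => "6379"
        db => "6"
        data_type => "list"            #push each event onto a Redis list
        key => "nginx-log"             #the consumer pops events from this key
    }
}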
1. Where Logstash's built-in grok patterns are stored (useful as a reference)

# pwd
/application/logstash-5.3.1/vendor/bundle/jruby/1.9/gems/logstash-patterns-core-4.1.0/patterns/
[root@localhost patterns]# ll
total 112
-rw-rw-r-- 1 root root  1831 Apr 18  2017 aws
-rw-rw-r-- 1 root root  4831 Apr 18  2017 bacula
-rw-rw-r-- 1 root root   260 Apr 18  2017 bind
-rw-rw-r-- 1 root root  2154 Apr 18  2017 bro
-rw-rw-r-- 1 root root   879 Apr 18  2017 exim
-rw-rw-r-- 1 root root 10095 Apr 18  2017 firewalls
-rw-rw-r-- 1 root root  5335 Apr 18  2017 grok-patterns   
-rw-rw-r-- 1 root root  3251 Apr 18  2017 haproxy
-rw-rw-r-- 1 root root   980 Apr 18  2017 httpd           #Apache patterns
-rw-rw-r-- 1 root root  1265 Apr 18  2017 java
-rw-rw-r-- 1 root root  1087 Apr 18  2017 junos
-rw-rw-r-- 1 root root  1037 Apr 18  2017 linux-syslog
-rw-rw-r-- 1 root root    74 Apr 18  2017 maven
-rw-rw-r-- 1 root root    49 Apr 18  2017 mcollective
-rw-rw-r-- 1 root root   190 Apr 18  2017 mcollective-patterns
-rw-rw-r-- 1 root root   614 Apr 18  2017 mongodb
-rw-rw-r-- 1 root root  9597 Apr 18  2017 nagios
-rw-rw-r-- 1 root root   142 Apr 18  2017 postgresql
-rw-rw-r-- 1 root root   845 Apr 18  2017 rails
-rw-rw-r-- 1 root root   224 Apr 18  2017 redis
-rw-rw-r-- 1 root root   188 Apr 18  2017 ruby
-rw-rw-r-- 1 root root   404 Apr 18  2017 squid

2. The example from the official docs: splitting an Apache-style log line into fields
Official documentation: https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
Reference: https://www.cnblogs.com/liaojiafa/p/6159591.html?utm_source=itdadao&utm_medium=referral

(1) Below is a sample Apache-style log line; the grok plugin will be used to split it into separate fields
192.168.10.100 GET /index.html 15824 0.043

(2) Configure the grok filter
# vim /etc/logstash/conf.d/grok.conf
input{
    stdin{}         #an empty stdin block reads whatever is typed on the keyboard
}
 
filter {
  grok {
    match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
  }
}
 
 
output{
   stdout{
        codec => rubydebug       #print the processed data to the screen
   }
}

#Field mapping (see Logstash's built-in grok patterns for more variables)
#grok syntax: %{PATTERN_NAME:field_name}
%{IP:client}       maps to 192.168.10.100
%{WORD:method}     maps to GET
%{URIPATHPARAM:request} maps to /index.html
%{NUMBER:bytes}    maps to 15824
%{NUMBER:duration} maps to 0.043


(3) Start Logstash
# /application/logstash-5.3.1/bin/logstash -f /application/logstash-5.3.1/config/apache_grok.conf
192.168.10.100 GET /index.html 15824 0.043   #type this line in and Logstash splits it automatically (note: the spacing must match exactly, not one space more or less)
{
      "duration" => "0.043",
       "request" => "/index.html",
    "@timestamp" => 2019-02-14T06:30:21.435Z,
        "method" => "GET",
         "bytes" => "15824",
      "@version" => "1",
          "host" => "localhost.localdomain",
        "client" => "192.168.10.100",
       "message" => "192.168.10.100 GET /index.html 15824 0.043"
}


(4) The above is the official example illustrating how grok works; now configure it for the real Apache log
#first confirm that Apache is using the combined log format
# vi /etc/httpd/conf/httpd.conf
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
CustomLog logs/access_log combined

#then configure our Logstash
# vim /application/logstash-5.3.1/config/apache_grok.conf
input{
    file{
        path => "/etc/httpd/logs/access_log" 
        type => "apache-log"
    }     
}            

filter{
    grok {
    match => { "message" => "%{COMBINEDAPACHELOG}" }     #这里我们使用logstash自带的combined apachelog模块就可以了
  } 
    
}
     
output{
    stdout { 
        codec => rubydebug
    }
}

#Check the output: the Apache log has been split into fields
{
        "message" => "172.16.12.6 - - [14/Feb/2019:14:48:41 +0800] \"GET / HTTP/1.1\" 304 - \"-\" \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36\"",
        "@timestamp" => 2019-02-14T06:48:42.170Z,
        "request" => "/",                           #请求的url页面
          "agent" => "\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36\"",                #客户端使用的浏览器
           "auth" => "-",
          "ident" => "-",
           "verb" => "GET",                          #请求的类型
           "type" => "apache-log",
           "path" => "/etc/httpd/logs/access_log",
       "referrer" => "\"-\"",
       "response" => "304",                           #HTTP状态码
       "clientip" => "172.16.12.6",                   #客户端ip
       "@version" => "1",                             
           "host" => "localhost.localdomain",          #请求的主机
    "httpversion" => "1.1",                            #http版本
      "timestamp" => "14/Feb/2019:14:48:41 +0800"      #请求时间
}
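
Once the parsed fields look right, an elasticsearch output can be added next to stdout, in the same style as the earlier examples; the index name and host below are illustrative:

output{
    stdout {
        codec => rubydebug
    }

    elasticsearch {
        index => "apache-access-%{+YYYY.MM.dd}"      #illustrative index name
        hosts => ["172.16.12.33:9200"]
    }
}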