ELK Real-Time Log Analysis Platform -- (3) Extending with Filebeat


Lab Environment

Machines used in this lab:

Hostname | IP              | Roles       | Spec
elk1     | 192.168.245.170 | es, es-head | 1 core / 1 GB
elk2     | 192.168.245.171 | es          | 1 core / 1 GB
elk3     | 192.168.245.172 | es, kibana  | 1 core / 2 GB
elk4     | 192.168.245.173 | LogStash    | 2 cores / 2 GB

For the /etc/hosts mapping, refer to the elk1 setup.

Installing Filebeat

Note: install Filebeat on whichever server holds the logs you want to collect (no Java environment required).

1. Install

Upload filebeat-7.17.0-x86_64.rpm to /opt on elk4, or download it directly:

[root@elk4 opt]# cd /opt/  && wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.17.0-x86_64.rpm
[root@elk4 opt]# yum -y install filebeat-7.17.0-x86_64.rpm
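As an optional sanity check, confirm the package installed and the binary runs:

[root@elk4 opt]# filebeat version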
2. Edit the configuration file

Note: the Filebeat config is YAML, so indentation matters; each level is indented by two spaces.

[root@elk4 opt]# cp /etc/filebeat/filebeat.yml /etc/filebeat/filebeat.yml.bak

# edit filebeat.yml; after the changes it should read as follows
[root@elk4 opt]# grep -Ev "#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log   # multiple inputs may be defined; see the collection examples later in this article
  enabled: true
  paths:
    - /usr/local/nginx/logs/access.log     # multiple paths allowed; add further lines starting with a dash at the same indentation
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1   # number of primary shards
setup.kibana:
output.elasticsearch:
  hosts: ["192.168.245.170:9200"]   # 可以多个["IP1","IP2"],下面还有账号密码,如果有得取消注释写好配置
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
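Before starting the service, the YAML and the ES connection can be validated with filebeat's built-in checks; this is optional but catches indentation mistakes early:

[root@elk4 opt]# filebeat test config -c /etc/filebeat/filebeat.yml
[root@elk4 opt]# filebeat test output -c /etc/filebeat/filebeat.yml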
3. Start Filebeat
# stop logstash first
[root@elk4 opt]# systemctl stop logstash

# start filebeat
[root@elk4 opt]# systemctl start filebeat
[root@elk4 opt]# systemctl status filebeat
# open the ES cluster's es-head page (port 9100) and refresh; the new filebeat index should appear

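If you prefer the command line to es-head, the same check works against the _cat API (the default index name begins with "filebeat-"):

[root@elk4 opt]# curl -s 'http://192.168.245.170:9200/_cat/indices?v' | grep filebeat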

Collecting nginx Logs with Filebeat

1. Edit the configuration file
# edit filebeat.yml; after the changes it should read as follows
[root@elk4 opt]# grep -Ev "#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /usr/local/nginx/logs/access.log
  tags: ["nginx"]
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 2   
setup.kibana:
setup.ilm.enabled: false
setup.template.name: "nginx2-access"
setup.template.pattern: "nginx2-access-*"
setup.template.enabled: true
setup.template.overwrite: true
output.elasticsearch:
  hosts: ["192.168.245.170:9200"]
  indices:
    - index: "nginx2-access-web-%{+yyyy.MM.dd}"
      when.contains:
        tags: "nginx"
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
2. Restart Filebeat
[root@elk4 opt]# systemctl restart filebeat

## Check in ES; if no index shows up for a while, try restarting filebeat, or request a page from nginx so new log lines are produced
# open the ES cluster's es-head page (port 9100) and refresh; the new index should appear

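The same check from the command line, matching the index pattern configured above:

[root@elk4 opt]# curl -s 'http://192.168.245.170:9200/_cat/indices/nginx2-access-*?v'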

Collecting System Logs with Filebeat

1. Edit the configuration file
# edit filebeat.yml; after the changes it should read as follows
[root@elk4 opt]# grep -Ev "#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /usr/local/nginx/logs/access.log
    - /usr/local/nginx/logs/nginx.log
  tags: ["nginx"]
- type: log
  enabled: true
  paths:
    - /var/log/messages
  tags: ["system"]
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 2
setup.kibana:
setup.ilm.enabled: false
setup.template.name: "nginx2-access"
setup.template.pattern: "nginx2-access-*"
setup.template.enabled: true
setup.template.overwrite: true
output.elasticsearch:
  hosts: ["192.168.245.170:9200"]
  indices:   # one indices list with an entry per tag; a second "indices:" key would be a duplicate YAML key and override the first
    - index: "nginx2-access-web-%{+yyyy.MM.dd}"
      when.contains:
        tags: "nginx"
    - index: "system-%{+yyyy.MM.dd}"
      when.contains:
        tags: "system"
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
2. Restart Filebeat
[root@elk4 opt]# systemctl restart filebeat

# open the ES cluster's es-head page (port 9100) and refresh; both indices should appear

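Both index patterns can also be checked in one _cat call:

[root@elk4 opt]# curl -s 'http://192.168.245.170:9200/_cat/indices/nginx2-access-*,system-*?v'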

Using Filebeat with Logstash

1. In filebeat.yml, first comment out the Elasticsearch output

2. Then enable the Logstash output (a sketch of the resulting fragment follows)
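The original screenshots are not reproduced here; as a sketch, the output section of filebeat.yml ends up looking like this (5044 is the default beats port, and 192.168.245.173 is elk4, where Logstash runs):

# Elasticsearch output disabled
#output.elasticsearch:
#  hosts: ["192.168.245.170:9200"]

# Logstash output enabled
output.logstash:
  hosts: ["192.168.245.173:5044"]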

3. Create the Logstash config file
[root@elk4 opt]# cp /etc/logstash/conf.d/access.conf /etc/logstash/conf.d/nginx.conf

[root@elk4 opt]# vim /etc/logstash/conf.d/nginx.conf
input {
    beats {
      port => 5044
      type => "beat-nginx"
      codec => "json"
    }
}
output {
     elasticsearch {
       hosts => ["192.168.245.170:9200"]
       index => "%{type}-%{+yyyy.MM.dd}"
     }
}
4. Restart Filebeat
[root@elk4 opt]# systemctl restart filebeat
5. Start Logstash with this config file
[root@elk4 opt]# logstash -f /etc/logstash/conf.d/nginx.conf >> /dev/null &
6. Verify
[root@elk4 opt]# netstat -ntpl |grep 5044

Open the ES cluster's es-head page (port 9100) and refresh; the new index should appear.
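Since the beats input sets type => "beat-nginx" and the output index is "%{type}-%{+yyyy.MM.dd}", the index can also be checked directly:

[root@elk4 opt]# curl -s 'http://192.168.245.170:9200/_cat/indices/beat-nginx-*?v'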

ELK + FileBeat + Redis Architecture

Architecture: Filebeat -> Redis (as a message queue) -> Logstash -> Elasticsearch

1. Upload Redis

Upload redis-6.2.6.tar.gz to /opt on elk4:

[root@elk4 ~]# cd /opt

[root@elk4 opt]# tar xvf redis-6.2.6.tar.gz
2. Build and install

Compile and install (make's install target defaults to /usr/local, so the binaries land in /usr/local/bin):

[root@elk4 opt]# cd redis-6.2.6
[root@elk4 redis-6.2.6]# make && make install   # to install elsewhere use "make PREFIX=/usr/local/redis install"; PREFIX must be uppercase
3. Edit the Redis configuration file
# confirm the Redis binaries are now in /usr/local/bin
[root@elk4 redis-6.2.6]# ls /usr/local/bin/
redis-benchmark  redis-check-rdb  redis-sentinel
redis-check-aof  redis-cli        redis-server


# edit the config file; the effective settings are:
[root@elk4 redis-6.2.6]# grep -Ev "#|^$" /opt/redis-6.2.6/redis.conf 
bind 192.168.245.173   # change to this host's IP
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes   # run in the background
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
databases 16
always-show-logo no
set-proc-title yes
proc-title-template "{title} {listen-addr} {server-mode}"
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
rdb-del-sync-files no
dir ./
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-diskless-load disabled
repl-disable-tcp-nodelay no
replica-priority 100
acllog-max-len 128
requirepass 123456   # set a password
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
lazyfree-lazy-user-del no
lazyfree-lazy-user-flush no
oom-score-adj no
oom-score-adj-values 0 200 800
disable-thp yes
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
jemalloc-bg-thread yes
4. Start Redis
[root@elk4 redis-6.2.6]# redis-server /opt/redis-6.2.6/redis.conf 
[root@elk4 redis-6.2.6]# ps -ef | grep redis
root      33504      1  0 13:29 ?        00:00:00 redis-server 192.168.245.173:6379
root      33510  29095  0 13:30 pts/0    00:00:00 grep --color=auto redis
5. Verify
[root@elk4 redis-6.2.6]# redis-cli -h 192.168.245.173
192.168.245.173:6379> AUTH 123456
OK
192.168.245.173:6379> ping
PONG
6. Edit the Filebeat configuration file
6.1 Comment out the Logstash output (sketch below)

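As a sketch, the fragment enabled in the previous section gets commented out again:

#output.logstash:
#  hosts: ["192.168.245.173:5044"]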

6.2 Add the corresponding Redis output
# ------------------------------- redis --------------------------------------
output.redis:
  hosts: ["192.168.245.173:6379"]
  password: "123456"
  key: "nginx"
  db: 0
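After Filebeat is restarted (step 6.6), you can peek at the queue to confirm events are arriving; note the list may read 0 once Logstash starts draining it:

[root@elk4 opt]# redis-cli -h 192.168.245.173 -a 123456 LLEN nginx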
6.3 Disable one of the two tagged inputs, so only the nginx log stream feeds the Redis key


6.4 Create the Logstash config file
[root@elk4 redis-6.2.6]# vim /etc/logstash/conf.d/redis.conf
input {
    redis {
      port => "6379"
      type => "nginx-redis"
      data_type => "list"
      db => "0"
      key => "nginx"
      password => "123456"
      host => "192.168.245.173"
    }
}
filter{
     date{
        match => ["time_local","dd/MMM/yyyy:HH:mm:ss Z"]
        target => "@timestamp"
     }
     geoip{
        source => "remote_addr"
        target => ["geoip"]
        fields => ["city_name","region_name","country_name","ip"]
    }
}
output {
     elasticsearch {
       hosts => ["192.168.245.170:9200"]
       index => "%{type}-%{+yyyy.MM.dd}"
     }
}
6.5 Test the config file
[root@elk4 conf.d]# logstash -f ./redis.conf -t
6.6 Restart the services
# restart filebeat
[root@elk4 redis-6.2.6]# systemctl restart filebeat

# stop the Logstash instance started earlier first, otherwise this one will fail to start
[root@elk4 redis-6.2.6]# nohup logstash -f /etc/logstash/conf.d/redis.conf &   # start with the specified config file
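The index name again comes from the type field ("nginx-redis"), so the result can be checked directly:

[root@elk4 redis-6.2.6]# curl -s 'http://192.168.245.170:9200/_cat/indices/nginx-redis-*?v'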

# open the ES cluster's es-head page (port 9100) and refresh; the nginx-redis index should appear


7. Other working example configurations
# Logstash configuration ###################################

# message queue -> Logstash -> Elasticsearch

# This is the Logstash indexer's job, involving the input, filter, and output plugins.
# In the input section, the redis plugin pulls data off the message queue.
# In the output section, the elasticsearch plugin writes it into Elasticsearch
# (the example below prints to stdout for debugging; a conditional elasticsearch output follows further down).

# reading input from redis

[root@logstash-224 /test]# cat  /etc/logstash/conf.d/file.conf 
input {
   redis {    
       data_type => "list"
       host => "192.168.10.32"
       db => "0"
       port => "6379"
       key => "nginx-21"
       password => "123123"
   }
}

filter{
     date{
        match => ["time_local","dd/MMM/yyyy:HH:mm:ss Z"]
        target => "@timestamp"
     }
     geoip{
        source => "remote_addr"
        target => ["geoip"]
        fields => ["city_name","region_name","country_name","ip"]
    }
}

output {
   stdout {}
}





# FileBeat reference =====================

## output: all logs merged into a single key, no conditions
output.redis:
  hosts: ["192.168.10.32:6379"]   # port included in hosts
  password: "123123"
  key: "nginx-21"
  db: 0
  
## output: logs routed to different keys, with conditions (matching inputs sketched after this block)

output.redis:
  hosts: ["192.168.10.32:6379"]   # port included in hosts
  password: "123123"
  keys:
    - key: "nginx_access"   
      when.contains:
        tags: "access"
    - key: "nginx_error"
      when.contains:
        tags: "error"


# LogStash reference ====================

# INPUT reference (one redis input per key)

input {
  redis {
    host => "192.168.10.32"
    port => "6379"
    db => "0"
    password => "123123"
    key => "nginx_access"
    data_type => "list"
  }
  redis {
    host => "192.168.10.32"
    port => "6379"
    db => "0"
    password => "123123"
    key => "nginx_error"
    data_type => "list"
  }
}


# Logstash OUTPUT reference (with conditionals)

output {
    stdout {}
    if "access" in [tags] {
      elasticsearch {
        hosts => "http://localhost:9200"
        manage_template => false
        index => "nginx_access-%{+yyyy.MM.dd}"
      }
    }
    if "error" in [tags] {
      elasticsearch {
        hosts => "http://localhost:9200"
        manage_template => false
        index => "nginx_error-%{+yyyy.MM.dd}"
      }
    }
}
db => "0"
password => "123123"
key => "nginx_error"
data_type => "list"

}
}

Logstash 的 OutPut 参考(带条件判断)

output {
stdout {}
if “access” in [tags] {
elasticsearch {
hosts => “http://localhost:9200”
manage_template => false
index => “nginx_access-%{+yyyy.MM.dd}”
}
}
if “error” in [tags] {
elasticsearch {
hosts => “http://localhost:9200”
manage_template => false
index => “nginx_error-%{+yyyy.MM.dd}”
}
}
}

