Introducing Redis and Kafka into the ELK stack

Filebeat
Filebeat -> Logstash -> Elasticsearch -> Kibana
Filebeat
When Logstash cannot keep up with the incoming volume, some log data may be lost.
Filebeat config files are copied around by hand, and once Logstash is scaled out the Filebeat configs drift apart and become hard to manage.

Architecture optimization
Filebeat                                       Logstash
Filebeat  ->  Redis/Kafka  ->  Logstash (grok parsing) -> Elasticsearch (indexing) -> Kibana (display)
Filebeat                                       Logstash
Introduce Redis or Kafka to buffer the logs between Filebeat and Logstash.

Installing Redis

#Setting up the Redis server
yum install -y wget net-tools gcc gcc-c++ make tar openssl openssl-devel cmake
cd /usr/local/src
wget 'http://download.redis.io/releases/redis-4.0.9.tar.gz'
tar -zxf redis-4.0.9.tar.gz
cd redis-4.0.9
make
mkdir -pv /usr/local/redis/conf /usr/local/redis/bin
cp src/redis* /usr/local/redis/bin/
cp redis.conf /usr/local/redis/conf

#Change the Redis config (daemonize, dir, requirepass)
#Set the password to luoshuyu
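
The three directives to change look like this (the dir path below is just an example; any writable directory works):
daemonize yes           # run as a background daemon instead of in the foreground
dir /usr/local/redis/   # where RDB/AOF persistence files are written
requirepass luoshuyu    # clients must AUTH with this password before running commands
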
[root@k8s-node01 redis-4.0.9]# grep -Ev '^$|^#' redis.conf 
bind 0.0.0.0
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /tmp
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
requirepass luoshuyu
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
slave-lazy-flush no
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble no
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
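
With daemonize yes, redis-server returns to the shell immediately, so a quick start-and-verify sketch (paths from the build steps above):

#Start Redis with our config
/usr/local/redis/bin/redis-server /usr/local/redis/conf/redis.conf
#Check that it answers; expect PONG
/usr/local/redis/bin/redis-cli -a luoshuyu ping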


#Configure Filebeat to write to Redis
filebeat.inputs:
- type: log
  tail_files: true
  backoff: "1s"
  paths:
      - /usr/local/nginx/logs/access.json.log
  fields:
    type: access
  fields_under_root: true
output:
  redis:
      hosts: ["192.168.1.104"]
      port: 6379
      password: 'luoshuyu'
      key: 'access'
#Logstash reads from Redis; only the input section changes
input {
  redis {
    host => '192.168.1.104'
    port => 6379
    key => "access"
    data_type => "list"
    password => 'luoshuyu'
  }
}
#Redis is completely transparent to the rest of the pipeline: the filter and output sections stay exactly the same
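
Filebeat pushes each event onto a Redis list named after the configured key, and the Logstash redis input pops from that same list. You can watch the queue with redis-cli (the key access comes from the Filebeat config above; the list drains quickly while Logstash keeps up):

#Queue depth: grows when Logstash falls behind, shrinks as it catches up
/usr/local/redis/bin/redis-cli -a luoshuyu llen access
#Peek at one raw event without consuming it
/usr/local/redis/bin/redis-cli -a luoshuyu lrange access 0 0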

Introducing Kafka
Kafka depends on ZooKeeper.

tar -zxf zookeeper-3.4.13.tar.gz
mv zookeeper-3.4.13 /usr/local/
cp /usr/local/zookeeper-3.4.13/conf/zoo_sample.cfg  /usr/local/zookeeper-3.4.13/conf/zoo.cfg

#Starting ZooKeeper
#Change the config
clientPortAddress=0.0.0.0
#Start it
/usr/local/zookeeper-3.4.13/bin/zkServer.sh start
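
A quick way to confirm ZooKeeper came up (the script ships with the tarball):

/usr/local/zookeeper-3.4.13/bin/zkServer.sh status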

cd /usr/local/src/
tar -zxf kafka_2.11-2.1.1.tgz
mv kafka_2.11-2.1.1 /usr/local/kafka_2.11
#Starting Kafka
#Change the Kafka config: update the listen address and the ZooKeeper connection address
#Start in the foreground
/usr/local/kafka_2.11/bin/kafka-server-start.sh /usr/local/kafka_2.11/config/server.properties
#Start Kafka in the background
nohup /usr/local/kafka_2.11/bin/kafka-server-start.sh /usr/local/kafka_2.11/config/server.properties >/tmp/kafka.log 2>&1 &
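
Once the broker is up, the topic tools can list what exists. Note that Kafka 2.1's kafka-topics.sh still talks to ZooKeeper rather than the broker (assuming ZooKeeper listens on its default port 2181):

#List topics; luoshuyu appears automatically after Filebeat sends its first event
/usr/local/kafka_2.11/bin/kafka-topics.sh --zookeeper localhost:2181 --list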
#Send Filebeat logs to Kafka
filebeat.inputs:
- type: log
  tail_files: true
  backoff: "1s"
  paths:
      - /usr/local/nginx/logs/access.json.log
  fields:
    type: access
  fields_under_root: true

output:
  kafka:
    hosts: ["192.168.1.104:9092"] # can be a list of brokers (a cluster)
    topic: luoshuyu
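
Before wiring up Logstash, you can confirm events are actually landing in the topic with the console consumer that ships with Kafka:

#Print everything in the luoshuyu topic from the beginning
/usr/local/kafka_2.11/bin/kafka-console-consumer.sh --bootstrap-server 192.168.1.104:9092 --topic luoshuyu --from-beginning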

#Logstash reads from Kafka; again only the input section changes
input {
  kafka {
    bootstrap_servers => "192.168.1.104:9092"
    topics => ["luoshuyu"]
    group_id => "luoshuyu"
    codec => "json"
  }
}
#Inspecting Kafka queue state
#List consumer groups
./kafka-consumer-groups.sh  --bootstrap-server 192.168.1.104:9092 --list
#Describe the group's queue (offsets and lag)
./kafka-consumer-groups.sh  --bootstrap-server 192.168.1.104:9092 --group luoshuyu --describe
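
In the describe output, the LAG column shows how many messages Logstash still has to consume. To inspect the topic layout itself (partitions and replicas), kafka-topics.sh can describe it; on Kafka 2.1 this again goes through ZooKeeper:

#Show partitions and replicas for the topic
./kafka-topics.sh --zookeeper 192.168.1.104:2181 --describe --topic luoshuyu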

ELK 7


Before ELK 7, Kibana was open to anyone who could reach it.
ELK 7 adds authentication: a username and password can be required.


#Installing Kibana 7 is exactly the same as Kibana 6

#Install Elasticsearch
cd /usr/local/src/
tar -zxf elasticsearch-7.1.1-linux-x86_64.tar.gz -C /usr/local/
#Set the environment variable
export PATH=$PATH:/usr/local/elasticsearch-7.1.1/bin

#Elasticsearch config: elasticsearch.yml
path.data: data
path.logs: logs
network.host: 127.0.0.1
http.port: 9200
xpack.security.enabled: true  
discovery.type: single-node

#Limit the JVM memory in jvm.options
-Xms200M
-Xmx200M

#Starting Elasticsearch (it refuses to run as root, hence the dedicated elk user)
useradd -s /sbin/nologin elk
chown -R elk:elk /usr/local/elasticsearch-7.1.1/
su - elk -s /bin/bash
elasticsearch -d

#Elasticsearch 7 requires passwords to be set for the built-in users
elasticsearch-setup-passwords interactive
#password used in these notes: 200803854
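
The interactive tool prompts for a new password for each built-in user in turn (on 7.1: elastic, apm_system, kibana, logstash_system, beats_system, remote_monitoring_user); these notes appear to reuse 200803854 throughout.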

#Verify the startup succeeded
curl -u elastic:200803854 127.0.0.1:9200 #access with username and password
#Update the es password in the Kibana config and restart Kibana; the Kibana web UI then requires logging in with an es account
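
Concretely, the Kibana side is two settings in kibana.yml; a minimal sketch, assuming the built-in kibana user was given the same password during elasticsearch-setup-passwords:

#kibana.yml: the credentials Kibana itself uses to talk to Elasticsearch
elasticsearch.username: "kibana"
elasticsearch.password: "200803854"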


#Install Logstash
cd /usr/local/src
tar -zxf logstash-7.1.1.tar.gz -C /usr/local/
#Set the environment variable
export PATH=$PATH:/usr/local/logstash-7.1.1/bin

#Update the Logstash JVM limits in jvm.options
-Xms200M
-Xmx200M

#Logstash config for parsing the Nginx access log: logstash.conf
input {
  file {
    path => "/usr/local/nginx/logs/access.log"
  }
}
filter {
    grok {
        match => {
            "message" => '(?<remote_ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) - - \[(?<timestamp>\S+ \+\d+)\] "(?<method>[A-Z]+) (?<request>\S+) HTTP/\d\.\d" (?<status>\d+) (?<bytes>\d+) "[^"]+" "(?<agent>[^"]+)"'
        }
        remove_field => ["message","@version","path"]
    }
    date {
        match => ["timestamp", "dd/MMM/yyyy:HH:mm:ss Z"]
        target => "@timestamp"
    }
}
output {
  elasticsearch {
    hosts => ["http://localhost:9200"]
    user => "elastic" # new in 7: specify the es user
    password => "200803854" # new in 7: the es password
    index => "logstash-%{+YYYY.MM.dd}"
  }
}
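
To test the whole chain, run Logstash in the foreground with this config, then list the indices the same way Logstash authenticates; a logstash-YYYY.MM.dd index should appear once events flow:

logstash -f logstash.conf
curl -u elastic:200803854 '127.0.0.1:9200/_cat/indices?v'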
