Rsyslog + Kafka + ELK (Cluster) Deployment

Original post: http://www.suixinl.top/index.php/archives/39/

Our company is currently going through MLPS (等保) compliance, which requires log auditing. The initial plan is to use rsyslog to collect the logs from all servers; since we also need to query logs during day-to-day operations, the logs rsyslog collects should be searchable through a single interface.
Log types collected: system logs, MySQL logs, firewall, WAF, F5, SFTP, and SMTP logs, etc.
Open-source products used: Rsyslog, Kafka, ELK
Processing pipeline: VM Rsyslog --> Rsyslog Server --omkafka--> Kafka --> Logstash --> Elasticsearch --> Kibana
Note: the omkafka module is only available in rsyslog v8.7.0 and later.
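You can confirm the rsyslog version on each host before proceeding (output shown is illustrative; any version >= 8.7.0 is fine):

~]# rsyslogd -v | head -n 1
rsyslogd 8.24.0, compiled with: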

Environment:

Server          IP ADDR        Application
ELK Node1       10.10.27.125   zookeeper kafka elasticsearch logstash kibana
ELK Node2       10.10.27.126   zookeeper kafka elasticsearch logstash
ELK Node3       10.10.27.127   zookeeper kafka elasticsearch logstash
Rsyslog server  10.10.27.121   Rsyslog server
Rsyslog Node    10.10.27.122   Rsyslog client

1. Install docker and docker-compose

I'm running RHEL, so docker is installed from Red Hat's extras repository:

yum install -y docker
wget https://github.com/docker/compose/releases/download/1.25.5/docker-compose-Linux-x86_64
mv docker-compose-Linux-x86_64 /usr/bin/docker-compose
chmod +x /usr/bin/docker-compose
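A quick sanity check that both binaries are installed and on PATH (version output will match whatever you installed):

~]# docker --version
~]# docker-compose --version
docker-compose version 1.25.5, build 8a1c60f6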

2. Pull the required images

docker pull zookeeper:3.4.13
docker pull wurstmeister/kafka
docker pull elasticsearch:7.7.0
docker pull daocloud.io/library/kibana:7.7.0
docker pull daocloud.io/library/logstash:7.7.0
docker tag wurstmeister/kafka:latest kafka:2.12-2.5.0
docker tag daocloud.io/library/kibana:7.7.0 kibana:7.7.0
docker tag daocloud.io/library/logstash:7.7.0 logstash:7.7.0
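Confirm the images and tags are in place before writing the compose file:

~]# docker images | egrep 'zookeeper|kafka|elasticsearch|kibana|logstash'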

3. Prepare the application config files

mkdir -p /data/zookeeper
mkdir -p /data/kafka
mkdir -p /data/logstash/conf
mkdir -p /data/es/conf 
mkdir -p /data/es/data
chmod 777 /data/es/data
mkdir -p /data/kibana/conf

~]# cat /data/es/conf/elasticsearch.yml 
cluster.name: es-cluster
network.host: 0.0.0.0
node.name: master1    # change node.name on each node, e.g. master2, master3
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: true
node.data: true
network.publish_host: 10.10.27.125      # change to each node's own IP address
discovery.zen.minimum_master_nodes: 1
discovery.zen.ping.unicast.hosts: ["10.10.27.125","10.10.27.126","10.10.27.127"]
cluster.initial_master_nodes: ["master1","master2","master3"]   # entries must match each node's node.name
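Only node.name and network.publish_host differ between nodes, so you can copy the file to the other two hosts and patch those two keys. A minimal sketch (the NODE_NAME/NODE_IP values shown are for node 2):

~]# NODE_NAME=master2 NODE_IP=10.10.27.126
~]# sed -i -e "s/^node.name: .*/node.name: ${NODE_NAME}/" \
       -e "s/^network.publish_host: .*/network.publish_host: ${NODE_IP}/" \
       /data/es/conf/elasticsearch.yml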

~]# cat /data/logstash/conf/logstash.conf 
input {
    kafka {
        topics => ["system-log"]    # must match the topic configured in rsyslog
        bootstrap_servers => "10.10.27.125:9092,10.10.27.126:9092,10.10.27.127:9092"
    }
}
output {
    elasticsearch {
        hosts => ["10.10.27.125:9200","10.10.27.126:9200","10.10.27.127:9200"]
        index => "system-log-%{+YYYY.MM.dd}"
    }
    stdout {
        codec => rubydebug
    }
}
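Logstash can syntax-check a pipeline without starting it. Assuming the image's entrypoint passes flags through to logstash (true for the stock 7.x images), a pre-flight check looks like:

~]# docker run --rm -v /data/logstash/conf/logstash.conf:/usr/share/logstash/pipeline/logstash.conf \
     logstash:7.7.0 --config.test_and_exit -f /usr/share/logstash/pipeline/logstash.conf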

~]# cat /data/kibana/conf/kibana.yml 
#
# ** THIS IS AN AUTO-GENERATED FILE **
#

# Default Kibana configuration for docker target
server.name: kibana
server.host: "0.0.0.0"
elasticsearch.hosts: [ "http://10.10.27.125:9200","http://10.10.27.126:9200","http://10.10.27.127:9200" ]
monitoring.ui.container.elasticsearch.enabled: true


4. Edit the docker-compose.yml

~]# mkdir /data/elk
~]# cat /data/elk/docker-compose.yml
version: '2.1'   # must be 2.1 or higher, otherwise compose reports a version format error
services:
  elasticsearch:
    image: elasticsearch:7.7.0
    container_name: elasticsearch
    environment:
      ES_JAVA_OPTS: -Xms1g -Xmx1g
    network_mode: host   # with host networking the ports: mappings are ignored (also true for zookeeper and kafka below)
    volumes:
      - /data/es/conf/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /data/es/data:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
  kibana:
    image: kibana:7.7.0
    container_name: kibana
    links:
      - elasticsearch
    depends_on:
      - elasticsearch   # start kibana only after elasticsearch is up
    volumes:
      - /data/kibana/conf/kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      - 5601:5601
  logstash:
    image: logstash:7.7.0
    container_name: logstash
    volumes:
      - /data/logstash/conf/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    depends_on:
      - elasticsearch
    links:
      - elasticsearch:es
    ports:
      - 4560:4560

  zookeeper:
    image: zookeeper:3.4.13
    container_name: zookeeper
    environment:
      ZOO_PORT: 2181
      ZOO_DATA_DIR: /data/zookeeper/data
      ZOO_DATA_LOG_DIR: /data/zookeeper/logs
      ZOO_MY_ID: 1    # for a three-node cluster, change this ID on the other two nodes, e.g. 2, 3
      ZOO_SERVERS: "server.1=10.10.27.125:2888:3888 server.2=10.10.27.126:2888:3888 server.3=10.10.27.127:2888:3888"
    volumes:
      - /data/zookeeper:/data/zookeeper
    network_mode: host
    ports:
      - 2181:2181
  
  kafka:
    image: kafka:2.12-2.5.0
    container_name: kafka
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: 1  # kafka broker cluster ID; must be unique, change it on the other two nodes
      KAFKA_PORT: 9092
      KAFKA_HEAP_OPTS: "-Xms1g -Xmx1g"
      KAFKA_HOST_NAME: 10.10.27.125        # change to this node's own IP on the other two nodes
      KAFKA_ADVERTISED_HOST_NAME: 10.10.27.125    # change to this node's own IP on the other two nodes
      KAFKA_LOG_DIRS: /data/kafka
      KAFKA_ZOOKEEPER_CONNECT: 10.10.27.125:2181,10.10.27.126:2181,10.10.27.127:2181
    network_mode: host
    volumes:
      - /data:/data
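One host-level prerequisite the compose file cannot express: Elasticsearch requires vm.max_map_count of at least 262144, or the container will fail its bootstrap checks. Set it on all three nodes:

~]# sysctl -w vm.max_map_count=262144
~]# echo 'vm.max_map_count=262144' >> /etc/sysctl.conf   # persist across reboots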

5. Deploy ELK

# Deploy (on each of the three nodes, adjust the config files and docker-compose.yml first)
~]# cd /data/elk
~]# docker-compose up -d
# Stop the running containers
~]# docker-compose stop
# Start a single container on its own
~]# docker-compose up -d kafka
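After bringing the stack up, check that all containers stay running, and inspect the logs of any that do not:

~]# docker-compose ps
~]# docker-compose logs -f elasticsearch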

6. Verify cluster status

(1) Verify zookeeper:

]# docker exec -it zookeeper bash
bash-4.4# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Mode: follower

(2) Verify kafka:

]# docker exec -it kafka bash
bash-4.4# kafka-topics.sh --list --zookeeper 10.10.27.125:2181
__consumer_offsets
system-log
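To verify end-to-end message flow through the brokers, you can run the console producer and consumer shipped in the kafka image (in two separate shells; a line typed into the producer should appear in the consumer):

~]# docker exec -it kafka bash
bash-4.4# kafka-console-producer.sh --broker-list 10.10.27.125:9092 --topic system-log
bash-4.4# kafka-console-consumer.sh --bootstrap-server 10.10.27.126:9092 --topic system-log --from-beginning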

(3) Verify elasticsearch

]# curl '10.10.27.125:9200/_cat/nodes?v'
ip            heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
10.10.27.126           57          81   0    0.37    0.15     0.09 dilmrt    *      master2
10.10.27.125           34          83   0    0.11    0.10     0.06 dilmrt    -      master1
10.10.27.127           24          81   0    0.03    0.06     0.06 dilmrt    -      master3
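The cluster health endpoint should report "number_of_nodes" : 3 and a green status once the cluster has formed:

~]# curl '10.10.27.125:9200/_cluster/health?pretty'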

(4) Verify kibana

Open http://10.10.27.125:5601 in a browser.
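Kibana also exposes a status API, which is handy when the UI is unreachable:

~]# curl -s http://10.10.27.125:5601/api/status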

7. Deploy rsyslog to feed logs into ELK

(1) Rsyslog server

~]# cat /etc/rsyslog.conf 
# Provides UDP syslog reception
$ModLoad imudp
$UDPServerRun 514

# Provides TCP syslog reception
$ModLoad imtcp
$InputTCPServerRun 514

~]# cat /etc/rsyslog.d/default.conf
#### GLOBAL DIRECTIVES ####
# Use a custom log format (this template overrides the traditional default)
$template myFormat,"%timestamp% %fromhost-ip% %syslogtag% %msg%\n"
$ActionFileDefaultTemplate myFormat

# Store each client's logs in a separate directory keyed by source IP
# (the /data/rsyslog base directory must be created manually)
$template RemoteLogs,"/data/rsyslog/%fromhost-ip%/%fromhost-ip%_%$YEAR%-%$MONTH%-%$DAY%.log"
# Exclude the local host's own logs; record remote hosts only
:fromhost-ip, !isequal, "127.0.0.1" ?RemoteLogs
~]# systemctl restart rsyslog

To feed the logs collected by the rsyslog server into ELK, the server needs rsyslog's omkafka output module.

~]# yum -y install rsyslog-kafka
~]# cat /etc/rsyslog.d/kafka.conf
# Load the omkafka and imfile modules
module(load="omkafka")
module(load="imfile")
 
# system log template
template(name="SystemlogTemplate" type="string" string="%hostname%<-+>%syslogtag%<-+>%msg%\n")
 
# ruleset
ruleset(name="systemlog-kafka") {
    # forward logs to kafka
    action(
        type="omkafka"
        template="SystemlogTemplate"
        topic="system-log"
        broker="10.10.27.125:9092,10.10.27.126:9092,10.10.27.127:9092"
    )
}

input(type="imfile" Tag="Systemlog" File="/data/rsyslog/*/*.log" Ruleset="systemlog-kafka")
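Before restarting, rsyslog can validate the configuration syntax in place:

~]# rsyslogd -N1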

~]# systemctl restart rsyslog

(2) Rsyslog client

~]# cat /etc/rsyslog.conf    # append this one line
*.*    @10.10.27.121:514
# all logs are forwarded to the rsyslog server over UDP ("@" = UDP; use "@@" for TCP)
~]# systemctl restart rsyslog
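To test the full path, generate a message on the client and confirm it lands on the server (10.10.27.122 is the client from the table above):

client ~]# logger "rsyslog pipeline test"
server ~]# tail /data/rsyslog/10.10.27.122/10.10.27.122_*.log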

At this point rsyslog is ready. Verify that log files appear under /data/rsyslog, that the index shows up in ELK, and that the log contents are visible in Kibana after creating the index pattern.
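On the Elasticsearch side, the daily index should appear shortly after the first messages flow through:

~]# curl '10.10.27.125:9200/_cat/indices?v' | grep system-log

In Kibana, create an index pattern of system-log-* (Management -> Index Patterns) and then browse the logs under Discover.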
