简单搭建一个 ELK 系统(All-in-One 单机)安装
如果测试环境内存过小,记得调节各组件 JVM 的内存参数(如 jvm.options)。
[root@ansible01 ~]# ll | grep rpm
-rw-r--r-- 1 root root 322922070 Feb 26 18:17 elasticsearch-7.11.1-x86_64.rpm
-rw-r--r-- 1 root root 34284829 Feb 26 19:44 filebeat-7.11.1-x86_64.rpm
-rw-r--r-- 1 root root 255478021 Feb 26 20:03 kibana-7.11.1-x86_64.rpm
-rw-r--r-- 1 root root 365825086 Feb 26 20:03 logstash-7.11.1-x86_64.rpm
[root@ansible01 ~]# ll | grep tgz
-rw-r--r-- 1 root root 68684719 Mar 2 23:27 kafka_2.12-2.7.0.tgz
filebeat------>kafka ------>logstash----->Elasticsearch---->kibana
filebeat:组件配置文件
# Filebeat configuration: tail system logs and ship them to Kafka.
filebeat.inputs:
  - type: log
    # BUGFIX: was "ture" (typo) — an invalid boolean, so the input was never
    # enabled and no logs were collected.
    enabled: true
    paths:
      - /var/log/*/*.log
    # Place the custom fields at the top level of each event instead of
    # under a "fields" sub-object.
    fields_under_root: true
    fields:
      system: true
      tag: syslog
      # NOTE(review): "systomlog" looks like a typo for "systemlog" —
      # confirm against consumers before renaming, since it is a data value.
      log_topic: systomlog

output.kafka:
  hosts: ["localhost:9092"]
  topic: 'test-logs'          # must match the "topics" list in the Logstash kafka input
  partition.round_robin:
    reachable_only: false     # distribute across all partitions, even unreachable ones
  required_acks: 1            # wait for the partition leader to acknowledge
  compression: gzip
  max_message_bytes: 1000000  # drop single events larger than ~1 MB
单节点安装kafka
- 先下载kafka压缩包(解压不做参数修改)
[root@ansible01 ~]# cd kafka_2.12-2.7.0
# BUGFIX: the trailing "&" was missing. nohup alone does NOT background a
# command — zookeeper-server-start.sh would block the foreground shell, so
# the Kafka broker could never be started from the same session.
# Start ZooKeeper first, then the Kafka broker.
[root@ansible01 kafka_2.12-2.7.0]# nohup bin/zookeeper-server-start.sh config/zookeeper.properties >> /root/zookeeper.out 2>&1 &
[root@ansible01 kafka_2.12-2.7.0]# nohup bin/kafka-server-start.sh config/server.properties >> /root/kafka.out 2>&1 &
logstash配置文件
# Logstash pipeline: consume events from Kafka and index them into Elasticsearch.
input {
# Kafka consumer — topic must match Filebeat's output.kafka topic ("test-logs").
kafka {
bootstrap_servers => "localhost:9092"
topics => ["test-logs"]
# Number of consumer threads; should not exceed the topic's partition count.
consumer_threads => 5
# Adds kafka metadata (topic, partition, offset) under [@metadata][kafka].
decorate_events => true
# Start from the newest offset when no committed offset exists.
auto_offset_reset => "latest"
}
}
output {
elasticsearch {
hosts => ["http://192.168.30.121:9200"]
# One index per day, e.g. test-logs-2021.03.02.
index => "test-logs-%{+YYYY.MM.dd}"
# Uncomment and set credentials if Elasticsearch security is enabled.
#user => "elastic"
#password => "changeme"
}
}
# 这里可通过 logstash 所接收的一些字段值进行筛选过滤(filter 插件)
Elasticsearch的简单配置文件(单节点)
# Minimal single-node Elasticsearch 7.x configuration.
cluster.name: elasticsearch
node.name: qt-elasticsearch-master
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
bootstrap.memory_lock: false
# Listen on all interfaces; this triggers production bootstrap checks on 7.x.
network.host: 0.0.0.0
discovery.seed_hosts: ["xx.xx.xx.xx"]   # replace with this host's real IP
# BUGFIX: must list an existing node.name. The original value
# "elasticsearch-master" did not match node.name above
# ("qt-elasticsearch-master"), which prevents initial cluster bootstrap.
cluster.initial_master_nodes: ["qt-elasticsearch-master"]
kibana
配置文件
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://localhost:9200"]
i18n.locale: "zh-CN"
#开启中文
配置好以后就可以启动以上所有服务,进行参数配置研究了。