第一步 下载所有的软件包
# Download Elasticsearch, Kibana and Filebeat — all three must be the same version (8.4.0).
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-8.4.0-linux-x86_64.tar.gz
# Fixed typo: was "wgte".
wget https://artifacts.elastic.co/downloads/kibana/kibana-8.4.0-linux-x86_64.tar.gz
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-8.4.0-linux-x86_64.tar.gz
第二步 解压文件到/usr/local文件夹下
# Extract each downloaded archive into /usr/local (same order as downloaded).
for pkg in elasticsearch kibana filebeat; do
  tar -zxvf "${pkg}-8.4.0-linux-x86_64.tar.gz" -C /usr/local
done
第三步
# Step 3: configure Elasticsearch (single-node setup).
cd /usr/local/elasticsearch-8.4.0/config/
vim elasticsearch.yml
# --- elasticsearch.yml content to set inside vim ---
node.name: node-1
path.data: /usr/local/elasticsearch-8.4.0/data
path.logs: /usr/local/elasticsearch-8.4.0/logs
# Transport binds to loopback only; HTTP listens on all interfaces so the
# REST API is reachable from other hosts.
network.host: 127.0.0.1
http.host: 0.0.0.0
http.port: 9200
# Single-node discovery: seed list and initial master both point at this node.
discovery.seed_hosts: ["127.0.0.1"]
cluster.initial_master_nodes: ["node-1"]
找到config/目录下面的elasticsearch.yml配置文件,把安全认证开关(xpack.security.enabled 和 xpack.security.http.ssl 下的 enabled)从原先的true都改成false,实现免密登录访问即可,修改这两处都为false后:
# Open the ES transport (9300) and HTTP (9200) ports, then restart the firewall.
firewall-cmd --zone=public --add-port=9300/tcp --permanent
firewall-cmd --zone=public --add-port=9200/tcp --permanent
systemctl restart firewalld.service
# Elasticsearch requires vm.max_map_count >= 262144. 'sysctl -w' is NOT
# persistent — also add "vm.max_map_count=262144" to /etc/sysctl.conf so it
# survives reboots.
sysctl -w vm.max_map_count=262144
# Elasticsearch refuses to run as root, so create a dedicated user and give it
# ownership of the install directory.
useradd es
chown -R es:es /usr/local/elasticsearch-8.4.0
# Start as the es user (-d = run as a daemon). The original ran this as root,
# which Elasticsearch rejects at startup.
su es -c '/usr/local/elasticsearch-8.4.0/bin/elasticsearch -d'
最后记得在防火墙/云安全组中放行9200端口,然后就可以用 http://服务器IP:9200 访问了
# Rename the extracted directory to a shorter path. The original had this
# command twice — once with a relative path that only works when the current
# directory is /usr/local — so keep only the absolute form.
mv /usr/local/filebeat-8.4.0-linux-x86_64 /usr/local/filebeat-8.4.0
cd /usr/local/filebeat-8.4.0/
vim filebeat.yml
# filebeat.yml — minimal config: tail one JSON-formatted log file and ship it
# to the local Elasticsearch. (Indentation restored; YAML requires it.)
max_procs: 1

filebeat.inputs:
  # Each - is an input. Most options can be set at the input level, so
  # you can use different inputs for various configurations.
  # Below are the input specific configurations.
  - type: log
    id: my-logs-id
    # Change to true to enable this input configuration.
    enabled: true
    # Paths that should be crawled and fetched. Glob based paths.
    paths:
      #- /www/wwwlogs/*.log
      - /www/wwwlogs/ww.baidu.com.log
      #- c:\programdata\elasticsearch\logs\*
    # Parse each line as JSON and merge the parsed keys into the event root.
    json.keys_under_root: true
    # Fixed: the original had a bare "overwrite_keys"; the option must be
    # namespaced under json.* to take effect.
    json.overwrite_keys: true

output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["127.0.0.1:9200"]
  indices:
    - index: "filebeat-index-%{+YYYY.MM.dd}"
      # NOTE(review): a custom index name in Filebeat 8.x usually also requires
      # setup.template.name/setup.template.pattern and setup.ilm.enabled: false
      # — verify against the Filebeat docs for your exact version.
# Hand the Filebeat install to the es user as well.
chown -R es:es /usr/local/filebeat-8.4.0
# Fixed typo: was "su se" — the user created earlier is "es".
su es
cd
# Configure Kibana.
cd /usr/local/kibana-8.4.0/config
vim kibana.yml
# --- kibana.yml content to set inside vim ---
server.port: 5601
# Listen on all interfaces so the UI is reachable from other hosts.
server.host: "0.0.0.0"
# Point Kibana at the local Elasticsearch HTTP endpoint.
elasticsearch.hosts: ["http://127.0.0.1:9200"]
#kibana.index: ".kibana"
# --- end kibana.yml ---
# Kibana, like Elasticsearch, should not run as root — give it to the es user.
chown -R es:es /usr/local/kibana-8.4.0/
# Open the Kibana port and restart the firewall (run as root, before su).
firewall-cmd --zone=public --add-port=5601/tcp --permanent
systemctl restart firewalld.service
# Kibana refuses to run as root; switch to the es user first.
su es
/usr/local/kibana-8.4.0/bin/kibana &
# Alternatively, run detached with all output discarded. (The original line
# began with stray prose, which would be invalid shell.)
nohup /usr/local/kibana-8.4.0/bin/kibana >/dev/null 2>&1 &
# Check the Elasticsearch cluster health. URLs are quoted so that '?' is not
# subject to shell globbing.
curl "http://localhost:9200/_cluster/health?pretty"
# List all indices.
curl "http://localhost:9200/_cat/indices?v"
# ES reference:       https://blog.csdn.net/u013111855/article/details/122719018
# Filebeat reference: https://www.cnblogs.com/zxone/p/13668571.html
## -d "publish" starts filebeat with debug output for the publish selector.
nohup ./filebeat -e -c filebeat.yml -d "publish" >/dev/null 2>&1 &
# Stop filebeat. The original command had an unbalanced backtick (it would not
# even parse) and used the 'ps -ef | grep | awk | kill -9' anti-pattern;
# pkill matches by name and sends SIGTERM first for a clean shutdown.
pkill -f filebeat || true
# Free port 9200 if something still holds it. The original piped 'ps -ef' into
# lsof, which is a no-op (lsof ignores stdin); -t prints bare PIDs so no
# grep/awk filtering is needed. kill -9 only as a last resort.
lsof -t -i:9200 | xargs -r kill -9
参考文档 https://blog.csdn.net/qq_43753286/article/details/123532979
官方文档所有配置options https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-reference-yml.html
filebeat的配置文件
max_procs: 1 # limit filebeat to one CPU core to avoid starving business workloads
queue.mem.events: 2048 # number of events buffered in the in-memory queue awaiting send (default 4096)
queue.mem.flush.min_events: 1536 # must be less than queue.mem.events; raising it increases throughput (default 2048)
#queue.mem.flush.timeout: 1s # default value; how long to wait for min_events before flushing anyway
filebeat.inputs:
- type: log
enabled: true
ignore_older: 48h # skip files not modified within this window (based on file mtime)
max_bytes: 20480 # *size limit for a single log line, recommended to set (default 10MB; queue.mem.events * max_bytes bounds part of the memory footprint)
recursive_glob.enabled: true # enable ** glob expansion across up to 8 directory levels: /A/**/*.log => /A/*.log ~ /A/**/**/**/**/**/**/**/**/*.log
paths: # log file paths to tail
- /data/logs/**/*.log
exclude_files: [.*file1.*|stdout.log|.*file2.*] # files to skip, regex-matched
fields: # extra fields added to each event's JSON
appName: ${serviceName}
agentHost: ${hostIp}
fields_under_root: true # place the added fields at the top level of the JSON instead of under "fields"
tail_files: false # not recommended to keep enabled: starts reading from the end of each file (freshest data), but with log rotation content can be lost — prefer false combined with ignore_older
multiline: # multi-line log stitching (https://www.elastic.co/guide/en/beats/filebeat/7.2/multiline-examples.html)
pattern: '\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}' # matches a line starting with [YYYY-MM-DD HH:mm:ss
negate: true # invert the pattern (apply to lines that do NOT match it)
match: after # append such lines to the previous line; pattern + negate + match together mean: any line not starting with [YYYY-MM-DD HH:mm:ss is merged into the line above it
max_lines: 200 # maximum lines merged into one event; extra lines are discarded (default 500)
timeout: 1s # flush the pending multi-line event after this delay even if the next event has not arrived (default 5s)
output.kafka:
enabled: true
hosts: ['ip1:9092','ip2:9092']
topic: 'my_topic'
partition.round_robin:
reachable_only: true
worker: 4
required_acks: 1
compression: gzip
max_message_bytes: 1000000 # ~1 MB (the original comment said "10MB", but 1000000 bytes is about 1 MB)
nginx的日志格式
# Nginx JSON access-log format, consumed by Filebeat's json.* parser above.
# NOTE(review): values such as $http_referer and $request_uri are not escaped
# here — a quote or backslash in them would produce invalid JSON. nginx >=
# 1.11.8 supports 'log_format json escape=json ...'; consider adding it.
log_format json '{ "@timestamp": "$time_iso8601", '
'"remote_addr": "$remote_addr", '
'"request": "$uri", '
'"request_uri": "$request_uri", '
'"request_method":"$request_method",'
'"server_protocol":"$server_protocol",'
'"status": $status, '
'"bytes": $body_bytes_sent, '
'"up_addr": "$upstream_addr",'
'"request_time": $request_time,'
'"http_referer": "$http_referer",'
'"http_host": "$host"'
' }';