- 安装elasticsearch
- 上传安装包
- 解压
# Unpack each ELK 7.9.3 component. Each archive must be its own tar
# invocation -- on the original single line the later "tar -zxvf ..."
# words would have been parsed as extra arguments of the first command.
tar -zxvf elasticsearch-7.9.3-linux-x86_64.tar.gz
tar -zxvf kibana-7.9.3-linux-x86_64.tar.gz
tar -zxvf logstash-7.9.3.tar.gz
tar -zxvf filebeat-7.9.3-linux-x86_64.tar.gz
tar -zxvf metricbeat-7.9.3-linux-x86_64.tar.gz
- 创建普通用户并设置参数
# Create a group and an unprivileged user to run the ELK processes
# (Elasticsearch refuses to start as root).
groupadd basic
# NOTE(review): the original used `useradd elk -g basic -p 123456`, but
# `useradd -p` expects an already-ENCRYPTED hash, not plaintext -- the
# account would end up with an unusable password. Set it separately:
useradd elk -g basic
echo 'elk:123456' | chpasswd
# Give the elk user ownership of the install tree
chown -R elk:basic /usr/local/elk
# As root: raise vm.max_map_count (Elasticsearch's mmap requirement).
vim /etc/sysctl.conf
# ---- add the following line, then save and quit ----
vm.max_map_count=655360
# Apply the change immediately without rebooting
sysctl -p
# Raise open-file and process limits for all users.
vim /etc/security/limits.conf
# ---- append the lines below (the leading '*' is required), save and quit ----
* soft nofile 65536
* hard nofile 131072
* soft nproc 2048
* hard nproc 4096
- 配置elasticsearch
vim /usr/local/elk/elasticsearch-7.9.3/config/elasticsearch.yml
# ---- edit the settings below ----
cluster.name: es-cluster
node.name: node-1
network.host: 0.0.0.0
http.port: 9200
discovery.seed_hosts: ["192.168.2.122", "192.168.2.123","192.168.2.124"]
cluster.initial_master_nodes: ["node-1"]
- 配置kibana
vim /usr/local/elk/kibana-7.9.3-linux-x86_64/config/kibana.yml
# ---- edit the settings below ----
server.port: 5601
server.host: "0.0.0.0"
# A cluster may list several ES hosts here.
# (fixed: the original had a stray second double quote after :9200",
#  which is invalid YAML and would stop Kibana from starting)
elasticsearch.hosts: ["http://192.168.2.122:9200"]
i18n.locale: "zh-CN"
kibana.index: ".kibana"
# Log file path -- the logs directory must be created beforehand
logging.dest: /usr/local/elk/kibana-7.9.3-linux-x86_64/logs/kibana.log
- 配置logstash
入门配置
# Copy the sample config and edit it
cp logstash-sample.conf logstash-es.conf
vim logstash-es.conf
# ---- config content ----
input {
  file {
    path => "/var/log/messages"
  }
}
filter {
}
output {
  elasticsearch {
    hosts => ["192.168.2.123:9200"]
    index => "mytest-%{+YYYY.MM.dd}"
  }
}

区分不同环境日志
# Tag each file input with a log_type field, then route the event to a
# per-environment index name stored in [@metadata] (metadata fields are
# not written into the document itself).
input {
  file {
    path => "/usr/local/elk/test.log"
    add_field => { "log_type" => "test" }
  }
  file {
    path => "/usr/local/elk/prod.log"
    add_field => { "log_type" => "prod" }
  }
}
filter {
  if [log_type] in ["test","dev"] {
    mutate { add_field => { "[@metadata][target_index]" => "test-%{+YYYY.MM}" } }
  } else if [log_type] == "prod" {
    mutate { add_field => { "[@metadata][target_index]" => "prod-%{+YYYY.MM.dd}" } }
  } else {
    mutate { add_field => { "[@metadata][target_index]" => "unknown-%{+YYYY}" } }
  }
}
output {
  elasticsearch {
    hosts => "192.168.2.122:9200"
    index => "%{[@metadata][target_index]}"
  }
}
启动命令
# Start logstash in the background.
# (fixed: the previous step created and edited logstash-es.conf, but the
#  original command started logstash-sample.conf -- the edited pipeline
#  would never have run)
nohup bin/logstash -f config/logstash-es.conf &
# Append a test line for the file input to pick up
echo 12345678911111111111 >> /var/log/messages
查看kibana: http://192.168.2.123:5601
索引管理-->索引模式-->创建mytest-*
- 配置filebeat
1) 配置filebeat.yml
# Two log inputs, each tagged and carrying project/app fields at the
# event root (fields_under_root), shipped to logstash on port 5044.
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /usr/local/elk/test.log
  tags: ["test"]
  fields_under_root: true
  fields:
    project: mytest
    app: mytest
- type: log
  enabled: true
  paths:
    - /usr/local/elk/prod.log
  tags: ["prod"]
  fields_under_root: true
  fields:
    project: myprod
    app: myprod
output.logstash:
  hosts: ["192.168.2.122:5044"]
2) 配置logstash-beat.conf
# Receive events from filebeat and route each one to an index chosen by
# the "app" field set in filebeat.yml.
input {
  beats {
    port => 5044
  }
}
filter {
  if [app] == "mytest" {
    mutate { add_field => { "[@metadata][target_index]" => "mytest-%{+YYYY.MM}" } }
  } else if [app] == "myprod" {
    mutate { add_field => { "[@metadata][target_index]" => "myprod-%{+YYYY.MM.dd}" } }
  } else {
    mutate { add_field => { "[@metadata][target_index]" => "unknown-%{+YYYY}" } }
  }
}
output {
  elasticsearch {
    hosts => "192.168.2.122:9200"
    index => "%{[@metadata][target_index]}"
  }
}
3) 启动logstash和filebeats
4) 测试
# Start filebeat in the background
./filebeat &
# NOTE(review): `journalctl -u filebeat` only shows logs when filebeat
# runs as a systemd service; when launched with `./filebeat &` check the
# logs/ directory under the filebeat install instead.
journalctl -u filebeat
# Start logstash with the beats pipeline
bin/logstash -f config/logstash-beat.conf
# Append test data -- each echo must be its own command. On the original
# single line, the shell applied both redirections to ONE echo (only the
# last redirect wins), so test.log got nothing and prod.log got
# "aaaa echo bbbb".
echo aaaa >> test.log
echo bbbb >> prod.log
- 采集nginx日志
1)配置filebeat.yml
%{IPV4:remote_addr} - (%{USERNAME:remote_user}|-) \[%{HTTPDATE:time_local}\] \"%{WORD:request_method} %{URIPATHPARAM:request_uri} HTTP/%{NUMBER:http_protocol}\" %{NUMBER:http_status} %{NUMBER:body_bytes_sent} \"%{GREEDYDATA:http_referer}\" \"%{GREEDYDATA:http_user_agent}\"
2) 配置logstash-beat.conf
# Parse one nginx combined-format access-log line into named fields
# (remote_addr, remote_user, time_local, request_method, request_uri,
# http_protocol, http_status, body_bytes_sent, http_referer,
# http_user_agent). Place inside the filter {} section of
# logstash-beat.conf; the pattern matches the same layout shown above.
grok { match => { "message" => "%{IPV4:remote_addr} - (%{USERNAME:remote_user}|-) \[%{HTTPDATE:time_local}\] \"%{WORD:request_method} %{URIPATHPARAM:request_uri} HTTP/%{NUMBER:http_protocol}\" %{NUMBER:http_status} %{NUMBER:body_bytes_sent} \"%{GREEDYDATA:http_referer}\" \"%{GREEDYDATA:http_user_agent}\"" } }
3) 启动
# Start filebeat in the background
./filebeat &
# NOTE(review): `journalctl -u filebeat` only works when filebeat runs as
# a systemd unit; for `./filebeat &` inspect filebeat's own log files.
journalctl -u filebeat
# Start logstash with the beats pipeline
bin/logstash -f config/logstash-beat.conf
- 采集java日志
# Java application log plus nginx access log, both shipped to a local
# logstash beats input.
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /home/aaa/logs/agent/agent.log
  tags: ["java"]
  fields_under_root: true
  fields:
    project: java
    app_server: 127.0.0.1
  # Join continuation lines that start with whitespace (e.g. stack-trace
  # lines) onto the previous event
  multiline.pattern: '^\s'
  multiline.negate: false
  multiline.match: after
- type: log
  enabled: true
  paths:
    - /home/aaa/server/nginx/logs/access.log
  tags: ["nginx"]
  fields_under_root: true
  fields:
    project: nginx
    app_server: 127.0.0.1
output.logstash:
  hosts: ["127.0.0.1:5044"]