建立EFK系统日志分析平台

 

 

一、拓扑图

 

二、下载

elasticsearch-7.2.0-linux-x86_64.tar.gz  

filebeat-7.2.0-linux-x86_64.tar.gz  

kibana-7.2.0-linux-x86_64.tar.gz


此处的本机IP是192.168.100.11

共需要以上三个安装包

配置安装elasticsearch
​
root@node01:~/ELK# tar zxvf elasticsearch-7.2.0-linux-x86_64.tar.gz
​
root@node01:~/ELK# ls
elasticsearch-7.2.0  elasticsearch-7.2.0-linux-x86_64.tar.gz  filebeat-7.2.0-linux-x86_64.tar.gz  kibana-7.2.0-linux-x86_64.tar.gz
​
root@node01:~/ELK# mv elasticsearch-7.2.0 /usr/local/es
​
root@node01:~/ELK# cd /usr/local/es/
​
root@node01:/usr/local/es# ls
bin  config  data  jdk  lib  LICENSE.txt  logs  modules  NOTICE.txt  plugins  README.textile
​
root@node01:/usr/local/es# vim config/elasticsearch.yml
​
#cluster.name: my-application
node.name: node01
node.master: true
node.data: true
#node.attr.rack: r1
#path.data: /path/to/data
#path.logs: /path/to/logs
bootstrap.memory_lock: false
bootstrap.system_call_filter: false  # 注意(review):ES 7.x 已移除 system_call_filter 设置,若启动时报 "unknown setting" 错误,请删除此行 — 请以官方文档为准
network.host: 192.168.100.11
#http.port: 9200
#discovery.seed_hosts: ["host1", "host2"]
cluster.initial_master_nodes: ["node01"]
#gateway.recover_after_nodes: 3
#action.destructive_requires_name: true
​
root@node01:/usr/local/es# vim config/jvm.options   # 调整JVM堆内存参数(-Xms/-Xmx):一般设为物理内存的一半,且最大不超过32G

安装kibana

root@node01:~/ELK# tar zxvf kibana-7.2.0-linux-x86_64.tar.gz
​
root@node01:~/ELK# mv kibana-7.2.0-linux-x86_64 /usr/local/kibana
​
root@node01:~/ELK# cd /usr/local/kibana/
​
root@node01:/usr/local/kibana# ls
bin           config  LICENSE.txt  node_modules  optimize      plugins     src     webpackShims
built_assets  data    node         NOTICE.txt    package.json  README.txt  target  x-pack
​
root@node01:/usr/local/kibana# vim config/kibana.yml
server.port: 5601
server.host: 192.168.100.11
#server.basePath: ""
#server.rewriteBasePath: false
#server.maxPayloadBytes: 1048576
#server.name: "your-hostname"
elasticsearch.hosts: ["http://192.168.100.11:9200"]
#elasticsearch.preserveHost: true
#kibana.index: ".kibana"
#kibana.defaultAppId: "home"
#elasticsearch.username: "user"
#elasticsearch.password: "pass"
#server.ssl.enabled: false
#server.ssl.certificate: /path/to/your/server.crt
#server.ssl.key: /path/to/your/server.key
#elasticsearch.ssl.certificate: /path/to/your/client.crt
#elasticsearch.ssl.key: /path/to/your/client.key
#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
#elasticsearch.ssl.verificationMode: full
#elasticsearch.pingTimeout: 1500
#elasticsearch.requestTimeout: 30000
#elasticsearch.requestHeadersWhitelist: [ authorization ]
#elasticsearch.customHeaders: {}
#elasticsearch.shardTimeout: 30000
#elasticsearch.startupTimeout: 5000
#elasticsearch.logQueries: false
#pid.file: /var/run/kibana.pid
#logging.dest: stdout
#logging.silent: false
#logging.quiet: false
#logging.verbose: false
#ops.interval: 5000
#i18n.locale: "en"
​

安装filebeat,此处只配置一台机器 

 

 


​
root@node01:~/ELK# tar zxvf filebeat-7.2.0-linux-x86_64.tar.gz
root@node01:~/ELK# mv filebeat-7.2.0-linux-x86_64 /usr/local/filebeat
root@node01:~/ELK# cd /usr/local/filebeat/
root@node01:/usr/local/filebeat# ls
data        filebeat                     filebeat.reference.yml  kibana       logs    modules.d   README.md
fields.yml  filebeat-7.2.0-linux-x86_64  filebeat.yml            LICENSE.txt  module  NOTICE.txt  start.log
root@node01:/usr/local/filebeat# vim filebeat.yml
​
#=========================== Filebeat inputs =============================
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/*.log
    #- c:\programdata\elasticsearch\logs\*
  #exclude_lines: ['^DBG']
  #include_lines: ['^ERR', '^WARN']
  #exclude_files: ['.gz$']
  #fields:
  #  level: debug
  #  review: 1
  #multiline.pattern: ^\[
  #multiline.negate: false
  # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
  #multiline.match: after
#============================= Filebeat modules ===============================
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
  #reload.period: 10s
#==================== Elasticsearch template setting ==========================
setup.template.settings:
  index.number_of_shards: 1
  #index.codec: best_compression
  #_source.enabled: false
#================================ General =====================================
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging
#============================== Dashboards =====================================
#setup.dashboards.enabled: false
#setup.dashboards.url:
#============================== Kibana =====================================
setup.kibana:
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  host: "192.168.100.11:5601"
  #space.id:
#============================= Elastic Cloud ==================================
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
#================================ Outputs =====================================
# Configure what output to use when sending the data collected by the beat.
#-------------------------- Elasticsearch output ------------------------------
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["192.168.100.11:9200"]
  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"
#----------------------------- Logstash output --------------------------------
#output.logstash:
  # The Logstash hosts
  #hosts: ["localhost:5044"]
  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"
  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"
#================================ Processors =====================================
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
#================================ Logging =====================================
#logging.level: debug
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]
#============================== Xpack Monitoring ===============================
# Set to true to enable the monitoring reporter.
#monitoring.enabled: false
#monitoring.elasticsearch:
#================================= Migration ==================================
# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true
​
​
​
选择分析什么日志,此处做的是系统日志

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值