ELK日志分析系统的简单搭建

1、系统环境:CENTOS7

用三台主机部署

[root@V76 elk]# cat /etc/hosts
127.0.0.1 V76   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1   V76     localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.156.76  v76
192.168.156.82  m2
192.168.156.83  m3

实验当中三台主机均部署elasticsearch,搭建elasticsearch集群

192.168.156.76部署kibana,head

192.168.156.82部署logstash,filebeat

软件下载链接:https://www.elastic.co/cn/downloads/past-releases

node及head下载:https://nodejs.org/en/download/  https://github.com/mobz/elasticsearch-head/archive/master.zip

 

2、系统环境的准备:以下脚本需用sh在三台机器上分别执行,且每台机器只执行一次,不能重复执行(重复执行会向配置文件追加重复内容)

关闭防火墙及SELINUX

[root@V76 elk]# cat /shared/app/sh/75/init/1.sh 
#!/bin/bash
# One-time system prep for ELK: disable SELinux and the firewall.
# Permanently disable SELinux from the next boot onward.
sed -i "s/SELINUX=enforcing/SELINUX=disabled/" /etc/selinux/config
# CentOS 7 is systemd-based: use systemctl rather than the legacy
# service/chkconfig wrappers (consistent with the rest of this guide).
systemctl stop firewalld
systemctl disable firewalld

# Switch SELinux to permissive for the current session; '|| true' keeps
# the script green on hosts where SELinux is already disabled.
setenforce 0 || true

[root@V76 elk]# cat mlimits.sh 
#!/bin/bash
# Raise file-descriptor / process limits and vm.max_map_count as required
# by Elasticsearch. Idempotent: each line is appended at most once, so
# re-running the script no longer duplicates entries.

# append_once LINE FILE — append LINE to FILE unless already present.
append_once() {
  grep -qxF -- "$1" "$2" || echo "$1" >> "$2"
}

append_once "* soft nofile 65536"  /etc/security/limits.conf
append_once "* hard nofile 131072" /etc/security/limits.conf
append_once "* soft nproc 2048"    /etc/security/limits.conf
append_once "* hard nproc 4096"    /etc/security/limits.conf
append_once "vm.max_map_count=655360" /etc/sysctl.conf
sysctl -p

安装JDK,JDK先拷贝到脚本指定的路径

[root@V76 mesos]# cat java.sh 
#!/bin/bash
# Install Oracle JDK 8 from a pre-copied tarball and register it in
# /etc/profile. NOTE: the final 'source /etc/profile' only affects this
# script's own shell; interactive sessions must re-source it themselves
# (the doc already says so after the listing).
cd /shared/app || exit 1
if [ ! -d /tmp/java ]; then
  mkdir -p /tmp/java
fi
tar -zxvf jdk-8u181-linux-x64.tar.gz -C /tmp/java/
# Preserve any previous installation instead of overwriting it.
if [ -d /usr/local/java ]; then
  mv /usr/local/java /usr/local/javaold
fi
mv /tmp/java/jdk1.8.0_181 /usr/local/java
cp /etc/profile /etc/profile.bak   # regular file: -r is not needed
# Append the environment only once so re-runs do not duplicate entries.
if ! grep -q '^export JAVA_HOME=/usr/local/java$' /etc/profile; then
  echo "export JAVA_HOME=/usr/local/java" >> /etc/profile
  echo "export PATH=\$JAVA_HOME/bin:\$PATH" >> /etc/profile
  echo "export CLASSPATH=\$JAVA_HOME/jre/lib/ext:\$JAVA_HOME/lib/tools.jar" >> /etc/profile
fi
source /etc/profile

执行完成再source /etc/profile

 

3、安装NODE.JS,HEAD插件192.168.156.76机器执行

[root@V76 elk]# cat node.sh 
#!/bin/bash
# Install Node.js 10 (required by the elasticsearch-head plugin) and
# export it via /etc/profile. The env lines are appended only once so
# the script is safe to re-run.
tar -Jxf /shared/app/sh/75/elk/node-v10.16.0-linux-x64.tar.xz -C /usr/local/ \
  && mv /usr/local/node-v10.16.0-linux-x64 /usr/local/node
if ! grep -q '^export NODE_HOME=/usr/local/node$' /etc/profile; then
  echo "export NODE_HOME=/usr/local/node" >> /etc/profile
  echo "export PATH=\$NODE_HOME/bin:\$PATH" >> /etc/profile
  echo "export NODE_PATH=\$NODE_HOME/lib/node_modules:\$PATH" >> /etc/profile
fi
# Only affects this subshell; re-source in your login shell afterwards.
source /etc/profile

[root@V76 elk]# cat head.sh 
#!/bin/bash
# Deploy the elasticsearch-head plugin and start it (listens on 9100).
unzip master.zip -d /tmp/
mv /tmp/elasticsearch-head-master/ /usr/local/elasticsearch-head
cd /usr/local/elasticsearch-head || exit 1
npm install -g cnpm --registry=https://registry.npm.taobao.org
cnpm install -g grunt-cli
cnpm install -g grunt
cnpm install grunt-contrib-clean
cnpm install grunt-contrib-concat
cnpm install grunt-contrib-watch
cnpm install grunt-contrib-connect
cnpm install grunt-contrib-copy
cnpm install grunt-contrib-jasmine
# Make the web server listen on all interfaces: insert a hostname line
# just above the 'port:' line — but only if it is not already there, so
# re-running the script does not insert it twice.
if ! grep -q "hostname: '0.0.0.0'" Gruntfile.js; then
  row=$(awk '/port:/{print NR}' Gruntfile.js)
  sed -i "${row}i hostname: '0.0.0.0'," Gruntfile.js
fi
# Start the service exactly once. 'npm run start' itself launches
# 'grunt server'; the original script additionally ran 'grunt server'
# directly, spawning two instances that fight over port 9100.
cd /usr/local/elasticsearch-head/ && nohup npm run start >/dev/null 2>&1 &

部署后WEB界面已能访问:

 

 

在/usr/bin目录写好启动脚本便于启动

[root@V76 elk]# cat /usr/bin/elasticsearch-head
#!/bin/bash
#chkconfig: 2345 55 24
#description: elasticsearch-head service manager
# NOTE(review): the shebang must be the very first line of the file; the
# original listing had a stray '# vim ...' comment above it, which breaks
# interpreter selection and the chkconfig header scan.

# Start elasticsearch-head in the background, detached from the terminal.
START() {
        cd /usr/local/elasticsearch-head/ && nohup npm run start >/dev/null 2>&1 &
}

# Stop it: pgrep -f matches the grunt command line without the
# 'ps | grep | grep -v grep' dance. Try SIGTERM first and only
# escalate to SIGKILL if the process is still alive.
STOP() {
        pgrep -f grunt | xargs -r kill >/dev/null 2>&1
        sleep 1
        pgrep -f grunt | xargs -r kill -9 >/dev/null 2>&1
}

case "$1" in
  start)
        START
        ;;
  stop)
        STOP
        ;;
  restart)
        STOP
        sleep 2
        START
        ;;
  *)
        echo "Usage: elasticsearch-head {start|stop|restart}"
        ;;
esac

 

chmod +x /usr/bin/elasticsearch-head

 

 

4、安装elasticsearch,全部机器

[root@V76 elk]# cat elasticsearch.sh 
#!/bin/bash
# Install Elasticsearch 6.7.1, write the cluster settings, and start the
# node as the unprivileged 'elk' user (ES refuses to run as root).
id elk >/dev/null 2>&1 || useradd elk   # skip if user already exists
tar -zxvf /shared/app/elasticsearch-6.7.1.tar.gz -C /tmp
mv /tmp/elasticsearch-6.7.1/ /usr/local/elasticsearch
# Create BOTH directories referenced by path.data / path.logs below —
# the original only created data/.
mkdir -p /usr/local/elasticsearch/data /usr/local/elasticsearch/logs
chown -R elk:elk /usr/local/elasticsearch
hname=$(hostname)
ip1=$(hostname -I | awk '{print $1}')
# Append the cluster config only once; a re-run must not duplicate keys.
if ! grep -q '^cluster.name: elkcluster' /usr/local/elasticsearch/config/elasticsearch.yml; then
(
cat <<EOF

cluster.name: elkcluster
node.name: $hname
node.master: true
node.data: true
path.data: /usr/local/elasticsearch/data
path.logs: /usr/local/elasticsearch/logs
bootstrap.memory_lock: false
network.host: $ip1
http.port: 9200
transport.tcp.port: 9300
discovery.zen.ping.unicast.hosts: ["192.168.156.76", "192.168.156.82", "192.168.156.83"]
discovery.zen.minimum_master_nodes: 2
http.enabled: true
http.cors.enabled: true
http.cors.allow-origin: "*"
EOF
)>>/usr/local/elasticsearch/config/elasticsearch.yml
fi
su - elk -c "/usr/local/elasticsearch/bin/elasticsearch -d"

安装好查看状态

[root@V76 elk]# curl '192.168.156.76:9200/_cluster/health?pretty'
{
  "cluster_name" : "elkcluster",
  "status" : "green",
  "timed_out" : false,
  "number_of_nodes" : 3,
  "number_of_data_nodes" : 3,
  "active_primary_shards" : 15,
  "active_shards" : 30,
  "relocating_shards" : 0,
  "initializing_shards" : 0,
  "unassigned_shards" : 0,
  "delayed_unassigned_shards" : 0,
  "number_of_pending_tasks" : 0,
  "number_of_in_flight_fetch" : 0,
  "task_max_waiting_in_queue_millis" : 0,
  "active_shards_percent_as_number" : 100.0
}
[root@V76 elk]# curl '192.168.156.76:9200/_cat/master?v'
id                     host           ip             node
tWjj20NqTyycUPz9ofATfw 192.168.156.83 192.168.156.83 m3

此时可以通过上一步部署的插件连接集群,可选集群机器的任一IP+端口

 

将elasticsearch注册为systemd服务;启动服务前先kill掉原有的elasticsearch进程,再以服务方式启动,测试服务是否正常

[root@V76 elk]# cat elserver.sh 
#!/bin/bash
# Register Elasticsearch as a systemd service: write the environment
# file and the unit file, then reload systemd and (re)start the service.

# Environment consumed by the unit via EnvironmentFile= below.
# '\$' keeps the variables literal in the generated file.
(
cat <<EOF
ES_HOME=/usr/local/elasticsearch
JAVA_HOME=/usr/local/java
CLASSPATH=.:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar:\$JAVA_HOME/jre/lib
ES_PATH_CONF=/usr/local/elasticsearch/config
PID_DIR=/usr/local/elasticsearch/run
ES_STARTUP_SLEEP_TIME=5
EOF
)>/etc/sysconfig/elasticsearch

(
cat <<EOF
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target

[Service]
RuntimeDirectory=elasticsearch
PrivateTmp=true
Environment=ES_HOME=/usr/local/elasticsearch
Environment=ES_PATH_CONF=/usr/local/elasticsearch/config
Environment=PID_DIR=/usr/local/elasticsearch/run
EnvironmentFile=-/etc/sysconfig/elasticsearch

WorkingDirectory=/usr/local/elasticsearch

User=elk
Group=elk

ExecStart=/usr/local/elasticsearch/bin/elasticsearch -p \${PID_DIR}/elasticsearch.pid --quiet

# StandardOutput is configured to redirect to journalctl since
# some error messages may be logged in standard output before
# elasticsearch logging system is initialized. Elasticsearch
# stores its logs in /var/log/elasticsearch and does not use
# journalctl by default. If you also want to enable journalctl
# logging, you can simply remove the "quiet" option from ExecStart.
StandardOutput=journal
StandardError=inherit

# Specifies the maximum file descriptor number that can be opened by this process
LimitNOFILE=65535

# Specifies the maximum number of processes
LimitNPROC=4096

# Specifies the maximum size of virtual memory
LimitAS=infinity

# Specifies the maximum file size
LimitFSIZE=infinity

# Disable timeout logic and wait until process is stopped
TimeoutStopSec=0

# SIGTERM signal is used to stop the Java process
KillSignal=SIGTERM

# Send the signal only to the JVM rather than its control group
KillMode=process

# Java process is never killed
SendSIGKILL=no

# When a JVM receives a SIGTERM signal it exits with code 143
SuccessExitStatus=143

[Install]
WantedBy=multi-user.target

# Built for packages-6.7.1 (packages)
EOF
)>/usr/lib/systemd/system/elasticsearch.service
# Unit files are plain configuration; no chmod +x (systemd warns about
# executable unit files). '-p' makes the mkdir safe on re-runs.
mkdir -p /usr/local/elasticsearch/run
touch /usr/local/elasticsearch/run/elasticsearch.pid && chown -R elk:elk /usr/local/elasticsearch
systemctl daemon-reload
systemctl enable elasticsearch   # start at boot, consistent with kibana.sh
systemctl restart elasticsearch

 

 

5、部署kibana  192.168.156.76

[root@V76 elk]# cat kibana.sh 
#!/bin/bash
# Deploy Kibana 6.7.1 and register it as a systemd service.
#tar -zxvf /shared/app/kibana-6.7.1-linux-x86_64.tar.gz -C /tmp
mv /tmp/kibana-6.7.1-linux-x86_64 /usr/local/kibana
# Append the config only once; duplicate keys from a re-run would make
# kibana.yml invalid.
if ! grep -q '^server.port: 5601' /usr/local/kibana/config/kibana.yml; then
(
cat <<EOF
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://192.168.156.76:9200","http://192.168.156.82:9200","http://192.168.156.83:9200"]
logging.dest: /usr/local/kibana/logs/kibana.log
kibana.index: ".kibana"
EOF
)>>/usr/local/kibana/config/kibana.yml
fi

(
cat <<EOF
user="elk"
group="elk"
chroot="/"
chdir="/"
nice=""

KILL_ON_STOP_TIMEOUT=0
EOF
)>/etc/default/kibana

# Manual start for debugging (service is used below instead):
#/usr/local/kibana/bin/kibana &

(
cat <<EOF
[Unit]
Description=Kibana
StartLimitIntervalSec=30
StartLimitBurst=3

[Service]
Type=simple
User=elk
Group=elk
EnvironmentFile=-/etc/default/kibana
EnvironmentFile=-/etc/sysconfig/kibana
ExecStart=/usr/local/kibana/bin/kibana "-c /usr/local/kibana/config/kibana.yml"
Restart=always
WorkingDirectory=/

[Install]
WantedBy=multi-user.target
EOF
)>/etc/systemd/system/kibana.service

# logging.dest above points into logs/: create it before the first start.
mkdir -p /usr/local/kibana/logs
chown -R elk:elk /usr/local/kibana
systemctl daemon-reload
systemctl enable kibana
systemctl start kibana

访问:

 

汉化kibana

汉化包下载地址:https://github.com/anbai-inc/Kibana_Hanization

执行sh hankubana.sh 

[root@V76 elk]# cat hankubana.sh 
#!/bin/bash
# Apply the Chinese translation pack to Kibana and switch the locale.
#unzip /shared/app/Kibana_Hanization-master.zip -d /tmp/
cp -r /tmp/Kibana_Hanization-master/translations/ /usr/local/kibana/src/legacy/core_plugins/kibana/
# Append the locale switch only once; a duplicate key from a re-run
# would make kibana.yml invalid.
grep -q '^i18n.locale:' /usr/local/kibana/config/kibana.yml || \
  echo "i18n.locale: \"zh-CN\"" >> /usr/local/kibana/config/kibana.yml
systemctl restart kibana

效果如图:

 

6、部署logstash ,挑一台机器测试即可,实验用了192.168.156.82

先安装NGINX,再用LOGSTASH对NGINX的日志进行处理

yum install -y nginx

修改NG的配置

[root@m2 bin]# cat /etc/nginx/nginx.conf | grep -v ^#     

user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    # Format consumed by the logstash grok filter for elk_access.log.
    # The grok pattern expects, in order: http_host clientip - user
    # [time] "request" status bytes "referrer" "agent" "xff" request_time.
    # The stock 'main' format lacks the leading $http_host and the
    # trailing $request_time, so grok would never match it.
    log_format  elk_access  '$http_host $remote_addr - $remote_user [$time_local] "$request" '
                            '$status $body_bytes_sent "$http_referer" '
                            '"$http_user_agent" "$http_x_forwarded_for" $request_time';

    access_log  /var/log/nginx/access.log  main;

    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;

    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;

    # Load modular configuration files from the /etc/nginx/conf.d directory.
    # See http://nginx.org/en/docs/ngx_core_module.html#include
    # for more information.
    include /etc/nginx/conf.d/*.conf;

    server {
        listen       80 default_server;
        listen       [::]:80 default_server;
        server_name  _;
        root         /usr/share/nginx/html;

        # Load configuration files for the default server block.
        include /etc/nginx/default.d/*.conf;

        # Reverse-proxy everything to kibana on the .76 host.
        location / {
                   proxy_pass  http://192.168.156.76:5601;
                   proxy_set_header Host   $host;
                   proxy_set_header X-Real-IP      $remote_addr;
                   proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                   }
                   access_log  /var/log/nginx/elk_access.log elk_access;

        error_page 404 /404.html;
            location = /40x.html {
        }

        error_page 500 502 503 504 /50x.html;
            location = /50x.html {
        }
    }


}

 

安装并配置LOGSTASH对NGINX的日志进行处理

[root@V76 elk]# cat logstash.sh 
#!/bin/bash
# Install Logstash 6.7.1 and expose its monitoring API on port 9600.
tar -zxvf /shared/app/logstash-6.7.1.tar.gz -C /tmp/
mv /tmp/logstash-6.7.1/ /usr/local/logstash
mkdir -p /usr/local/logstash/conf.d   # -p: safe on re-runs
# Append the settings only once; duplicate keys break logstash.yml.
if ! grep -q '^http.host:' /usr/local/logstash/config/logstash.yml; then
(
cat <<EOF
http.host: "192.168.156.82"
http.port: 9600
EOF
) >> /usr/local/logstash/config/logstash.yml
fi

 

[root@V76 elk]# cat logstash2.sh 
#!/bin/bash
# Write the nginx-access pipeline and start logstash in the background.
# The heredoc delimiter is quoted ('EOF') so the grok pattern is copied
# literally — none of it must be interpreted by the shell.
(
cat <<'EOF'

input {
  file {
    path => "/var/log/nginx/elk_access.log"
    start_position => "beginning"
    type => "nginx"
  }
}
filter {
    grok {
        match => { "message" => "%{IPORHOST:http_host} %{IPORHOST:clientip} - %{USERNAME:remote_user} \[%{HTTPDATE:timestamp}\] \"(?:%{WORD:http_verb} %{NOTSPACE:http_request}(?: HTTP/%{NUMBER:http_version})?|%{DATA:raw_http_request})\" %{NUMBER:response} (?:%{NUMBER:bytes_read}|-) %{QS:referrer} %{QS:agent} %{QS:xforwardedfor} %{NUMBER:request_time:float}"}
    }
    geoip {
        source => "clientip"
    }
}
output {
    stdout { codec => rubydebug }
    elasticsearch {
        hosts => ["192.168.156.76:9200"]
        index => "nginx-test-%{+YYYY.MM.dd}"
  }
}
EOF
) > /usr/local/logstash/conf.d/nginx_access.conf

# --path.settings must point at the directory that CONTAINS logstash.yml
# (config/), not the install root; with the original value logstash
# cannot find its settings file.
nohup /usr/local/logstash/bin/logstash \
  --path.settings /usr/local/logstash/config \
  -f /usr/local/logstash/conf.d/nginx_access.conf &

 

 

7、部署filebeat

tar -zxvf /shared/app/filebeat-6.7.1-linux-x86_64.tar.gz -C /tmp/
mv /tmp/filebeat-6.7.1-linux-x86_64 /usr/local/filebeat

编辑/usr/local/filebeat/filebeat.yml,如下:

[root@m2 bin]# cat /usr/local/filebeat/filebeat.yml|grep -Ev '^$|#'
# Filebeat 6.7.1 configuration (comment-stripped view from the doc).
filebeat.inputs:
- type: log
  paths:
    - /var/log/*.log          # ship every top-level /var/log/*.log file
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false       # do not watch module configs for changes
setup.template.settings:
  index.number_of_shards: 3
setup.kibana:                 # left empty; dashboards are not auto-loaded here
output.elasticsearch:
  hosts: ["192.168.156.82:9200"]   # ES node running on this same host
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~

启动服务
[root@V76 elk]#  nohup /usr/local/filebeat/filebeat -c /usr/local/filebeat/filebeat.yml &

验证:

[root@V76 elk]# curl '192.168.156.82:9200/_cat/indices?v' 
health status index                     uuid                   pri rep docs.count docs.deleted store.size pri.store.size
green  open   .kibana_task_manager      KhGAJ4dhTouNfpw0TezS0Q   1   1          2            0     25.2kb         12.6kb
green  open   .kibana_1                 ssZIK4FtQgWCnF3OurA_1w   1   1          6            1     60.9kb         30.4kb
green  open   nginx-test-2019.06.27     yVdTGwyhTvSsWxNiAbyUoA   5   1        207            0    803.1kb        409.1kb
green  open   nginx-test-2019.06.28     EcpZifKWTTuFUCznNtaGkw   5   1       1030            0      1.5mb        728.3kb
green  open   filebeat-6.7.1-2019.06.27 HRICNQ94RC6xKkgm2c49NQ   3   1        667            0      427kb        224.5kb

服务已经启动:

 

 

因为NGINX配置了转发,可以通过访问NGINX转发到kibana

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值