ELK Large-Scale Search Engine in Detail

Lab environment

192.168.170.8   node1 kibana
192.168.170.9   node2 els
192.168.170.10 node3 logstash1
192.168.170.11 node4 redis
192.168.170.12 node5 logstash2
192.168.170.22 node6 haproxy+web

Lab topology: (diagram omitted)

Install the Redis service

Download the Redis source package, compile and install it, then edit the configuration file:

[root@node4 src]# wget http://download.redis.io/releases/redis-5.0.3.tar.gz
[root@node4 src]# tar -zxvf redis-5.0.3.tar.gz
[root@node4 src]# cd redis-5.0.3
[root@node4 redis-5.0.3]# make
[root@node4 redis-5.0.3]# ln -sv /usr/local/src/redis-5.0.3 /usr/local/redis
[root@node4 redis-5.0.3]# cp src/redis-server  src/redis-cli /usr/bin/
[root@node4 redis-5.0.3]# cd /usr/local/redis
[root@node4 redis]# vi redis.conf 
bind 0.0.0.0
daemonize yes    # run Redis in the background as a daemon
# disable RDB persistence by commenting out the save points
#save 900 1
#save 300 10
#save 60 10000
requirepass 123456

Start the service and check the listening ports:

[root@node4 redis]# redis-server /usr/local/redis/redis.conf 
13577:C 13 Apr 2019 20:45:48.109 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
13577:C 13 Apr 2019 20:45:48.109 # Redis version=5.0.3, bits=64, commit=00000000, modified=0, pid=13577, just started
13577:C 13 Apr 2019 20:45:48.109 # Configuration loaded
[root@node4 redis]# ss -tunlp
Netid State      Recv-Q Send-Q               Local Address:Port                              Peer Address:Port              
udp   UNCONN     0      0                        127.0.0.1:323                                          *:*                   users:(("chronyd",pid=3545,fd=1))
udp   UNCONN     0      0                              ::1:323                                         :::*                   users:(("chronyd",pid=3545,fd=2))
tcp   LISTEN     0      128                              *:6379                                         *:*                   users:(("redis-server",pid=13578,fd=6))
tcp   LISTEN     0      128                              *:22                                           *:*                   users:(("sshd",pid=812,fd=3))
tcp   LISTEN     0      100                      127.0.0.1:25                                           *:*                   users:(("master",pid=1032,fd=13))
tcp   LISTEN     0      128                             :::22                                          :::*                   users:(("sshd",pid=812,fd=4))
tcp   LISTEN     0      100                            ::1:25                                          :::*                   users:(("master",pid=1032,fd=14))
[root@node4 redis]#

Redis login test

[root@node4 redis]# redis-cli 
127.0.0.1:6379> AUTH 123456
OK
127.0.0.1:6379> ping
PONG
127.0.0.1:6379> 
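
Optionally, the persistence and password settings can be double-checked from the same redis-cli session with the standard CONFIG GET commands:

127.0.0.1:6379> CONFIG GET save          # expect an empty value once the save lines are commented out
127.0.0.1:6379> CONFIG GET requirepass   # should return 123456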

logstash2 configuration example:

[root@node5 ~]# vim /etc/logstash/conf.d/syslog.conf
# collect syslog messages forwarded by rsyslog on port 5140
input {
  syslog {
    type => "system-rsyslog"
    port => "5140"
  }
}
# print events to stdout for debugging and push them into a Redis list
output {
  stdout {
    codec => rubydebug
  }
  redis {
    data_type => "list"
    key => "system-rsyslog"      # Redis list key
    host => "192.168.170.11"
    port => "6379"
    db => "0"
    password => "123456"
  }
}

Test whether the logstash syntax is correct:

[root@node5 conf.d]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/syslog.conf  -t
WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults
Could not find log4j2 configuration at path /usr/share/logstash/config/log4j2.properties. Using default config which logs errors to the console
[WARN ] 2019-04-13 21:14:31.536 [LogStash::Runner] multilocal - Ignoring the 'pipelines.yml' file because modules or command line options are specified
Configuration OK
[INFO ] 2019-04-13 21:14:34.324 [LogStash::Runner] runner - Using config.test_and_exit mode. Config Validation Result: OK. Exiting Logstash

Restart the logstash service and check that port 5140 is listening:

[root@node5 conf.d]# systemctl restart logstash
[root@node5 conf.d]# ss -tunlp | grep 5140
udp    UNCONN     0      0         *:5140                  *:*                   users:(("java",pid=9486,fd=81))
tcp    LISTEN     0      50       :::5140                 :::*                   users:(("java",pid=9486,fd=78))
[root@node5 conf.d]#
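
Before wiring HAProxy in, the 5140 listener can be smoke-tested with logger (assuming the util-linux logger on the sending node supports the -d/-n/-P options); the test message should then show up in the rubydebug output and in the system-rsyslog list in Redis:

[root@node6 ~]# logger -d -n 192.168.170.12 -P 5140 -p local6.info "logstash syslog input test"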

Install HAProxy

Download the HAProxy source package, extract it, then compile and install:

[root@node6 ~]# wget http://www.haproxy.org/download/1.8/src/haproxy-1.8.10.tar.gz
[root@node6 ~]# tar xvf haproxy-1.8.10.tar.gz
[root@node6 ~]# cd haproxy-1.8.10
[root@node6 haproxy-1.8.10]# yum install gcc pcre pcre-devel openssl openssl-devel -y
[root@node6 haproxy-1.8.10]# make TARGET=linux2628 USE_PCRE=1 USE_OPENSSL=1 USE_ZLIB=1 PREFIX=/usr/local/haproxy
[root@node6 haproxy-1.8.10]# make install PREFIX=/usr/local/haproxy
[root@node6 haproxy-1.8.10]# /usr/local/haproxy/sbin/haproxy -v   # confirm the version
[root@node6 haproxy-1.8.10]# cp /usr/local/haproxy/sbin/haproxy /usr/sbin/   # so the systemd unit below finds the binary at /usr/sbin/haproxy

Write the systemd unit file:

[root@node6 haproxy-1.8.10]# vi /usr/lib/systemd/system/haproxy.service
[Unit]
Description=HAProxy Load Balancer
After=syslog.target network.target

[Service]
ExecStartPre=/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -c -q
ExecStart=/usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid
ExecReload=/bin/kill -USR2 $MAINPID

[Install]
WantedBy=multi-user.target

Create the HAProxy configuration directory and configuration file:

[root@node6 haproxy-1.8.10]# mkdir -p /etc/haproxy /usr/local/haproxy/run
[root@node6 haproxy-1.8.10]# vi /etc/haproxy/haproxy.cfg
global
maxconn 100000
chroot /usr/local/haproxy
uid 99
gid 99
daemon
nbproc 1
pidfile /usr/local/haproxy/run/haproxy.pid
log 127.0.0.1 local6 info

defaults
option http-keep-alive
option forwardfor
maxconn 100000
mode http
timeout connect 300000ms
timeout client 300000ms
timeout server 300000ms

listen stats
 mode http
 bind 0.0.0.0:9999
 stats enable
 log global
 stats uri	/haproxy-status
 stats auth haadmin:123456

#frontend web_port
frontend web_port
	bind 0.0.0.0:8008
	mode http
	option httplog
	log global     
	option forwardfor
###############ACL Setting###################
	acl pc 		   hdr_dom(host)	-i www.elk.com
	acl mobile	 hdr_dom(host)	-i m.elk.com
###############USE ACL###################
	use_backend	 pc_host	      if   pc
	use_backend	 mobile_host    if   mobile
backend pc_host
	mode 	http
	option httplog
	log global 
	balance source
	server web1 192.168.170.22:80 check inter 2000 rise 3 fall 2 weight 1

backend mobile_host
	mode 	http
	option httplog
	log global 
	balance source
	server web1 192.168.170.22:80 check inter 2000 rise 3 fall 2 weight 1
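
With the unit file and haproxy.cfg in place, the service can be started and quickly verified, for example:

[root@node6 haproxy-1.8.10]# systemctl daemon-reload
[root@node6 haproxy-1.8.10]# systemctl start haproxy
[root@node6 haproxy-1.8.10]# systemctl enable haproxy
[root@node6 haproxy-1.8.10]# ss -tnlp | grep -E '8008|9999'
[root@node6 haproxy-1.8.10]# curl -u haadmin:123456 http://192.168.170.22:9999/haproxy-status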

Edit the rsyslog configuration file on the HAProxy server:

[root@node6 ~]# vim /etc/rsyslog.conf
# enable the UDP/TCP syslog listeners so rsyslog can receive HAProxy's logs sent to 127.0.0.1
$ModLoad imudp
$UDPServerRun 514
$ModLoad imtcp
$InputTCPServerRun 514
# write local6 (the facility used by HAProxy) to a local file and forward it to the logstash server (IP:PORT, @@ = TCP)
local6.* /var/log/haproxy.log
local6.* @@192.168.170.12:5140
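
After rsyslog is restarted (next step), the local6 chain can be exercised by hand; the test line should land in /var/log/haproxy.log and be forwarded on to logstash2 over port 5140:

[root@node6 ~]# logger -p local6.info "rsyslog local6 test"
[root@node6 ~]# tail -n1 /var/log/haproxy.log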

Restart the rsyslog and HAProxy services:

[root@node6 ~]# systemctl restart rsyslog
[root@node6 ~]# systemctl restart haproxy

Access HAProxy from a client browser, then check locally whether the log file is being written:

[root@node6 ~]# tail /var/log/haproxy.log
Apr 14 14:52:14 localhost haproxy[16236]: 172.17.1.98:53891 [14/Apr/2019:14:52:14.105] web_port web_port/<NOSRV> -1/-1/-1/-1/774 503 212 - - SC-- 0/0/0/0/0 0/0 "GET / HTTP/1.1"
Apr 14 14:52:14 localhost haproxy[16236]: 172.17.1.98:53891 [14/Apr/2019:14:52:14.105] web_port web_port/<NOSRV> -1/-1/-1/-1/774 503 212 - - SC-- 0/0/0/0/0 0/0 "GET / HTTP/1.1"
Apr 14 14:52:16 localhost haproxy[16236]: 172.17.1.98:53893 [14/Apr/2019:14:52:15.250] web_port web_port/<NOSRV> -1/-1/-1/-1/838 503 212 - - SC-- 0/0/0/0/0 0/0 "GET /favicon.ico HTTP/1.1"
Apr 14 14:52:16 localhost haproxy[16236]: 172.17.1.98:53893 [14/Apr/2019:14:52:15.250] web_port web_port/<NOSRV> -1/-1/-1/-1/838 503 212 - - SC-- 0/0/0/0/0 0/0 "GET /favicon.ico HTTP/1.1"
Apr 14 14:52:19 localhost haproxy[16236]: 172.17.1.98:53895 [14/Apr/2019:14:52:19.172] web_port web_port/<NOSRV> -1/-1/-1/-1/41 503 212 - - SC-- 0/0/0/0/0 0/0 "GET / HTTP/1.1"
Apr 14 14:52:19 localhost haproxy[16236]: 172.17.1.98:53895 [14/Apr/2019:14:52:19.172] web_port web_port/<NOSRV> -1/-1/-1/-1/41 503 212 - - SC-- 0/0/0/0/0 0/0 "GET / HTTP/1.1"
[root@node6 ~]#

Collect rsyslog logs into logstash2

Configure local hosts resolution:

[root@node6 ~]# vi /etc/hosts
192.168.170.22 www.elk.com
192.168.170.22 m.elk.com
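
The host-header ACLs defined in haproxy.cfg can then be exercised directly with curl; both Host values below match the acl rules, and in this lab both backends point at the same web server:

[root@node6 ~]# curl -I -H "Host: www.elk.com" http://192.168.170.22:8008/
[root@node6 ~]# curl -I -H "Host: m.elk.com" http://192.168.170.22:8008/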

Run logstash with the syslog config in the foreground, access the HAProxy pages, and check whether log events are printed locally:

[root@node5 ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/syslog.conf
{
          "priority" => 182,
    "facility_label" => "local6",
    "severity_label" => "Informational",
          "severity" => 6,
              "host" => "192.168.170.22",
        "@timestamp" => 2019-04-14T07:05:44.000Z,
           "program" => "haproxy",
         "timestamp" => "Apr 14 15:05:44",
               "pid" => "16236",
         "logsource" => "localhost",
          "facility" => 22,
           "message" => "172.17.1.98:53937 [14/Apr/2019:15:05:44.205] stats stats/<NOSRV> 196/-1/-1/-1/196 503 212 - - SC-- 0/0/0/0/0 0/0 \"GET /favicon.ico HTTP/1.1\"\n",
              "type" => "system-rsyslog",
          "@version" => "1"
}
{
          "priority" => 182,
    "facility_label" => "local6",
    "severity_label" => "Informational",
          "severity" => 6,
              "host" => "192.168.170.22",
        "@timestamp" => 2019-04-14T07:05:44.000Z,
           "program" => "haproxy",
         "timestamp" => "Apr 14 15:05:44",
               "pid" => "16236",
         "logsource" => "localhost",
          "facility" => 22,
           "message" => "172.17.1.98:53936 [14/Apr/2019:15:05:44.205] stats stats/<NOSRV> 2/-1/-1/-1/2 503 212 - - SC-- 1/1/0/0/0 0/0 \"GET / HTTP/1.1\"\n",
              "type" => "system-rsyslog",
          "@version" => "1"
}
{
          "priority" => 182,
    "facility_label" => "local6",
    "severity_label" => "Informational",
          "severity" => 6,
              "host" => "192.168.170.22",
        "@timestamp" => 2019-04-14T07:05:44.000Z,
           "program" => "haproxy",
         "timestamp" => "Apr 14 15:05:44",
               "pid" => "16236",
         "logsource" => "localhost",
          "facility" => 22,
           "message" => "172.17.1.98:53937 [14/Apr/2019:15:05:44.205] stats stats/<NOSRV> 196/-1/-1/-1/196 503 212 - - SC-- 0/0/0/0/0 0/0 \"GET /favicon.ico HTTP/1.1\"\n",
              "type" => "system-rsyslog",
          "@version" => "1"
}

Verify that Redis has received the data:

[root@node4 ~]# redis-cli
127.0.0.1:6379> AUTH 123456
OK
127.0.0.1:6379> KEYS *
1) "system-rsyslog"
127.0.0.1:6379> 
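
The queued events can also be inspected (without consuming them) using standard list commands, e.g.:

127.0.0.1:6379> LLEN system-rsyslog
127.0.0.1:6379> LRANGE system-rsyslog 0 0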

Collect TCP logs into logstash2

Edit the logstash configuration file:

[root@node5 ~]# vim /etc/logstash/conf.d/tcp.conf
input {
  tcp {
    port => "5500"
    type => "tcp-syslog"
    mode => "server"
  }
}
output {
  redis {
    data_type => "list"
    key => "tcp-syslog"
    host => "192.168.170.11"
    port => "6379"
    db => "0"
    password => "123456"
  }
}

Test the logstash syntax and start it:

[root@node5 ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/tcp.conf -t
[root@node5 ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/tcp.conf

Test log collection from the web server:

Using the nc command:

[root@node6 ~]# yum install nc -y
[root@node6 ~]# echo "nc test" | nc 192.168.170.12 5500
[root@node6 ~]# nc 192.168.170.12 5500 < /root/anaconda-ks.cfg

Check in Redis whether the data has arrived:

[root@node4 ~]# redis-cli 
127.0.0.1:6379> AUTH 123456
OK
127.0.0.1:6379> KEYS *
1) "tcp-syslog"
2) "system-rsyslog"
127.0.0.1:6379> 

logstash1 server

Configure the other logstash instance to read logs from Redis and write them to Elasticsearch:

[root@node3 ~]# vi /etc/logstash/conf.d/redis2.conf
input {
    redis {
        data_type => "list"
        host => "192.168.170.11"
        port => "6379"
        key => "tcp-syslog"
        db => "0"
        password => "123456"
    }
    redis {
        data_type => "list"
        host => "192.168.170.11"
        port => "6379"
        key => "system-rsyslog"
        db => "0"
        password => "123456"
    }
}
output {
    if [type] == "tcp-syslog" {
         elasticsearch {
                hosts => ["192.168.170.9:9200"]
                index => "tcp-syslog-%{+YYYY.MM.dd}"
        }
    }
    if [type] == "system-rsyslog" {
         elasticsearch {
                hosts => ["192.168.170.9:9200"]
                index => "system-rsyslog-%{+YYYY.MM.dd}"
        }
    }
}
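
As with the other pipelines, the syntax can be checked and logstash restarted on logstash1; once it is running, the Redis lists should drain as events are shipped to Elasticsearch (LLEN on the keys drops back towards 0):

[root@node3 ~]# /usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis2.conf -t
[root@node3 ~]# systemctl restart logstash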

Elasticsearch configuration

Verify that the Elasticsearch data directory contains data, or open http://192.168.170.9:9100 in a browser (the elasticsearch-head plugin) and confirm that the tcp-syslog and system-rsyslog indices are present.
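
If the head plugin is not installed, the same check can be made with the Elasticsearch REST API, for example:

[root@node2 ~]# curl -s 'http://192.168.170.9:9200/_cat/indices?v' | grep -E 'tcp-syslog|system-rsyslog'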

Kibana configuration

Open http://kibana1013.test.com in a browser, add the index patterns in the Kibana UI, and confirm that data is showing up.

Note: part of the configuration carries over from the previous lab, so those steps are omitted here.
