Collecting the messages log with ELK

server 192.168.88.8  (elasticsearch)
server 192.168.88.88 (logstash)
server 192.168.88.99 (kibana)

jdk

[root@localhost ~]# rpm -ivh jdk-8u131-linux-x64_.rpm
准备中...                          ################################# [100%]
正在升级/安装...
   1:jdk1.8.0_131-2000:1.8.0_131-fcs  ################################# [100%]
Unpacking JAR files...
        tools.jar...
        plugin.jar...
        javaws.jar...
        deploy.jar...
        rt.jar...
        jsse.jar...
        charsets.jar...
        localedata.jar...
[root@localhost ~]# java -version
java version "1.8.0_131"
Java(TM) SE Runtime Environment (build 1.8.0_131-b11)
Java HotSpot(TM) 64-Bit Server VM (build 25.131-b11, mixed mode)

elasticsearch

1. Install

[root@localhost ~]# yum -y install elasticsearch-6.6.2.rpm

2. Edit the configuration file

[root@localhost ~]# cat /etc/elasticsearch/elasticsearch.yml | grep -v "^#"
cluster.name: ylm666
node.name: node-1
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.88.8
http.port: 9200

3. Start the service

[root@localhost ~]# sudo systemctl daemon-reload
[root@localhost ~]# sudo systemctl enable elasticsearch.service
Created symlink from /etc/systemd/system/multi-user.target.wants/elasticsearch.service to /usr/lib/systemd/system/elasticsearch.service.
[root@localhost ~]# sudo systemctl start elasticsearch.service

4. Check the logs

[root@localhost ~]# tailf /var/log/elasticsearch/ylm666.log
[2020-04-10T09:45:15,147][INFO ][o.e.g.GatewayService     ] [node-1] recovered [0] indices into cluster_state
[2020-04-10T09:45:15,366][INFO ][o.e.c.m.MetaDataIndexTemplateService] [node-1] adding template [.watch-history-9] for index patterns [.watcher-history-9*]
[2020-04-10T09:45:15,395][INFO ][o.e.c.m.MetaDataIndexTemplateService] [node-1] adding template [.triggered_watches] for index patterns [.triggered_watches*]
[2020-04-10T09:45:15,414][INFO ][o.e.c.m.MetaDataIndexTemplateService] [node-1] adding template [.monitoring-logstash] for index patterns [.monitoring-logstash-6-*]
[2020-04-10T09:45:15,445][INFO ][o.e.c.m.MetaDataIndexTemplateService] [node-1] adding template [.monitoring-es] for index patterns [.monitoring-es-6-*]
[2020-04-10T09:45:15,464][INFO ][o.e.c.m.MetaDataIndexTemplateService] [node-1] adding template [.watches] for index patterns [.watches*]
[2020-04-10T09:45:15,485][INFO ][o.e.c.m.MetaDataIndexTemplateService] [node-1] adding template [.monitoring-beats] for index patterns [.monitoring-beats-6-*]
[2020-04-10T09:45:15,504][INFO ][o.e.c.m.MetaDataIndexTemplateService] [node-1] adding template [.monitoring-alerts] for index patterns [.monitoring-alerts-6]
[2020-04-10T09:45:15,527][INFO ][o.e.c.m.MetaDataIndexTemplateService] [node-1] adding template [.monitoring-kibana] for index patterns [.monitoring-kibana-6-*]
[2020-04-10T09:45:15,625][INFO ][o.e.l.LicenseService     ] [node-1] license [c1abdffc-6f8d-4c43-84b9-52f0a6a90935] mode [basic] - valid

5. Check the listening ports

[root@localhost ~]# netstat -ntlp | grep java
tcp6       0      0 192.168.88.8:9200       :::*                    LISTEN      23052/java
tcp6       0      0 192.168.88.8:9300       :::*                    LISTEN      23052/java
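
As an extra sanity check (a sketch, not part of the original transcript), querying the HTTP API directly should return a JSON document whose cluster_name matches the ylm666 value set in elasticsearch.yml:

curl http://192.168.88.8:9200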

logstash

1. Install

[root@localhost ~]# yum -y install logstash-6.6.0.rpm

2. Edit the configuration file

[root@localhost ~]# cat /etc/logstash/conf.d/messages.conf
input {
        file {
                path => "/var/log/messages"
                type => "msg-log"
                start_position => "beginning"
        }

}
output {
        elasticsearch {
                hosts => "192.168.88.8:9200"
                index => "msg_log-%{+YYYY.MM.dd}"
        }
}
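
Before starting the service, the pipeline syntax can optionally be validated with Logstash's built-in config test (path assumes the default RPM layout); it should finish with "Configuration OK":

/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/messages.conf --config.test_and_exit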

3. Permissions

[root@localhost ~]# chmod -R 777 /var/log
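
chmod -R 777 gives the logstash service user read access to /var/log/messages, but it opens the whole log directory to everyone. A narrower alternative (a sketch, assuming the acl package is installed) is to grant only the logstash user read access via an ACL; note that log rotation recreates the file, so the ACL has to be reapplied or handled in the rotation config:

setfacl -m u:logstash:r /var/log/messages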

4. Start the service

[root@localhost ~]# systemctl start logstash

5. Check the logs


[root@localhost ~]# tailf /var/log/logstash/logstash-plain.log
[2020-04-10T09:53:18,454][WARN ][logstash.outputs.elasticsearch] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>6}
[2020-04-10T09:53:18,531][INFO ][logstash.outputs.elasticsearch] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["//192.168.88.8:9200"]}
[2020-04-10T09:53:18,541][INFO ][logstash.outputs.elasticsearch] Using mapping template from {:path=>nil}
[2020-04-10T09:53:18,583][INFO ][logstash.outputs.elasticsearch] Attempting to install template {:manage_template=>{"template"=>"logstash-*", "version"=>60001, "settings"=>{"index.refresh_interval"=>"5s"}, "mappings"=>{"_default_"=>{"dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date"}, "@version"=>{"type"=>"keyword"}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}}
[2020-04-10T09:53:18,666][INFO ][logstash.outputs.elasticsearch] Installing elasticsearch template to _template/logstash
[2020-04-10T09:53:19,106][INFO ][logstash.inputs.file     ] No sincedb_path set, generating one based on the "path" setting {:sincedb_path=>"/var/lib/logstash/plugins/inputs/file/.sincedb_452905a167cf4509fd08acb964fdb20c", :path=>["/var/log/messages"]}
[2020-04-10T09:53:19,193][INFO ][logstash.pipeline        ] Pipeline started successfully {:pipeline_id=>"main", :thread=>"#<Thread:0x2c19867d run>"}
[2020-04-10T09:53:19,263][INFO ][logstash.agent           ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2020-04-10T09:53:19,363][INFO ][filewatch.observingtail  ] START, creating Discoverer, Watch with file and sincedb collections
[2020-04-10T09:53:19,776][INFO ][logstash.agent           ] Successfully started Logstash API endpoint {:port=>9600}

6. Check the listening port

[root@localhost ~]# netstat -ntlp | grep 9600
tcp6       0      0 127.0.0.1:9600          :::*                    LISTEN      19368/java
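
Port 9600 is Logstash's own monitoring API, bound to 127.0.0.1 by default, so it can be queried locally to see per-pipeline event counts; for example:

curl -s http://127.0.0.1:9600/_node/stats/pipelines?pretty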

kibana

1. Install

[root@localhost ~]# yum -y install kibana-6.6.2-x86_64.rpm

2. Edit the configuration file

[root@localhost ~]# cat /etc/kibana/kibana.yml | grep -v "^#" |  sed '/^$/d'
server.port: 5601
server.host: "192.168.88.99"
elasticsearch.hosts: ["http://192.168.88.8:9200"]

3. Start the service

[root@localhost ~]# systemctl start kibana

4. Check the listening port

[root@localhost ~]# netstat -ntlp | grep 5601
tcp        0      0 192.168.88.99:5601      0.0.0.0:*               LISTEN      13760/node
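
Kibana also exposes a status endpoint that can be checked before opening the browser (an optional check, not part of the original steps):

curl -s http://192.168.88.99:5601/api/status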

Before opening Kibana, let's check which indices exist:

[root@localhost ~]# curl -X GET http://192.168.88.8:9200/_cat/indices?v
health status index              uuid                   pri rep docs.count docs.deleted store.size pri.store.size
green  open   .kibana_1          c2riR6WeT8-Zv_I-r-_xeQ   1   0          1            0        5kb            5kb
yellow open   msg_log-2020.04.10 _qLj50YKSAiqCCaV63I03w   5   1       1867            0    572.5kb        572.5kb
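
To confirm documents are really flowing into the new index, a sample document can be pulled straight from it (index name taken from the listing above):

curl -s 'http://192.168.88.8:9200/msg_log-2020.04.10/_search?size=1&pretty'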

Adding visualizations for the nginx logs

1. A pie chart of request counts by HTTP status code
2. A metric counting PV (page views)
3. A top-10 list of client IPs by request count
4. A trend chart of traffic over time
5. A dashboard combining the visualizations above
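
These dashboards assume nginx access logs are also being shipped into Elasticsearch. A minimal Logstash pipeline for that, sketched here assuming the default combined access log format and a hypothetical nginx_log-* index, would mirror the messages pipeline above:

input {
        file {
                path => "/var/log/nginx/access.log"
                type => "nginx-log"
                start_position => "beginning"
        }
}
filter {
        # parse the combined log format into fields such as clientip, response (status code), request
        grok {
                match => { "message" => "%{COMBINEDAPACHELOG}" }
        }
}
output {
        elasticsearch {
                hosts => "192.168.88.8:9200"
                index => "nginx_log-%{+YYYY.MM.dd}"
        }
}

The status-code pie chart and the top-10 IP visualizations can then be built on the response and clientip fields produced by the grok filter.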

Common commands:

Verify the service:

curl -X GET http://127.0.0.1:9200

View cluster statistics:

curl -X GET 'http://localhost:9200/_cluster/stats?pretty'

View cluster health:

curl -X GET 'localhost:9200/_cat/health?v'

Create an index (test_index):

curl -X PUT http://localhost:9200/test_index?pretty

List all indices:

curl -X GET http://localhost:9200/_cat/indices?v

curl -s http://192.168.1.9:9200/_cat/indices | grep "msg" | awk '{print $3}' | sort

Delete an index (test_index):

curl -X DELETE 'localhost:9200/test_index?pretty'
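
Combined with the listing command above, old msg_log indices can also be deleted in bulk (a sketch, assuming it runs on the Elasticsearch host; review the list before deleting anything):

for idx in $(curl -s http://localhost:9200/_cat/indices | grep "msg" | awk '{print $3}' | sort); do
        curl -X DELETE "http://localhost:9200/$idx?pretty"
done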
