环境
# JDK — Elasticsearch/Kibana/Logstash 7.x require Java 8+
yum install -y java-1.8.0-openjdk
下载
- elasticsearch
https://www.elastic.co/cn/downloads/elasticsearch
- kibana
https://www.elastic.co/cn/downloads/kibana
- logstash
https://www.elastic.co/cn/downloads/logstash
安装
本文 centos7.5 elasticsearch-7.3.1 kibana-7.3.2-linux-x86_64 logstash-7.3.2
cd /opt
# unpack elasticsearch
tar -zxf elasticsearch-7.3.1-linux-x86_64.tar.gz
# unpack kibana
tar -zxf kibana-7.3.2-linux-x86_64.tar.gz
# unpack logstash (-z for gzip, consistent with the archives above)
tar -zxf logstash-7.3.2.tar.gz
(本文下载到/opt下)
配置
- kibana
# 确认配置文件中连接的es地址
cat /opt/kibana-7.3.2-linux-x86_64/config/kibana.yml
# The URLs of the Elasticsearch instances to use for all your queries.
elasticsearch.hosts: ["http://localhost:9200"] # 28行左右
- logstash
# 新建logstash.conf 处理文件
# logstash.conf — read JSON log entries from a Redis list, parse them,
# and index them into Elasticsearch.
input {
  redis {                                  # pull events from Redis
    type      => "common_api_access_log"   # event type
    host      => "localhost"               # Redis host
    password  => '1234'                    # Redis password
    port      => "6379"                    # Redis port
    data_type => "list"                    # consume from a Redis list
    key       => "common_api_access_log"   # Redis list key
  }
}
filter {
  json {
    source => "message"            # field holding the raw JSON string
    target => "msg"                # parsed result is stored under [msg]
    skip_on_invalid_json => true   # keep malformed events instead of failing
  }
}
output {
  elasticsearch {
    hosts => ["127.0.0.1:9200"]
    index => "%{[tags]}-%{+YYYY.MM.dd}"   # one daily index per tags value
  }
}
vim /opt/logstash-7.3.2/config/pipelines.yml
# around line 13 — NOTE: the keys of a pipeline entry must be indented
# under the "- pipeline.id" sequence item, otherwise the YAML is invalid
- pipeline.id: another_test
  queue.type: persisted
  path.config: "/opt/logstash-7.3.2/config/logstash.conf" # load the config created above
启动
# start elasticsearch (extracted under /opt; ES refuses to run as root,
# so switch to an unprivileged user first)
cd /opt/elasticsearch-7.3.1/bin
su www
./elasticsearch &
# start kibana
cd /opt/kibana-7.3.2-linux-x86_64/bin
su www
./kibana &
# start logstash
cd /opt/logstash-7.3.2/bin
./logstash &
业务
结合logstash.conf 配置 日志格式如下
# tags 和 message 为必须字段,值可自定义;message 的值最好为 JSON,这样可以被解析后写入索引,否则整个 message 将作为一个字符串存储
log := `{"tags": "ceshi", "message": {"request_path": "/hello/world", "params": {"a": "b", "c": "d"}}}`
redis := libs.Redis{}
#Lpush 为封装的 redis 方法,参数1为 redis key,参数2为 redis value
redis.Lpush("common_api_access_log", log)
查询
打开kibana dashboard
http://localhost:5601
点击左侧 Management
点击 Kibana 下的 Index Patterns
点击 create index pattern
输入 ceshi*
点击 next step
选择 Time Filter field name 为 @timestamp
点击 create index pattern
添加完成就可以到 Discover中查询我们刚写入的日志了