一、使用背景
当生产环境有很多服务器、很多业务模块的日志需要每时每刻查看时
二、环境
系统:ubuntu14
JDK:1.8
Elasticsearch-6.1
Logstash-6.1
kibana-6.1
三、安装
1、安装JDK
下载JDK:http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html
本环境下载的是64位tar.gz包,将安装包拷贝至安装服务器/usr/local目录
[root@localhost ~]# cd /usr/local/
[root@localhost local]# tar -xzvf jdk-8u111-linux-x64.tar.gz
配置环境变量
[root@localhost local]# vim /etc/profile
将下面的内容添加至文件末尾(假如服务器需要多个JDK版本,为了ELK不影响其它系统,也可以将环境变量的内容稍后添加到ELK的启动脚本中)
JAVA_HOME=/usr/local/jdk1.8.0_111
JRE_HOME=/usr/local/jdk1.8.0_111/jre
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
PATH=$PATH:$JAVA_HOME/bin
export JAVA_HOME
export JRE_HOME
export CLASSPATH PATH
ulimit -u 16384
ulimit -n 65536
[root@localhost local]# source /etc/profile 使修改立即生效
配置limit相关参数
[root@localhost local]# vim /etc/security/limits.conf
添加以下内容
* soft nproc 65536
* hard nproc 65536
* soft nofile 65536
* hard nofile 65536
创建运行ELK的用户
[root@localhost local]# groupadd elk
[root@localhost local]# useradd -g elk elk
当然也可以为该用户设置密码:
useradd elk # 创建用户(上面已创建则可跳过)
passwd elk # 设置该用户的密码
创建ELK运行目录
[root@localhost local]# mkdir /elk
[root@localhost local]# chown -R elk:elk /elk
关闭防火墙:
[root@localhost ~]# iptables -F
以上全部是root用户完成
2、安装ELK
以下由elk用户操作
以elk用户登录服务器
下载ELK安装包:https://www.elastic.co/downloads,并上传到服务器且解压,解压命令:tar -xzvf 包名
不过也可以下载zip格式的,那么解压命令就是 unzip
配置Elasticsearch
vim config/elasticsearch.yml
增加:network.host: 0.0.0.0 用来让外网访问
查看ip地址:ifconfig
保存退出
启动Elasticsearch -------------必须在elk用户下
./bin/elasticsearch
/elk/elasticsearch-6.2.4/bin/elasticsearch
后台启动:
./bin/elasticsearch -d
查看进程:
ps -ef|grep elasticsearch
用浏览器访问:http://127.0.0.1:9200 或者外网访问192.168.47.131:9200 用chrome或者firefox
Elasticsearch安装完毕
安装logstash
logstash是ELK中负责收集和过滤日志的
编写配置文件如下 (log4j.conf)。注意:log4j input插件在Logstash 6.x中已被废弃,官方建议改用下方"或者"示例中的tcp input方式:
input {
log4j {
mode => "server"
host => "192.168.112.131"
port => 4560
}
}
output {
stdout {
codec => rubydebug
}
elasticsearch{
hosts => ["192.168.112.131:9200"]
index => "log4j-%{+YYYY.MM.dd}"
document_type => "log4j_type"
}
}
或者:
input {
tcp {
host => "192.168.47.131"
port => "4560"
codec => json
}
stdin{
}
}
filter {
kv {
remove_field => ["source","endOfBatch","loggerName","threadId","threadPriority","port","@version","thread","loggerFqcn","message","timeMillis"]
}
mutate {
remove_field => ["timestamp"]
}
}
output {
stdout {
codec => rubydebug
}
if "_jsonparsefailure" not in [tags] {
elasticsearch{
hosts => ["192.168.47.131:9200"]
index => "tim_mis-%{+YYYY.MM.dd}"
}
}
}
启动logstash
/elk/logstash-6.1.2/bin/logstash -f /elk/logstash-6.1.2/config/log4j.conf
/elk/logstash-6.2.4/bin/logstash -f /elk/logstash-6.2.4/config/log4j2_tim.conf
但是有时候会因为内存不够无法启动
vim /elk/logstash-6.2.4/config/jvm.options 因为是只读文件,所以保存要 :w !sudo tee % 退出使用:q!
-Xms256m
-Xmx1g
控制台输出类似 "Successfully started Logstash API endpoint" 的信息即代表启动成功
安装kibana
解压:
tar -zxvf kibana-6.2.4-linux-x86_64.tar.gz
修改配置文件:
vim /elk/kibana-6.2.4-linux-x86_64/config/kibana.yml
# Kibana is served by a back end server. This setting specifies the port to use.
server.port: 5601
# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: "192.168.3.59"
# Enables you to specify a path to mount Kibana at if you are running behind a proxy. This only affects
# the URLs generated by Kibana, your proxy is expected to remove the basePath value before forwarding requests
# to Kibana. This setting cannot end in a slash.
#server.basePath: ""
# The maximum payload size in bytes for incoming server requests.
#server.maxPayloadBytes: 1048576
# The Kibana server's name. This is used for display purposes.
#server.name: "your-hostname"
# The URL of the Elasticsearch instance to use for all your queries.
elasticsearch.url: "http://192.168.3.59:9200"
# When this setting's value is true Kibana uses the hostname specified in the server.host
# setting. When the value of this setting is false, Kibana uses the hostname of the host
# that connects to this Kibana instance.
#elasticsearch.preserveHost: true
# Kibana uses an index in Elasticsearch to store saved searches, visualizations and
# dashboards. Kibana creates a new index if the index doesn't already exist.
kibana.index: ".kibana"
# The default application to load.
#kibana.defaultAppId: "home"
# If your Elasticsearch is protected with basic authentication, these settings provide
# the username and password that the Kibana server uses to perform maintenance on the Kibana
# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which
# is proxied through the Kibana server.
#elasticsearch.username: "user"
#elasticsearch.password: "pass"
# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the Kibana server to the browser.
#server.ssl.enabled: false
#server.ssl.certificate: /path/to/your/server.crt
#server.ssl.key: /path/to/your/server.key
# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
# These files validate that your Elasticsearch backend uses the same key files.
#elasticsearch.ssl.certificate: /path/to/your/client.crt
#elasticsearch.ssl.key: /path/to/your/client.key
# Optional setting that enables you to specify a path to the PEM file for the certificate
# authority for your Elasticsearch instance.
#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
# To disregard the validity of SSL certificates, change this setting's value to 'none'.
#elasticsearch.ssl.verificationMode: full
# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of
# the elasticsearch.requestTimeout setting.
#elasticsearch.pingTimeout: 1500
# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value
# must be a positive integer.
#elasticsearch.requestTimeout: 30000
# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side
# headers, set this value to [] (an empty list).
#elasticsearch.requestHeadersWhitelist: [ authorization ]
# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration.
#elasticsearch.customHeaders: {}
# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
#elasticsearch.shardTimeout: 0
# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying.
#elasticsearch.startupTimeout: 5000
# Specifies the path where Kibana creates the process ID file.
#pid.file: /var/run/kibana.pid
# Enables you specify a file where Kibana stores log output.
#logging.dest: stdout
# Set the value of this setting to true to suppress all logging output.
#logging.silent: false
# Set the value of this setting to true to suppress all logging output other than error messages.
#logging.quiet: false
# Set the value of this setting to true to log all events, including system usage information
# and all requests.
#logging.verbose: false
# Set the interval in milliseconds to sample system and process performance
# metrics. Minimum is 100ms. Defaults to 5000.
#ops.interval: 5000
# The default locale. This locale can be used in certain circumstances to substitute any missing
# translations.
#i18n.defaultLocale: "en"
保存退出
启动kibana
/elk/kibana-6.1.2-linux-x86_64/bin/kibana
http://192.168.47.131:5601
因此:全部启动顺序如下:
切换elk用户:
nohup 命令 &  # 后台启动(shell注释用#)
/elk/elasticsearch-6.2.4/bin/elasticsearch
/elk/logstash-6.2.4/bin/logstash -f /elk/logstash-6.2.4/config/log4j2_tim.conf
/elk/kibana-6.2.4-linux-x86_64/bin/kibana
其他参考:
ELK 系统 搭建 https://blog.csdn.net/fxbin123/article/details/79983245(有更好的优化,并且该博主下有kibana的使用介绍)
ELK日志分析平台搭建全过程 https://www.cnblogs.com/onetwo/p/6059231.html
Elasticsearch5.0 最新版本安装问题 http://blog.csdn.net/abcd_d_/article/details/53018927
ubuntu设置最大权限文件数量 http://blog.csdn.net/kimsoft/article/details/8024216
ELK实时日志分析平台部署搭建详细实现过程 http://www.linuxidc.com/Linux/2016-09/135137.htm
ElasticSearch6.X 下载与安装https://blog.csdn.net/fxbin123/article/details/79682876