话不多说,直接上代码:
filebeat收集日志的docker-compose配置:
#=========================== Filebeat inputs =============================
filebeat.inputs:
  - type: log
    # Change to true to enable this input configuration.
    enabled: true
    # Paths that should be crawled and fetched. Glob based paths.
    paths:
      - /data/web/app/runtime/logs/buyer/*.log
    #exclude_lines: ['^DBG']
    #include_lines: ['^ERR', '^WARN']
    #exclude_files: ['.gz$']
    fields:
      # Custom field added by filebeat, used downstream (logstash) to tell
      # log sources / projects apart. In testing, three similar inputs were
      # configured: buyer / seller / nginx — the first two are application
      # logs, the last is the nginx log.
      service: buyer
    # Lift the custom fields to the top level of the event (instead of
    # nesting them under "fields.") so logstash can read [service] directly.
    fields_under_root: true
    #multiline.pattern: '^\['
    #multiline.negate: false

#============================= Filebeat modules ===============================
filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml
  # Set to true to enable config reloading
  reload.enabled: false
  # Period on which files under path should be checked for changes
  #reload.period: 10s

# The name of the shipper that publishes the network data. It can be used to
# group all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging

#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index.
# Loading the dashboards is disabled by default and can be enabled either by
# setting the options here, or by using the `-setup` CLI flag or the `setup`
# command.
#setup.dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For
# released versions, this URL points to the dashboard archive on the
# artifacts.elastic.co website.
#setup.dashboards.url:

#================================ Outputs =====================================
# Configure what output to use when sending the data collected by the beat.
output.logstash:
  # The Logstash hosts
  hosts: ["172.17.0.1:5044"]
  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"
  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

#============================== Xpack Monitoring ===============================
# filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#xpack.monitoring.enabled: false

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well. Any setting that is not set
# is automatically inherited from the Elasticsearch output configuration, so
# if you have the Elasticsearch output configured, you can simply uncomment
# the following line.
#xpack.monitoring.elasticsearch:
logstash配置为:
input {
  beats {
    port => "5044"
  }
}

filter {
  # syslog parsing (disabled) — when enabled, it extracts dev_* fields,
  # including dev_service, from raw syslog lines.
  #grok {
  #  match => {
  #    "message" => "<%{DATA:num}>%{SYSLOGTIMESTAMP:dev_date} %{SYSLOG5424PRINTASCII:dev_hostname} %{DATA:dev_service} %{GREEDYDATA:devel_message}"
  #  }
  #}

  # Route on the [service] field added by filebeat (fields_under_root: true)
  # to apply different parsing per source. json/grok are separate filter
  # plugins — see the official docs for details.
  if [service] == 'nginx' {
    json {
      #source => 'devel_message'
      source => 'message'
    }
  } else {
    json {
      #source => 'devel_message'
      source => 'message'
      # With a target set, the parsed JSON is nested under that field;
      # without one, the parsed keys are merged into the top level.
      target => 'format_msg'
      # add_field copies selected parsed values up to top-level ES fields.
      add_field => {
        "dev_context"   => "%{[format_msg][context]}"
        "dev_timestamp" => "%{[format_msg][timestamp]}"
        "dev_level"     => "%{[format_msg][level]}"
        "dev_category"  => "%{[format_msg][category]}"
        "dev_trances"   => "%{[format_msg][trances]}"
        "dev_message"   => "%{[format_msg][message]}"
      }
      remove_field => ["format_msg", "message", "devel_message"]
    }
  }
}

output {
  # Pick the target index from the filebeat-added [service] field.
  # NOTE(review): the original tested [dev_service] here, but that field is
  # only produced by the disabled syslog grok above — with filebeat input,
  # every event would have fallen through to the nginx branch.
  if [service] == 'buyer' {
    elasticsearch {
      index => "buyer-%{+YYYY.MM.dd}"
      hosts => ["192.158.0.2:9200"]
      # Index template to apply; created in ES if not already present.
      template => "/usr/share/logstash/pipeline/temple/jh-php-yii2.json"
    }
  } else if [service] == 'seller' {
    elasticsearch {
      index => "seller-%{+YYYY.MM.dd}"
      hosts => ["192.158.0.2:9200"]
      template => "/usr/share/logstash/pipeline/temple/jh-php-yii2.json"
    }
  } else {
    elasticsearch {
      index => "nginx-%{+YYYY.MM.dd}"
      hosts => ["192.158.0.2:9200"]
      template => "/usr/share/logstash/pipeline/temple/jh-nginx.json"
    }
  }
}
es和kibana配置不再详细描述。
template模板示例如下(jh-nginx.json,注意:严格的 JSON 不支持 # 注释,实际文件中不能包含注释):
{
  "order": 0,
  "version": 50001,
  "template": "nginx-*",
  "settings": {
    "index": {
      "refresh_interval": "5s"
    }
  },
  "mappings": {
    "_default_": {
      "dynamic_templates": [
        {
          "message_field": {
            "path_match": "message",
            "match_mapping_type": "string",
            "mapping": {
              "type": "text",
              "norms": false
            }
          }
        },
        {
          "string_fields": {
            "match": "nginx_*",
            "match_mapping_type": "string",
            "mapping": {
              "type": "text",
              "norms": false,
              "fields": {
                "keyword": {
                  "type": "keyword",
                  "ignore_above": 256
                }
              }
            }
          }
        }
      ],
      "properties": {
        "@timestamp": {
          "type": "date"
        },
        "@version": {
          "type": "keyword"
        },
        "geoip": {
          "dynamic": true,
          "properties": {
            "ip": {
              "type": "ip"
            },
            "location": {
              "type": "geo_point"
            },
            "latitude": {
              "type": "half_float"
            },
            "longitude": {
              "type": "half_float"
            }
          }
        }
      }
    }
  },
  "aliases": {}
}