All of the Kubernetes components emit logs in the same klog format:

Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg

For example:
E1221 13:53:13.024436 4853 controller.go:152] Unable to remove old endpoints from kubernetes service: StorageError: key not found, Code: 1, Key: /registry/masterleases/172.30.1.88, ResourceVersion: 0, AdditionalErrorMsg:
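As a quick sanity check of the format (a minimal sketch, not part of the pipeline), the header can be parsed with a plain regular expression; the capture-group names below are illustrative, not defined anywhere in the pipeline:

import re

# klog header: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
KLOG_RE = re.compile(
    r"^(?P<level>[IWEF])(?P<date>\d{4})\s+"    # severity letter + mmdd
    r"(?P<time>\d{2}:\d{2}:\d{2}\.\d{6})\s+"   # hh:mm:ss.uuuuuu
    r"(?P<threadid>\d+)\s+"                    # thread id
    r"(?P<file>[^:]+):(?P<line>\d+)\]\s+"      # file:line]
    r"(?P<msg>.*)"                             # message body
)

sample = ("E1221 13:53:13.024436 4853 controller.go:152] "
          "Unable to remove old endpoints from kubernetes service: ...")
print(KLOG_RE.match(sample).groupdict())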
Filebeat configuration file (for Filebeat 7.9+):
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /data/logs/kube-proxy/kube-proxy.*.root.log.ERROR.*
  max_bytes: 20480    # truncate single events larger than 20 KiB
  exclude_lines: ['^Running on machine', '^Log file created', '^Binary: Built with', '^Log line format']    # drop the klog file headers
  fields:
    service: kube-proxy-error-log
  multiline.pattern: ^E[0-9]{4}    # a new event starts with "E" + mmdd
  multiline.negate: true
  multiline.match: after    # non-matching lines are appended to the previous event

setup.ilm.enabled: false

output.kafka:    # ship events to Kafka
  enabled: true    # whether this output is enabled
  hosts: ["1.30.0.136:9092"]    # list of Kafka brokers
  topic: '%{[fields.service]}'    # Kafka creates this topic; Logstash consumes it (and can filter/modify events) and forwards to ES, where it becomes the index name
  partition.hash:
    reachable_only: true    # publish only to reachable partitions
  compression: gzip    # compress messages
  max_message_bytes: 1000000    # maximum event size in bytes (default 1000000); must not exceed the broker's message.max.bytes
  required_acks: 1    # Kafka ack level
  worker: 2    # maximum concurrency of the Kafka output
  bulk_max_size: 2048    # maximum number of events sent to Kafka in one request

logging.to_files: true    # write Filebeat's own logs to files (default true); files are rotated when the size limit is reached
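The three multiline settings are what keep continuation lines (wrapped messages, stack traces) attached to their error line: with negate: true and match: after, any line that does not start with "E" plus four digits is appended to the previous event. A minimal Python sketch of that grouping logic, purely illustrative (the function name group_multiline is made up; this is not Filebeat code):

import re

NEW_EVENT = re.compile(r"^E[0-9]{4}")    # same regex as multiline.pattern

def group_multiline(lines):
    """Group raw lines the way negate: true + match: after does."""
    events = []
    for line in lines:
        if NEW_EVENT.match(line) or not events:
            events.append(line)           # line starts a new event
        else:
            events[-1] += "\n" + line     # continuation: append to previous event
    return events

sample = [
    "E1221 13:53:13.024436 4853 controller.go:152] Unable to remove old endpoints",
    "goroutine 1 [running]:",             # continuation lines lack the E#### prefix
    "main.main()",
    "E1221 13:53:14.100000 4853 controller.go:160] next error",
]
print(len(group_multiline(sample)))       # -> 2 events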
Logstash configuration file:
input {
  kafka {
    bootstrap_servers => "1.30.0.136:9092"
    group_id => "logstash-kubelet-error-log"
    client_id => "logstash-kubelet-error-log"
    topics => ["kubelet-error-log"]
    codec => json
    consumer_threads => 9
    decorate_events => true
  }
}
filter {
  # record the size of the raw message in bytes
  ruby {
    code => "event.set('logsize', event.get('message').bytesize)"
  }
  # split the klog header into fields; date and time are captured together
  # as "logdate" so the date filter below can consume them
  grok {
    match => [
      "message", "^E(?<logdate>%{NUMBER}\s+%{TIME})\s+%{NUMBER:threadid}\s+%{DATA:file_line}\]\s+%{GREEDYDATA:content}"
    ]
  }
  # klog timestamps carry no year; the date filter assumes the current year
  date {
    match => [ "logdate", "MMdd HH:mm:ss.SSSSSS" ]
    target => "@timestamp"
    remove_field => [ "logdate" ]
  }
  mutate {
    remove_field => ["agent", "ecs"]
  }
}
output {
  elasticsearch {
    hosts => ["1.30.0.92:9200"]
    manage_template => false
    index => "test_kubelet-error-log-%{+YYYY.MM.dd}"
  }
  # also print each event to stdout, for debugging
  stdout {
    codec => "rubydebug"
  }
}
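To spot-check that events are landing in the daily indices, here is a minimal sketch using the official elasticsearch-py client; the host and index pattern are taken from the output block above, and the field names (file_line, content) from the grok filter:

from elasticsearch import Elasticsearch

es = Elasticsearch(["http://1.30.0.92:9200"])

resp = es.search(
    index="test_kubelet-error-log-*",    # matches the daily indices
    body={
        "size": 1,
        "sort": [{"@timestamp": {"order": "desc"}}],
        "query": {"match_all": {}},
    },
)
for hit in resp["hits"]["hits"]:
    print(hit["_source"].get("file_line"), hit["_source"].get("content"))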
ES