# =========================== filebeat.yml ===========================
# Ships application logs to Kafka, routing each log directory to its
# own topic via the custom field `fields.log_topics`.
# NOTE(review): `filebeat.prospectors` is the pre-6.3 key name; newer
# Filebeat versions call this `filebeat.inputs` — confirm the Filebeat
# version in use before renaming.
filebeat.prospectors:
  # Each - is a prospector. Most options can be set at the prospector level, so
  # you can use different prospectors for various configurations.
  # Below are the prospector specific configurations.

  # Device logs -> topic log_device_topic
  - type: log
    # Change to true to enable this prospector configuration.
    enabled: true
    # Paths that should be crawled and fetched. Glob based paths.
    paths:
      - 'D:/bigdata/*/log/device/*.log'
      #- 'c:\programdata\elasticsearch\logs\*'
    fields:
      log_topics: log_device_topic

  # System logs -> topic log_system_topic
  - type: log
    # Change to true to enable this prospector configuration.
    enabled: true
    # Paths that should be crawled and fetched. Glob based paths.
    paths:
      - 'D:/bigdata/*/log/system/*.log'
      #- 'c:\programdata\elasticsearch\logs\*'
    fields:
      log_topics: log_system_topic

  # User-operation logs -> topic log_useroperate_topic
  - type: log
    # Change to true to enable this prospector configuration.
    enabled: true
    # Paths that should be crawled and fetched. Glob based paths.
    paths:
      - 'D:/bigdata/*/log/user_operate/*.log'
      #- 'c:\programdata\elasticsearch\logs\*'
    fields:
      log_topics: log_useroperate_topic

  # Alarm/event logs -> topic log_alarmevent_topic
  - type: log
    # Change to true to enable this prospector configuration.
    enabled: true
    # Paths that should be crawled and fetched. Glob based paths.
    paths:
      - 'D:/bigdata/*/log/event/*.log'
      #- 'c:\programdata\elasticsearch\logs\*'
    fields:
      log_topics: log_alarmevent_topic

#----------------------------- kafka output --------------------------------
output.kafka:
  # Boolean flag to enable or disable the output module.
  enabled: true
  # The list of Kafka broker addresses from where to fetch the cluster metadata.
  # The cluster metadata contain the actual Kafka brokers events are published
  # to.
  # "ip" is a placeholder — replace each entry with a real broker address.
  # (Fixed: the second entry previously contained a stray comma inside the
  # quoted string, ",ip:9092", which is an invalid broker address.)
  hosts: ["ip:9092", "ip:9092", "ip:9092"]
  # The Kafka topic used for produced events. The setting can be a format string
  # using any event field. To set the topic from document type use `%{[type]}`.
  # Here the topic comes from the per-prospector custom field defined above.
  topic: '%{[fields][log_topics]}'
# =========================== zoo.cfg (ZooKeeper) ===========================
# Three-node ZooKeeper ensemble configuration (zookeeper-3.4.14).

# Basic time unit in milliseconds; heartbeats and timeouts are multiples of it.
tickTime=2000
# Ticks a follower may take to connect and sync to the leader at startup.
initLimit=10
# Ticks a follower may lag behind the leader before being considered dead.
syncLimit=5
# Directory for in-memory database snapshots; each server's myid file lives here.
dataDir=/home/bigdata/app/base/install/zookeeper-3.4.14/data
# Separate directory for transaction logs (keeping it apart from snapshots
# avoids contention on the snapshot disk).
dataLogDir=/home/bigdata/app/base/install/zookeeper-3.4.14/log
# Port on which clients connect.
clientPort=2181
# Ensemble members: server.N=host:peerPort:leaderElectionPort
# "ip" is a placeholder — replace with the real host of each node; the myid
# file under dataDir on each host must contain that host's N.
server.1=ip:2888:3888
server.2=ip:2888:3888
server.3=ip:2888:3888
# ====================== server.properties (Kafka broker) ======================
# Kafka 2.11-1.1.1 broker configuration for a 3-broker cluster.

# Unique id of this broker; must differ on every broker in the cluster.
broker.id=0
# NOTE(review): `port` and `host.name` are legacy settings; current Kafka
# versions prefer `listeners=PLAINTEXT://host:9092` — confirm broker version
# before migrating. "ip" is a placeholder for this broker's address.
port=9092
host.name=ip
# Default number of partitions for auto-created topics.
num.partitions=3
# Replication for the internal __consumer_offsets topic (3 = survives one
# broker loss with a 3-node cluster).
offsets.topic.replication.factor=3
# Replication and minimum in-sync replicas for the transaction state log.
transaction.state.log.replication.factor=3
transaction.state.log.min.isr=3
# Default replication factor for auto-created topics.
default.replication.factor=3
# Threads handling network requests / disk I/O.
num.network.threads=3
num.io.threads=8
# Socket buffer and maximum request sizes (bytes).
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
# Directory where the broker stores its log segments.
log.dirs=/home/bigdata/app/base/install/kafka_2.11-1.1.1/logs
# Recovery threads per data directory (startup/shutdown log handling).
num.recovery.threads.per.data.dir=1
# Retention: delete segments older than 48h or once a partition exceeds
# 200 GiB (214748364800 bytes), whichever comes first.
log.retention.hours=48
log.retention.bytes=214748364800
# Maximum size of a single log segment file (1 GiB).
log.segment.bytes=1073741824
# How often the retention checker runs (ms).
log.retention.check.interval.ms=300000
# ZooKeeper session establishment timeout (ms).
zookeeper.connection.timeout.ms=6000
# Delay before the first consumer-group rebalance after startup (ms).
group.initial.rebalance.delay.ms=0
# ZooKeeper ensemble; "ip" entries are placeholders — replace with the
# addresses of the three ZooKeeper nodes.
zookeeper.connect=ip:2181,ip:2181,ip:2181