filebeat+zookeeper+kafka配置笔记

2 篇文章 0 订阅
2 篇文章 0 订阅

filebeat

filebeat.prospectors:

# Each - is a prospector. Most options can be set at the prospector level, so
# you can use different prospectors for various configurations.
# Below are the prospector specific configurations.
# Each prospector tails one log family and tags it via `fields.log_topics`,
# which the kafka output below uses to route events to a per-type topic.
# NOTE(review): `filebeat.prospectors` is the pre-6.3 key name — on newer
# Filebeat versions this section is `filebeat.inputs`; confirm the version.
- type: log

  # Change to true to enable this prospector configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - D:/bigdata/*/log/device/*.log
    #- c:\programdata\elasticsearch\logs\*
  fields:
    log_topics: log_device_topic
- type: log

  # Change to true to enable this prospector configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - D:/bigdata/*/log/system/*.log
    #- c:\programdata\elasticsearch\logs\*
  fields:
    log_topics: log_system_topic
- type: log

  # Change to true to enable this prospector configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - D:/bigdata/*/log/user_operate/*.log
    #- c:\programdata\elasticsearch\logs\*
  fields:
    log_topics: log_useroperate_topic
- type: log

  # Change to true to enable this prospector configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - D:/bigdata/*/log/event/*.log
    #- c:\programdata\elasticsearch\logs\*
  fields:
    log_topics: log_alarmevent_topic
#----------------------------- kafka output --------------------------------
output.kafka:
  # Boolean flag to enable or disable the output module.
  enabled: true

  # The list of Kafka broker addresses from where to fetch the cluster metadata.
  # The cluster metadata contain the actual Kafka brokers events are published
  # to.
  # FIX: the second entry was ",ip:9092" — a stray leading comma made it a
  # malformed broker address. "ip" is a placeholder; substitute the real
  # addresses of the three brokers.
  hosts: ["ip:9092", "ip:9092", "ip:9092"]

  # The Kafka topic used for produced events. The setting can be a format string
  # using any event field. To set the topic from document type use `%{[type]}`.
  # Here it reads the `fields.log_topics` value set by each prospector above,
  # routing every log family to its own topic.
  topic: '%{[fields][log_topics]}'

zookeeper

# ZooKeeper ensemble configuration (zoo.cfg) for a 3-node cluster.
# Base time unit in milliseconds, used for heartbeats and as the unit
# for initLimit/syncLimit below.
tickTime=2000
# Max ticks a follower may take to connect to and sync with the leader.
initLimit=10
# Max ticks a follower may lag behind the leader before being dropped.
syncLimit=5
# Directory for in-memory database snapshots.
dataDir=/home/bigdata/app/base/install/zookeeper-3.4.14/data
# Separate directory for the transaction log (keeps write IO isolated).
dataLogDir=/home/bigdata/app/base/install/zookeeper-3.4.14/log
# Port that clients connect to.
clientPort=2181
# Ensemble members: server.<myid>=<host>:<quorum-port>:<leader-election-port>.
# NOTE(review): "ip" is a placeholder — replace with each node's real address,
# and make sure each node's data/myid file matches its server.N number.
server.1=ip:2888:3888
server.2=ip:2888:3888
server.3=ip:2888:3888

kafka

# Kafka broker configuration (server.properties) for a 3-broker cluster
# (kafka_2.11-1.1.1, per log.dirs path below).
# Unique id of this broker; each broker in the cluster needs a distinct value.
broker.id=0
# Listener port. NOTE(review): `port` and `host.name` are legacy keys — on
# this Kafka version `listeners=PLAINTEXT://host:9092` is preferred; confirm.
port=9092
# NOTE(review): "ip" is a placeholder — replace with this node's real address.
host.name=ip
# Default partition count for auto-created topics.
num.partitions=3
# Replication for the internal consumer-offsets topic.
offsets.topic.replication.factor=3
# Replication and min in-sync replicas for the transaction state log.
transaction.state.log.replication.factor=3
transaction.state.log.min.isr=3
# Default replication factor for auto-created topics.
default.replication.factor=3
# Thread pools for network request handling and disk IO.
num.network.threads=3
num.io.threads=8
# Socket buffer sizes and max request size, in bytes.
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
# Directory where partition data (log segments) is stored.
log.dirs=/home/bigdata/app/base/install/kafka_2.11-1.1.1/logs
# Recovery threads per data dir used at startup/shutdown.
num.recovery.threads.per.data.dir=1
# Retention: delete data older than 48 hours, or beyond the 200 GiB size cap
# (214748364800 bytes), whichever limit is reached first.
log.retention.hours=48
log.retention.bytes=214748364800
# Roll to a new log segment at 1 GiB.
log.segment.bytes=1073741824
# How often the retention checker runs, in milliseconds.
log.retention.check.interval.ms=300000
# ZooKeeper connection timeout in milliseconds.
zookeeper.connection.timeout.ms=6000
# Delay before the first consumer-group rebalance, in milliseconds.
group.initial.rebalance.delay.ms=0
# ZooKeeper connection string for the ensemble configured above.
# NOTE(review): "ip" entries are placeholders — use the three ZK node addresses.
zookeeper.connect=ip:2181,ip:2181,ip:2181
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值