ELK部署

  • 这些组件理想情况下使用同一版本,以保证相互兼容

  • 配置好要拉取的日志文件

 

 pwd

/var/log/linwei_all

 ll

total 19420

-rw-r--r-- 1 root root 3307971 Nov 1 19:26 access.a.log

-rw-r--r-- 1 root root 3307971 Nov 1 14:54 access.b.log

-rw-r--r-- 1 root root 3307971 Nov 1 14:54 access.c.log

-rw-r--r-- 1 root root 3307971 Nov 1 14:54 access.d.log

-rw-r--r-- 1 root root 3307971 Nov 1 14:54 access.f.log

-rw-r--r-- 1 root root 3307971 Nov 1 14:54 access.log

nginx日志格式为

 

log_format main escape=default '$remote_addr -[$time_local] -[$host] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for" request_time[$request_time] upstream_response_time[$upstream_response_time] upstream_addr[$upstream_addr] logId[$http_x_logid] imid[$http_imid] statistic[$statistic] Bfe_logid[$http_bfe_logid] CLIENTIP[$http_clientip] device_id[$cookie_device_id] X-Deviceid[$http_x_deviceid] route[$http_route] product[$http_product] subsys[$http_subsys]';

uninitialized_variable_warn off;

  • filebeat

    • 配置文件

 

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/linwei_all/*.log

output.kafka:
  hosts: ["172.31.9.52:9092"]
  enabled: true
  topic: test2
  ##### 将filebeat的默认输出取消
  codec.format:
    string: "%{[message]}"

  • zookeeper

  • 配置文件

 

 pwd

/app/tools

 egrep -v '^#|^$' server1/zookeeper/conf/zoo.cfg

tickTime=2000

initLimit=10

syncLimit=5

dataDir=/app/tools/data/zk1

dataLogDir=/app/tools/logs/zk1

clientPort=2181

maxClientCnxns=60

server.1=172.31.9.52:2888:3888

server.2=172.31.9.52:2889:3889

server.3=172.31.9.52:2890:3890

[root@bjkjy-bkm-neisou-spider.bjkjy.baidu.com tools]# egrep -v '^#|^$' server2/zookeeper/conf/zoo.cfg

tickTime=2000

initLimit=10

syncLimit=5

dataDir=/app/tools/data/zk2

dataLogDir=/app/tools/logs/zk2

clientPort=2182

maxClientCnxns=60

server.1=172.31.9.52:2888:3888

server.2=172.31.9.52:2889:3889

server.3=172.31.9.52:2890:3890

egrep -v '^#|^$' server3/zookeeper/conf/zoo.cfg

tickTime=2000

initLimit=10

syncLimit=5

dataDir=/app/tools/data/zk3

dataLogDir=/app/tools/logs/zk3

clientPort=2183

maxClientCnxns=60

server.1=172.31.9.52:2888:3888

server.2=172.31.9.52:2889:3889

server.3=172.31.9.52:2890:3890

  • kafka

    • 配置文件

 

 pwd

/app/tools/kafka/config

[root@bjkjy-bkm-neisou-spider.bjkjy.baidu.com config]# egrep -v '^#|^$' server.properties

broker.id=1

num.network.threads=3

num.io.threads=8

socket.send.buffer.bytes=102400

socket.receive.buffer.bytes=102400

socket.request.max.bytes=104857600

log.dirs=../data

num.partitions=3

num.recovery.threads.per.data.dir=1

offsets.topic.replication.factor=1

transaction.state.log.replication.factor=1

transaction.state.log.min.isr=1

log.retention.hours=168

log.segment.bytes=1073741824

log.retention.check.interval.ms=300000

zookeeper.connect=172.31.9.52:2181,172.31.9.52:2182,172.31.9.52:2183

zookeeper.connection.timeout.ms=18000

    • 查看topic主题

 

bin/kafka-topics.sh --bootstrap-server kafkahost:9092 --list

__consumer_offsets

kingmouse

logstash-kafka-topic

talking

test

test2

    • 查看消费者

 

pwd

/app/tools/kafka/bin

[root@bjkjy-bkm-neisou-spider.bjkjy.baidu.com bin]# ./kafka-consumer-groups.sh --bootstrap-server kafkahost:9092 --describe --group kafkatest

GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID

kafkatest test2 0 66927 66927 0 kafkatest-0-4ff79b8c-4a60-4f1e-adbd-6790aef112e4 /172.31.9.52 kafkatest-0

kafkatest test2 1 66750 66750 0 kafkatest-0-4ff79b8c-4a60-4f1e-adbd-6790aef112e4 /172.31.9.52 kafkatest-0

kafkatest test2 2 66890 66890 0 kafkatest-0-4ff79b8c-4a60-4f1e-adbd-6790aef112e4 /172.31.9.52 kafkatest-0

  • logstash

    • jdk环境:下载jdk11,其他大部分jdk版本不兼容

    • 配置文件

 

cat logstash.conf

input {
  kafka {
    bootstrap_servers => "172.31.9.52:9092"
    client_id => "kafkatest"
    group_id => "kafkatest"
    auto_offset_reset => "latest"
    consumer_threads => "1"
    decorate_events => true
    topics => ["test2"]
  }
  # file {
  #   path => "/var/log/linwei_all/*.log"
  #   type => "log"
  # }
}

filter {
  grok {
    patterns_dir => "/etc/logstash/grok/patterns"
    ## 配置日志匹配规则
    match => {
      "message" => '%{IP:remote_addr} -\[%{HTTPDATE:time_local}\] -\[%{USER:host}\] %{QUOTEDSTRING:request} %{INT:status} %{INT:body_bytes_sent} %{QUOTEDSTRING:http_referer} %{QUOTEDSTRING:http_user_agent} %{QUOTEDSTRING:http_x_forwarded_for} request_time\[%{SECOND:request_time}\] upstream_response_time\[%{SECOND:upstream_response_time}\] upstream_addr\[%{DATA:upstream_addr}\] logId\[%{INT:http_x_logid}\] imid\[%{INT:http_imid}\] statistic\[%{DATA:statistic}\] Bfe_logid\[%{INT:http_bfe_logid}\] CLIENTIP\[%{IP:http_clientip}\] device_id\[%{DATA:cookie_device_id}\] X-Deviceid\[%{DATA:http_x_deviceid}\] route\[%{DATA:http_route}\] product\[%{DATA:http_product}\] subsys\[%{DATA:http_subsys}\]'
    }
  }
}

output {
  elasticsearch {
    hosts => "http://172.31.9.52:9200"
    index => "logs_linwei_data"
    timeout => 300
  }
  stdout {
    codec => rubydebug
  }
}

  • es

    • 配置文件

 

 pwd

/etc/elasticsearch

cat elasticsearch.yml

node.name: node-1

path.data: /var/lib/elasticsearch

path.logs: /var/log/elasticsearch

#bootstrap.memory_lock: true

network.host: '0.0.0.0'

http.port: 9200

discovery.seed_hosts: ["172.31.9.52"]

cluster.initial_master_nodes: ["172.31.9.52"]

  • kibana

    • 配置文件

 

 pwd

/etc/kibana

cat kibana.yml

server.port: 8089

server.host: "172.31.9.52"

elasticsearch.hosts: [ "http://172.31.9.52:9200" ]

kibana.index: ".kibana"

logging.dest: /var/log/kibana/kibana.log

logging.quiet: false

i18n.locale: "zh-CN"

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值