EFK入门从头到尾一条龙服务

日志收集 专栏收录该内容
2 篇文章 0 订阅

EFK(elasticsearch + filebeat + kafka + zookeeper + logstash + kibana)

环境:
centos7
192.168.32.153:jdk,zookeeper,kafka,filebeat,elasticsearch
192.168.32.154:jdk,zookeeper,kafka,logstash
192.168.32.155:jdk,zookeeper,kafka,kibana

1:时间同步

[root@localhost ~]# ntpdate pool.ntp.org

2:关闭防火墙和SELinux

[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# setenforce  0

3:修改主机名

[root@localhost ~]# hostnamectl set-hostname kafka1    # 在192.168.32.153上执行
[root@localhost ~]# hostnamectl set-hostname kafka2    # 在192.168.32.154上执行
[root@localhost ~]# hostnamectl set-hostname kafka3    # 在192.168.32.155上执行

4:修改hosts文件

192.168.32.153 kafka1
192.168.32.154 kafka2
192.168.32.155 kafka3

5:安装jdk

[root@kafka03 src]# rpm -ivh jdk-8u131-linux-x64.rpm

6:安装zookeeper

[root@kafka01 src]# tar xzf zookeeper-3.4.14.tar.gz
mv zookeeper-3.4.14 /usr/local/zookeeper
cd /usr/local/zookeeper/conf/
mv zoo_sample.cfg zoo.cfg

6.1: 编辑zoo.cfg(三台机器都添加如下集群配置)

server.1=192.168.32.153:2888:3888
server.2=192.168.32.154:2888:3888
server.3=192.168.32.155:2888:3888

7:创建data目录

mkdir /tmp/zookeeper

8:配置myid(每台机器只执行与自己编号对应的一条,三条不能在同一台机器上都执行)

echo "1" > /tmp/zookeeper/myid    # 在kafka1上执行
echo "2" > /tmp/zookeeper/myid    # 在kafka2上执行
echo "3" > /tmp/zookeeper/myid    # 在kafka3上执行

9:运行zk服务

/usr/local/zookeeper/bin/zkServer.sh start

10:查看zk的状态

[root@kafka03 conf]# /usr/local/zookeeper/bin/zkServer.sh status

一个leader
两个follower

11:安装kafka

 tar zxvf kafka_2.11-2.2.0.tgz 
 mv kafka_2.11-2.2.0 /usr/local/kafka

12 :编辑kafka配置文件

vim /usr/local/kafka/config/server.properties
broker.id=0    # 三台机器分别设置为0、1、2,集群内不能重复
advertised.listeners=PLAINTEXT://kafka1:9092    # 填写本机主机名,三台分别为kafka1、kafka2、kafka3

zookeeper.connect=192.168.32.153:2181,192.168.32.154:2181,192.168.32.155:2181

13:启动kafka

-daemon 表示以后台守护进程方式启动,其后跟要引用的配置文件

/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties

验证:


netstat  -lptnu|grep 9092
tcp6       0      0 :::9092                 :::*                    LISTEN      9814/java    

14:创建一个topic

[root@kafka01 logs]# /usr/local/kafka/bin/kafka-topics.sh --create --zookeeper 192.168.32.153:2181 --replication-factor 2 --partitions 3 --topic wg007
Created topic wg007.

15:模拟生产者:

 /usr/local/kafka/bin/kafka-console-producer.sh --broker-list 192.168.32.153:9092 --topic wg007

16:模拟消费者:

 /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.32.153:9092 --topic wg007 --from-beginning

16.2 查看当前的topic

[root@kafka02 bin]# /usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.32.153:2181

__consumer_offsets
msg
wg007

17:安装filebeat(收集日志的)

[root@kafka01 src]# rpm -ivh filebeat-6.8.12-x86_64.rpm

18:编辑filebeat.yml

cd /etc/filebeat

mv filebeat.yml filebeat.yml.bak

vim filebeat.yml

filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/messages

output.kafka:
 enabled: true
 hosts: ["192.168.32.153:9092","192.168.32.154:9092","192.168.32.155:9092"]
 topic: msg

systemctl start filebeat

19: 安装logstash

rpm -ivh logstash-6.6.0.rpm

vim /etc/logstash/conf.d/msg.conf
input{
        kafka{
                bootstrap_servers => ["192.168.32.153:9092,192.168.32.154:9092,192.168.32.155:9092"]
                group_id => "logstash"
                topics => "msg"
                consumer_threads => 5
        }
}

output{
        elasticsearch{
                hosts => "192.168.32.153:9200"
                index => "msg-%{+YYYY.MM.dd}"
        }
}

20: 安装elasticsearch

rpm -ivh elasticsearch-6.6.2.rpm
vim /etc/elasticsearch/elasticsearch.yml
cluster.name: wg007
node.name: node-1
network.host: 192.168.32.153
http.port: 9200

21:安装kibana

rpm -ivh kibana-6.6.2-x86_64.rpm
vim /etc/kibana/kibana.yml
server.port: 5601
server.host: "192.168.32.155"
elasticsearch.hosts: ["http://192.168.32.153:9200"]
  • 0
    点赞
  • 0
    评论
  • 0
    收藏
  • 一键三连
    一键三连
  • 扫一扫,分享海报

©️2020 CSDN 皮肤主题: 数字20 设计师:CSDN官方博客 返回首页
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、C币套餐、付费专栏及课程。

余额充值