Kafka and ZooKeeper Cluster Installation

http://kafka.apache.org/
http://archive.cloudera.com/kafka/
Install the JDK
Configure the hosts file
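If the JDK is not already present, one option (assuming CentOS/RHEL hosts, which the prompts below suggest) is OpenJDK 8 from the distribution repositories; verify with java -version on every node:
[root@kafka001 ~]# yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel
[root@kafka001 ~]# java -version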
::1     localhost       localhost.localdomain   localhost6      localhost6.localdomain6
127.0.0.1       localhost       localhost.localdomain   localhost4      localhost4.localdomain4
172.16.236.53   kafka001        kafka001
172.16.236.52   kafka002        kafka002
172.16.236.51   kafka003        kafka003
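Before continuing, it is worth confirming that every hostname resolves on every node, for example with a quick ping from each machine:
[root@kafka001 ~]# ping -c 1 kafka002
[root@kafka001 ~]# ping -c 1 kafka003
[root@kafka002 ~]# ping -c 1 kafka001
[root@kafka003 ~]# ping -c 1 kafka001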
Download ZooKeeper
[root@kafka001 ~]# mkdir software
[root@kafka001 software]# wget https://mirrors.tuna.tsinghua.edu.cn/apache/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz
[root@kafka002 software]# wget https://mirrors.tuna.tsinghua.edu.cn/apache/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz
[root@kafka003 software]# wget https://mirrors.tuna.tsinghua.edu.cn/apache/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz
[root@kafka001 ~]# mkdir -p app && tar -zxvf software/zookeeper-3.4.14.tar.gz -C app/
[root@kafka002 ~]# mkdir -p app && tar -zxvf software/zookeeper-3.4.14.tar.gz -C app/
[root@kafka003 ~]# mkdir -p app && tar -zxvf software/zookeeper-3.4.14.tar.gz -C app/
Configure ZooKeeper
cd /root/app/zookeeper-3.4.14/conf
[root@kafka001 conf]# cp zoo_sample.cfg zoo.cfg 
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/root/app/zookeeper-3.4.14/data
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
server.1=kafka001:2888:3888
server.2=kafka002:2888:3888
server.3=kafka003:2888:3888
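In the server.N entries, 2888 is the port followers use to connect to the leader and 3888 is used for leader election; clients (including the Kafka brokers) connect on clientPort 2181. If firewalld is enabled on these hosts (an assumption; it may already be disabled), the ports need to be opened on each node, for example:
[root@kafka001 ~]# firewall-cmd --permanent --add-port=2181/tcp --add-port=2888/tcp --add-port=3888/tcp --add-port=9092/tcp
[root@kafka001 ~]# firewall-cmd --reload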
[root@kafka001 conf]# scp zoo.cfg kafka002:/root/app/zookeeper-3.4.14/conf/
[root@kafka001 conf]# scp zoo.cfg kafka003:/root/app/zookeeper-3.4.14/conf/
[root@kafka001 zookeeper-3.4.14]# mkdir data
[root@kafka001 zookeeper-3.4.14]# touch data/myid
[root@kafka001 zookeeper-3.4.14]# echo 1 > data/myid 
[root@kafka002 zookeeper-3.4.14]# mkdir data
[root@kafka002 zookeeper-3.4.14]# touch data/myid
[root@kafka002 zookeeper-3.4.14]# echo 2 > data/myid
[root@kafka003 zookeeper-3.4.14]# mkdir data
[root@kafka003 zookeeper-3.4.14]# touch data/myid
[root@kafka003 zookeeper-3.4.14]# echo 3 > data/myid
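The value written to data/myid must match the N of the corresponding server.N line in zoo.cfg, and the data directory must match dataDir. A quick sanity check on each node:
[root@kafka001 zookeeper-3.4.14]# cat data/myid
1
[root@kafka002 zookeeper-3.4.14]# cat data/myid
2
[root@kafka003 zookeeper-3.4.14]# cat data/myid
3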
Start ZooKeeper
[root@kafka001 bin]# ./zkServer.sh start
[root@kafka002 bin]# ./zkServer.sh start
[root@kafka003 bin]# ./zkServer.sh start
[root@kafka002 zookeeper-3.4.14]# ./bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /root/app/zookeeper-3.4.14/bin/../conf/zoo.cfg
Mode: leader
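The other two nodes should report Mode: follower. To confirm the ensemble is actually serving requests, a zkCli.sh session against any member works as a quick check (a sketch, run from the ZooKeeper install directory):
[root@kafka001 zookeeper-3.4.14]# ./bin/zkServer.sh status
[root@kafka003 zookeeper-3.4.14]# ./bin/zkServer.sh status
[root@kafka001 zookeeper-3.4.14]# ./bin/zkCli.sh -server kafka001:2181
[zk: kafka001:2181(CONNECTED) 0] ls /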
Install Kafka
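The Kafka tarball needs to be downloaded to the software directory on each node first; the Apache release archive is one possible source (the exact mirror is an assumption, any copy of kafka_2.11-0.10.0.0.tgz works):
[root@kafka001 software]# wget https://archive.apache.org/dist/kafka/0.10.0.0/kafka_2.11-0.10.0.0.tgz
[root@kafka002 software]# wget https://archive.apache.org/dist/kafka/0.10.0.0/kafka_2.11-0.10.0.0.tgz
[root@kafka003 software]# wget https://archive.apache.org/dist/kafka/0.10.0.0/kafka_2.11-0.10.0.0.tgz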
[root@kafka001 software]# tar -zxvf kafka_2.11-0.10.0.0.tgz -C /root/app/
[root@kafka002 software]# tar -zxvf kafka_2.11-0.10.0.0.tgz -C /root/app/
[root@kafka003 software]# tar -zxvf kafka_2.11-0.10.0.0.tgz -C /root/app/
cd /root/app/kafka_2.11-0.10.0.0/config
[root@kafka001 config]# vi server.properties
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
host.name=kafka001
port=9092
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=kafka001:2181,kafka002:2181,kafka003:2181/kafka

[root@kafka002 config]# vi server.properties
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=1
host.name=kafka002
port=9092
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=kafka001:2181,kafka002:2181,kafka003:2181/kafka

[root@kafka003 config]# vi server.properties
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=2
host.name=kafka003
port=9092
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=kafka001:2181,kafka002:2181,kafka003:2181/kafka
Start Kafka
[root@kafka001 kafka_2.11-0.10.0.0]# ./bin/kafka-server-start.sh ./config/server.properties 
[root@kafka002 kafka_2.11-0.10.0.0]# ./bin/kafka-server-start.sh ./config/server.properties 
[root@kafka003 kafka_2.11-0.10.0.0]# ./bin/kafka-server-start.sh ./config/server.properties
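The commands above run each broker in the foreground; kafka-server-start.sh also accepts a -daemon flag to run in the background. Because the chroot /kafka was appended to zookeeper.connect, every tool that takes a ZooKeeper address must include it. A minimal smoke test (the topic name test is just an example) could look like this:
[root@kafka001 kafka_2.11-0.10.0.0]# ./bin/kafka-topics.sh --create --zookeeper kafka001:2181,kafka002:2181,kafka003:2181/kafka --replication-factor 3 --partitions 3 --topic test
[root@kafka001 kafka_2.11-0.10.0.0]# ./bin/kafka-topics.sh --describe --zookeeper kafka001:2181,kafka002:2181,kafka003:2181/kafka --topic test
[root@kafka001 kafka_2.11-0.10.0.0]# ./bin/kafka-console-producer.sh --broker-list kafka001:9092,kafka002:9092,kafka003:9092 --topic test
[root@kafka002 kafka_2.11-0.10.0.0]# ./bin/kafka-console-consumer.sh --zookeeper kafka001:2181,kafka002:2181,kafka003:2181/kafka --topic test --from-beginning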

 
