1、解压安装包
# Extract the Kafka distribution into /root/apps.
# NOTE: version fixed to 0.11.0.2 to match every path used below,
# and -C added so the archive lands under /root/apps as later steps assume.
tar -zxvf kafka_2.11-0.11.0.2.tgz -C /root/apps
2、创建日志目录
# Create the data/log directory referenced by log.dirs in server.properties.
# -p: create parents as needed and do not fail if it already exists.
mkdir -p /root/apps/kafka_2.11-0.11.0.2/logs
3、修改配置文件
cd config/
vi server.properties
# Globally unique broker id; must differ on every node in the cluster
broker.id=1
# Allow topics to be deleted via the admin tools
delete.topic.enable=true
# Directory where Kafka stores its data/log segments (must exist — created in step 2)
log.dirs=/root/apps/kafka_2.11-0.11.0.2/logs
# ZooKeeper ensemble connection string
zookeeper.connect=hadoop1:2181,hadoop2:2181,hadoop3:2181
4、配置环境变量:vi /etc/profile(修改后执行 source /etc/profile 使配置生效)
# Kafka installation root
export KAFKA_HOME=/root/apps/kafka_2.11-0.11.0.2
# Put the Kafka CLI scripts on PATH (expansion quoted so the existing
# PATH survives even if it ever contains spaces)
export PATH="$PATH:$KAFKA_HOME/bin"
5、将安装目录和环境变量配置分发到其他节点
# Copy the whole Kafka installation to the other broker hosts
scp -r /root/apps/kafka_2.11-0.11.0.2 root@hadoop2:/root/apps
scp -r /root/apps/kafka_2.11-0.11.0.2 root@hadoop3:/root/apps
# Copy the environment settings as well; run 'source /etc/profile' on each host afterwards
scp /etc/profile root@hadoop2:/etc
scp /etc/profile root@hadoop3:/etc
6、分别修改 hadoop2、hadoop3 上的 config/server.properties:
vi server.properties
在 hadoop2 上设置 broker.id=2,在 hadoop3 上设置 broker.id=3(broker.id 必须全局唯一,不能重复)
7、
启动集群(三台机器)
# Start the broker with the script's own -daemon flag instead of a bare '&':
# a '&' background job is killed when the login shell exits, while -daemon
# detaches properly (equivalent to nohup + background + log redirection).
bin/kafka-server-start.sh -daemon config/server.properties
关闭集群
# kafka-server-stop.sh takes no arguments — the stray 'stop' word was ignored.
bin/kafka-server-stop.sh