● zk集群事先已准备好
● 上传kafka压缩包至Linux解压
[root@hadoop-01 ~]# tar -zxvf kafka_2.11-0.8.2.2.tgz -C /root/
[root@hadoop-01 ~]# cp -r kafka_2.11-0.8.2.2 ./apps/
● 修改kafka配置
[root@hadoop-01 ~]# cd ./apps/kafka_2.11-0.8.2.2/config/
[root@hadoop-01 config]# vi server.properties
broker.id=0 #每台机器必须唯一
host.name=hadoop-01 #可不写
log.dirs=/data/kafka #注意:这不是日志路径,这是kafka数据保存路径
zookeeper.connect=hadoop-01:2181,hadoop-02:2181,hadoop-03:2181 # zk集群
● 拷贝至每个机器
[root@hadoop-01 apps]# scp -r kafka_2.11-0.8.2.2/ hadoop-02:$PWD
[root@hadoop-01 apps]# scp -r kafka_2.11-0.8.2.2/ hadoop-03:$PWD
拷贝完成后,在hadoop-02、hadoop-03上分别编辑server.properties,修改broker.id和host.name这2个属性即可,比如
hadoop-02改为broker.id=1 ,host.name=hadoop-02
hadoop-03改为broker.id=2 ,host.name=hadoop-03
● 启动
#这是以后台的形式启动,如果不需要后台启动,去掉-daemon即可
[root@hadoop-01 apps]# ./kafka_2.11-0.8.2.2/bin/kafka-server-start.sh -daemon ./kafka_2.11-0.8.2.2/config/server.properties
#查看进程
[root@hadoop-01 apps]# jps
24567 Master
15226 CoarseGrainedExecutorBackend
8812 Worker
16351 Kafka
16463 Jps
7791 QuorumPeerMain