1、下载解压Kafka
# Step 1: unpack the Kafka distribution and create a version-agnostic symlink.
cd /home/hadoop/app
tar -zxvf kafka-2.13-3.2.0.tgz
rm kafka-2.13-3.2.0.tgz  # free disk space once extracted
# NOTE(review): the official Apache archive is named kafka_2.13-3.2.0.tgz
# (underscore between "kafka" and the Scala version) — confirm the local file name.
ln -s kafka-2.13-3.2.0 kafka  # "kafka" symlink keeps later paths version-independent
2、修改Kafka配置文件
修改zookeeper.properties
dataDir=/home/hadoop/data/zookeeper/zkdata
clientPort=2181
# 注:若要用此文件在三台机器上组成ZooKeeper集群,还需添加 server.1=hadoop01:2888:3888 等条目,
# 并在各节点的dataDir下创建对应的myid文件;仅有dataDir/clientPort时ZooKeeper以单机模式运行。
修改consumer.properties
bootstrap.servers=hadoop01:9092,hadoop02:9092,hadoop03:9092
修改producer.properties
bootstrap.servers=hadoop01:9092,hadoop02:9092,hadoop03:9092
修改server.properties
zookeeper.connect=hadoop01:2181,hadoop02:2181,hadoop03:2181
3、kafka安装目录分发到hadoop02、hadoop03
# Step 3: copy the Kafka install directory to the other broker hosts.
cd /home/hadoop/app
scp -r kafka-2.13-3.2.0 hadoop@hadoop02:/home/hadoop/app
scp -r kafka-2.13-3.2.0 hadoop@hadoop03:/home/hadoop/app
ln -s kafka-2.13-3.2.0 kafka #create the symlink on hadoop02 and hadoop03 as well
4、修改Server编号
# Step 4: give each broker a unique id in config/server.properties —
# broker.id must differ on every node of the cluster.
cd /home/hadoop/app/kafka/config
vi server.properties
broker.id=1 #set this value on hadoop01
broker.id=2 #set this value on hadoop02
broker.id=3 #set this value on hadoop03
5、启动Kafka集群
# Step 5: start a broker on every node (-daemon runs it in the background).
cd /home/hadoop/app/kafka
bin/kafka-server-start.sh -daemon config/server.properties #run on hadoop01, hadoop02 and hadoop03
6、验证集群
jps #run on hadoop01, hadoop02 and hadoop03; a "Kafka" JVM process on each node means startup succeeded
7、命令验证
# Step 7: smoke-test the cluster with the console tools.
cd /home/hadoop/app/kafka/bin
# Create a topic with 3 partitions, each replicated to all 3 brokers.
kafka-topics.sh --bootstrap-server hadoop01:9092 --create --topic mydemo2 --replication-factor 3 --partitions 3
# Start a console consumer (reads messages from the topic).
kafka-console-consumer.sh --bootstrap-server hadoop01:9092 --topic mydemo2
# Start a console producer; messages typed here should appear in the consumer.
# (--broker-list is deprecated since Kafka 2.5 and removed in 4.0; use --bootstrap-server.)
kafka-console-producer.sh --bootstrap-server hadoop01:9092 --topic mydemo2