Download Kafka
https://archive.apache.org/dist/kafka/1.1.1/kafka_2.11-1.1.1.tgz
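The archive can be pulled straight from the Apache archive with wget, for example (download directory is up to you):
wget https://archive.apache.org/dist/kafka/1.1.1/kafka_2.11-1.1.1.tgz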
Extract to the installation directory
tar -zxvf kafka_2.11-1.1.1.tgz -C ../servers
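The same directory layout is needed on all three nodes. One option is to extract on node01 and copy the result over (a sketch, assuming passwordless SSH and that /export/servers already exists on node02 and node03):
scp -r /export/servers/kafka_2.11-1.1.1 node02:/export/servers/
scp -r /export/servers/kafka_2.11-1.1.1 node03:/export/servers/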
Configuration files
Node 1 (node01): edit the Kafka configuration file server.properties
broker.id=0
listeners=PLAINTEXT://node01:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/export/servers/kafka_2.11-1.1.1/logs
num.partitions=3
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.flush.interval.messages=10000
log.flush.interval.ms=1000
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=node01:2181,node02:2181,node03:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
delete.topic.enable=true
Node 2 (node02): edit the Kafka configuration file server.properties
broker.id=1
listeners=PLAINTEXT://node02:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/export/servers/kafka_2.11-1.1.1/logs
num.partitions=3
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.flush.interval.messages=10000
log.flush.interval.ms=1000
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=node01:2181,node02:2181,node03:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
delete.topic.enable=true
Node 3 (node03): edit the Kafka configuration file server.properties
broker.id=2
listeners=PLAINTEXT://node03:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/export/servers/kafka_2.11-1.1.1/logs
num.partitions=3
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.flush.interval.messages=10000
log.flush.interval.ms=1000
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=node01:2181,node02:2181,node03:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
delete.topic.enable=true
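The three files differ only in broker.id and listeners. If node01's server.properties has already been copied to the other nodes, the two differing lines can be patched with sed instead of edited by hand (a sketch, assuming the file sits at the same path on every node):
ssh node02 "sed -i 's/^broker.id=0/broker.id=1/; s/node01:9092/node02:9092/' /export/servers/kafka_2.11-1.1.1/config/server.properties"
ssh node03 "sed -i 's/^broker.id=0/broker.id=2/; s/node01:9092/node03:9092/' /export/servers/kafka_2.11-1.1.1/config/server.properties"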
Cluster start script
#!/bin/bash
brokers="node01 node02 node03"
KAFKA_HOME="/export/servers/kafka_2.11-1.1.1"
KAFKA_NAME="kafka_2.11-1.1.1"

echo "INFO : Begin to start kafka cluster ..."
for broker in ${brokers}
do
    echo "INFO : Starting ${KAFKA_NAME} on ${broker} ..."
    # Start the broker in daemon mode; options after the hostname are part of the remote command, so no ssh -C here
    ssh ${broker} "source /etc/profile; ${KAFKA_HOME}/bin/kafka-server-start.sh -daemon ${KAFKA_HOME}/config/server.properties"
    if [[ $? -eq 0 ]]; then
        echo "INFO : ${KAFKA_NAME} on ${broker} started successfully"
    else
        echo "ERROR: failed to start ${KAFKA_NAME} on ${broker}"
    fi
done
echo "INFO : Kafka cluster start completed."
Cluster stop script
#!/bin/bash
brokers="node01 node02 node03"
KAFKA_HOME="/export/servers/kafka_2.11-1.1.1"
KAFKA_NAME="kafka_2.11-1.1.1"

echo "INFO : Begin to stop kafka cluster ..."
for broker in ${brokers}
do
    echo "INFO : Shutting down ${KAFKA_NAME} on ${broker} ..."
    ssh ${broker} "source /etc/profile; ${KAFKA_HOME}/bin/kafka-server-stop.sh"
    if [[ $? -ne 0 ]]; then
        echo "ERROR: failed to shut down ${KAFKA_NAME} on ${broker}"
    fi
done
echo "INFO : Kafka cluster shutdown completed."