# 环境: jdk1.8
# 目录
# 1、安装zookeeper
# Create the ZooKeeper data and transaction-log directories.
mkdir -p /usr/local/data/zkdata
mkdir -p /usr/local/data/zkdatalog
# Unpack the ZooKeeper distribution (plain tar, not gzipped, hence no -z).
tar -xvf zookeeper-3.4.10.tar -C /usr/local/
# Write a fresh zoo.cfg. A truncating '>' redirect replaces the old file in one
# step (the original 'rm -rf' + '>>' would APPEND to a stale file if the rm
# failed). The quoted 'EOF' keeps the here-doc literal — no shell expansion.
cat > /usr/local/zookeeper-3.4.10/conf/zoo.cfg <<'EOF'
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/data/zkdata
dataLogDir=/usr/local/data/zkdatalog
clientPort=2181
server.1=192.168.0.1:2888:3888
EOF
# myid must match this host's server.N entry in zoo.cfg.
echo "1" > /usr/local/data/zkdata/myid
# Run ZooKeeper as the unprivileged jht user.
chown -R jht:jht /usr/local/zookeeper-3.4.10
chown -R jht:jht /usr/local/data/zkdata
chown -R jht:jht /usr/local/data/zkdatalog
# Start ZooKeeper (standalone for now).
/usr/local/zookeeper-3.4.10/bin/zkServer.sh start
# 2、安装kafka
# Create the Kafka log (data) directory.
mkdir -p /usr/local/data/kafka
tar -zxvf kafka_2.11-2.0.0.tgz -C /usr/local/
# Keep the stock config around as a reference.
mv /usr/local/kafka_2.11-2.0.0/config/server.properties /usr/local/kafka_2.11-2.0.0/config/server.properties.default
# Write a fresh server.properties. '>' creates/truncates the file, so the
# separate 'touch' + '>>' append of the original is unnecessary. Quoted 'EOF'
# prevents any shell expansion inside the config text.
cat > /usr/local/kafka_2.11-2.0.0/config/server.properties <<'EOF'
broker.id=1
listeners=PLAINTEXT://192.168.0.1:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/data/kafka
num.partitions=3
num.recovery.threads.per.data.dir=1
log.retention.hours=72
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
log.cleaner.enable=false
zookeeper.connect=192.168.0.1:2181
zookeeper.connection.timeout.ms=6000
queued.max.requests=500
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
group.initial.rebalance.delay.ms=0
EOF
# Start the broker in the background.
# BUGFIX: pass the config by ABSOLUTE path — the original relative
# 'config/server.properties' only resolves when cwd is the Kafka install dir.
/usr/local/kafka_2.11-2.0.0/bin/kafka-server-start.sh -daemon /usr/local/kafka_2.11-2.0.0/config/server.properties
# 3、集群搭建
#将目录scp复制到其他两台机器上
# Cluster zoo.cfg: write it directly with a here-doc, consistent with the
# standalone setup above. (The original showed 'vim zoo.cfg' followed by a
# bare listing, which is not executable shell — lines like 'server.1=…' would
# fail as commands if this file were run as a script.)
cat > /usr/local/zookeeper-3.4.10/conf/zoo.cfg <<'EOF'
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/data/zkdata
dataLogDir=/usr/local/data/zkdatalog
clientPort=2181
server.1=192.168.0.1:2888:3888
server.2=192.168.0.2:2888:3888
server.3=192.168.0.3:2888:3888
EOF
echo 1 > /usr/local/data/zkdata/myid  # each host writes its own id: 1, 2 or 3, matching its server.N line
#依次启动三个zookeeper
#kafka集群
#修改192.168.0.1中/usr/local/kafka_2.11-2.0.0/config/server.properties
# Rewrite the broker config on 192.168.0.1 for cluster mode. Written with a
# here-doc so the script stays runnable (the original was a bare properties
# listing, which is not valid shell).
cat > /usr/local/kafka_2.11-2.0.0/config/server.properties <<'EOF'
broker.id=1
#listeners=PLAINTEXT://192.168.0.1:9092
host.name=192.168.0.1
port=9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/data/kafka
num.partitions=3
num.recovery.threads.per.data.dir=1
log.retention.hours=72
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
log.cleaner.enable=false
#zookeeper.connect=192.168.0.1:2181
zookeeper.connect=192.168.0.1:2181,192.168.0.2:2181,192.168.0.3:2181
zookeeper.connection.timeout.ms=6000
queued.max.requests=500
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
group.initial.rebalance.delay.ms=0
EOF
# BUGFIX: log.dirs was '/usr/local/data/kafaka' (typo) — inconsistent with the
# standalone setup's '/usr/local/data/kafka'; the broker would silently create
# and use a new empty directory. Also removed the stray space in
# 'queued.max.requests =500'.
#依次将目录复制到其他两台机器
#只需修改以下两项配置即可。
#标识ID,非负整数型
#broker.id=1 ###修改其他两台主机的id为2、3
#主机名地址
#host.name=192.168.0.1 ###修改为其他主机的IP地址
#依次启动
# Check cluster state: list topics via the full ZooKeeper ensemble.
bin/kafka-topics.sh --list --zookeeper 192.168.0.1:2181,192.168.0.2:2181,192.168.0.3:2181
# ---- Common Kafka commands ----
# (heading commented out — the original bare line would execute, and fail,
# as a shell command)
# List topics.
bin/kafka-topics.sh --list --zookeeper 192.168.0.1:2181
# List consumer groups (original note had a typo: 租 -> 组).
./bin/kafka-consumer-groups.sh --bootstrap-server 192.168.0.1:9092 --list
# Check consumer lag for a group ('test-group' is a group found by the
# previous command).
./bin/kafka-consumer-groups.sh --bootstrap-server 192.168.0.1:9092 --group test-group --describe
# List topics.
bin/kafka-topics.sh --list --zookeeper 192.168.0.1:2181
# Dump a topic's messages from the beginning.
bin/kafka-console-consumer.sh --bootstrap-server 192.168.0.1:9092 --topic test-topic --from-beginning
# ---- Handling high Kafka disk usage ----
# Identify which topic is using the space; if it is __consumer_offsets,
# tighten its retention/cleanup settings with the commands below:
bin/kafka-configs.sh --zookeeper 172.16.0.10:2181 --alter --entity-name __consumer_offsets --entity-type topics --add-config retention.ms=86400000
bin/kafka-configs.sh --zookeeper 172.16.0.10:2181 --alter --entity-name __consumer_offsets --entity-type topics --add-config cleanup.policy=delete
bin/kafka-configs.sh --zookeeper 172.16.0.10:2181 --alter --entity-name __consumer_offsets --entity-type topics --add-config 'max.message.bytes=50000000'
bin/kafka-configs.sh --zookeeper 172.16.0.10:2181 --alter --entity-name __consumer_offsets --entity-type topics --add-config 'flush.messages=50000'