1 Software Versions and Host Environment

1.1 Software Versions

jdk1.7
zookeeper 3.4.6 (bundled with Kafka) & kafka_2.11-0.10.0.0
logkafka-master
kafka-manager-1.2.9.10
KafkaOffsetMonitor

Download links:
kafka: http://kafka.apache.org/downloads
logkafka: https://github.com/Qihoo360/logkafka
kafka-manager-1.2.9.10: https://github.com/yahoo/kafka-manager
KafkaOffsetMonitor (no longer actively maintained): https://github.com/quantifind/KafkaOffsetMonitor

1.2 Host Environment (CentOS 7)

Host                    Components                                     Notes
kafka1v.xingkong.com    Zookeeper, Kafka Broker
kafka2v.xingkong.com    Zookeeper, Kafka Broker, KafkaOffsetMonitor
kafka3v.xingkong.com    Zookeeper, Kafka Broker, KafkaManager
log.xingkong.com        LogKafka                                       log collection host


1.3 Install the Java Environment

Install the Java environment on all machines (create the infra account first and keep its uid/gid identical on every machine):

sudo groupadd infra
sudo useradd -g infra infra

sudo mv jdk1.7 /usr/local/
cd /usr/local/
sudo ln -s jdk1.7 jdk
export JAVA_HOME=/usr/local/jdk
sudo update-alternatives --install /usr/bin/java java $JAVA_HOME/bin/java 300
sudo update-alternatives --install /usr/bin/javac javac $JAVA_HOME/bin/javac 300
sudo update-alternatives --config java
sudo update-alternatives --config javac
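
To make JAVA_HOME persist across logins and confirm the setup (a minimal sketch; adjust if your site uses a different profile file):

echo 'export JAVA_HOME=/usr/local/jdk' | sudo tee -a /etc/profile
echo 'export PATH=$JAVA_HOME/bin:$PATH' | sudo tee -a /etc/profile
source /etc/profile
java -version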


2 Installation

2.1 Install ZooKeeper

2.1.1 Installation and Directory Preparation (on all three hosts kafka1v-kafka3v)

sudo mkdir -p /data/kafka/data
sudo mkdir -p /data/kafka/log
sudo mv  kafka_2.11-0.10.0.0 /usr/local/kafka_2.11-0.10.0.0
sudo ln -s /usr/local/kafka_2.11-0.10.0.0 /usr/local/kafka
sudo chown -R infra:infra /usr/local/kafka_2.11-0.10.0.0
sudo chown -R infra:infra /usr/local/kafka
sudo chown -R infra:infra /data/kafka
sudo mkdir -p /data/zookeeper/data
sudo mkdir -p /data/zookeeper/log
sudo mkdir -p /data/zookeeper/datalog
echo 1 | sudo tee /data/zookeeper/data/myid    # on kafka1v
echo 2 | sudo tee /data/zookeeper/data/myid    # on kafka2v
echo 3 | sudo tee /data/zookeeper/data/myid    # on kafka3v
sudo chown -R infra:infra /data/zookeeper

2.1.2 Edit the Configuration File

vim /usr/local/kafka/config/zookeeper.properties
Modify the following:
dataDir=/data/zookeeper/data
clientPort=2181
maxClientCnxns=0

tickTime=2000
initLimit=5
syncLimit=2
server.1=172.16.10.1:2888:3888
server.2=172.16.10.2:2888:3888
server.3=172.16.10.3:2888:3888

logDir=/data/zookeeper/log
dataLogDir=/data/zookeeper/datalog

autopurge.purgeInterval=1
autopurge.snapRetainCount=3

2.1.3 Edit the Startup Script

vim /usr/local/kafka/bin/zookeeper-server-start.sh
Add the following line just before the last line of the script:
export LOG_DIR=`grep ^logDir $base_dir/../config/zookeeper.properties | cut -d '=' -f 2`
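
After the edit, the tail of the script should look roughly like the sketch below (the exec line is the stock final line of zookeeper-server-start.sh in this Kafka release; LOG_DIR is picked up by kafka-run-class.sh and becomes the log4j output directory):

export LOG_DIR=`grep ^logDir $base_dir/../config/zookeeper.properties | cut -d '=' -f 2`
exec $base_dir/kafka-run-class.sh $EXTRA_ARGS org.apache.zookeeper.server.quorum.QuorumPeerMain "$@"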

2.2 Install Kafka

2.2.1 Edit the Configuration File

vim /usr/local/kafka/config/server.properties
Modify the following:
broker.id=1 (must be unique per broker: 1, 2, 3)
listeners=PLAINTEXT://kafka1v.xingkong.com:9092 (use each broker's own hostname)
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/data/kafka/data
num.partitions=3
num.recovery.threads.per.data.dir=1
log.retention.hours=3
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=172.16.10.1:2181,172.16.10.2:2181,172.16.10.3:2181
zookeeper.connection.timeout.ms=6000
default.replication.factor=2
delete.topic.enable=true
log.cleaner.enable=true
log.cleanup.policy=delete
auto.create.topics.enable=false

Note: LOG_DIR, JMX_PORT and KAFKA_HEAP_OPTS are environment variables read by the startup scripts, not server.properties settings; export them in kafka-server-start.sh instead (see 2.2.2):
LOG_DIR=/data/kafka/log
JMX_PORT=7991
KAFKA_HEAP_OPTS="-server -Xms1024M -Xmx4096M"

2.2.2 Edit the Startup Script

vim /usr/local/kafka/bin/kafka-server-start.sh
Export the environment variables from 2.2.1 just before the final line of the script, which is:
exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@"
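
A sketch of how the end of the modified script might look (the exec line is the stock one; the exported values match 2.2.1):

export LOG_DIR=/data/kafka/log
export JMX_PORT=7991
export KAFKA_HEAP_OPTS="-server -Xms1024M -Xmx4096M"

exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@"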

2.2.3 Change the Kafka Log Directory

cd /usr/local/kafka
sudo rm -rf logs
sudo ln -sv /data/kafka/log  logs

2.3 Install LogKafka

All of the following is done on host log.xingkong.com.

2.3.1 Installation and Directory Preparation

sudo mkdir -p /data/logkafka/log
sudo mkdir -p /data/logkafka/data
sudo mv logkafka-master /usr/local/
cd /usr/local/
sudo ln -s logkafka-master logkafka
sudo chown -R infra:infra /data/logkafka
sudo chown -R infra:infra logkafka-master

2.3.2 Edit the Configuration File

vim /usr/local/logkafka/_install/conf/logkafka.conf
Modify the following:
zookeeper.connect = "kafka1v.xingkong.com:2181,kafka2v.xingkong.com:2181,kafka3v.xingkong.com:2181"
pos.path = /data/logkafka/data/pos.log
logkafka.id=log.xingkong.com
line.max.bytes = 1048576
read.max.bytes = 1048576
key.max.bytes = 1024
zookeeper.upload.interval = 10000
refresh.interval = 30000
stat.silent.max.ms = 10000
path.queue.max.size = 100
message.send.max.retries = 10000
queue.buffering.max.messages = 400000

2.4 Install KafkaManager

Install KafkaManager on kafka3v.xingkong.com.

2.4.1 Installation and Directory Preparation

sudo mkdir -p /data/KafkaManager/log
sudo mv kafka-manager-1.2.9.10 /usr/local/
sudo ln -s /usr/local/kafka-manager-1.2.9.10 /usr/local/KafkaManager
sudo chown -R infra:infra /usr/local/kafka-manager-1.2.9.10
sudo chown -R infra:infra /data/KafkaManager

2.4.2 Edit the Configuration Files

vim /usr/local/KafkaManager/conf/application.conf
Modify the following:
play.crypto.secret="^<csmm5Fx4d=r2HEX8pelM3iBkFVv?k[mc;IZE<_Qoq8EkX_/7@Zt6dP05Pzea3U"
play.crypto.secret=${?APPLICATION_SECRET}
play.i18n.langs=["en"]
play.http.requestHandler = "play.http.DefaultHttpRequestHandler"
play.http.context = "/"
play.application.loader=loader.KafkaManagerLoader
kafka-manager.zkhosts="172.16.10.1:2181,172.16.10.2:2181,172.16.10.3:2181"
kafka-manager.zkhosts=${?ZK_HOSTS}
pinned-dispatcher.type="PinnedDispatcher"
pinned-dispatcher.executor="thread-pool-executor"
application.features=["KMClusterManagerFeature","KMTopicManagerFeature","KMPreferredReplicaElectionFeature","KMReassignPartitionsFeature"]
akka {
  loggers = ["akka.event.slf4j.Slf4jLogger"]
  loglevel = "INFO"
}
basicAuthentication.enabled=false
basicAuthentication.username="admin"
basicAuthentication.password="password"
basicAuthentication.realm="Kafka-Manager"
basicAuthentication.excluded=["/api/health"] # ping the health of your instance without authentication
kafka-manager.consumer.properties.file=${?CONSUMER_PROPERTIES_FILE}

vim /usr/local/KafkaManager/conf/logger.xml
Modify the following:
<configuration>
  <conversionRule conversionWord="coloredLevel" converterClass="play.api.Logger$ColoredLevel" />
  <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <!--<file>${application.home}/logs/application.log</file>-->
    <file>/data/KafkaManager/log/application.log</file>
    <encoder>
       <pattern>%date - [%level] - from %logger in %thread %n%message%n%xException%n</pattern>
    </encoder>
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
      <fileNamePattern>${application.home}/logs/application.%d{yyyy-MM-dd}.log</fileNamePattern>
      <maxHistory>5</maxHistory>
      <totalSizeCap>5GB</totalSizeCap>
    </rollingPolicy>
  </appender>
  <logger name="play" level="INFO" />
  <logger name="application" level="DEBUG" />
  <!-- Off these ones as they are annoying, and anyway we manage configuration ourself -->
  <logger name="com.avaje.ebean.config.PropertyMapLoader" level="OFF" />
  <logger name="com.avaje.ebeaninternal.server.core.XmlConfigLoader" level="OFF" />
  <logger name="com.avaje.ebeaninternal.server.lib.BackgroundThread" level="OFF" />
  <logger name="com.gargoylesoftware.htmlunit.javascript" level="OFF" />
  <logger name="org.apache.zookeeper" level="INFO"/>
  <logger name="akka" level="INFO" />
  <logger name="kafka" level="INFO" />
  <root level="INFO">
    <appender-ref ref="FILE" />
  </root>
</configuration>

2.5 Install KafkaOffsetMonitor

2.5.1 Installation and Directory Preparation

sudo mkdir -p /data/KafkaOffsetMonitor/log/
sudo mv KafkaOffsetMonitor /usr/local/
sudo chown -R infra:infra /usr/local/KafkaOffsetMonitor
sudo chown -R infra:infra /data/KafkaOffsetMonitor

2.5.2 Edit the Configuration File

vim /usr/local/KafkaOffsetMonitor/conf/monitor.conf
Modify the following:
offsetStorage=zookeeper
zookeeper=172.16.10.1:2181,172.16.10.2:2181,172.16.10.3:2181
port=9099
refresh=10.seconds
retain=1.days
JAVA_OPTS=-server -Xms256M -Xmx512M 
noHupLog=/data/KafkaOffsetMonitor/log/nohup.out

3 Start the Services

Start all services as the infra user.

3.1 Start ZooKeeper

cd /usr/local/kafka/
bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
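
To confirm the ensemble is healthy once all three nodes are started (four-letter-word commands are enabled by default in ZooKeeper 3.4.6; requires nc to be installed):

echo ruok | nc kafka1v.xingkong.com 2181                 # should answer "imok"
echo stat | nc kafka1v.xingkong.com 2181 | grep Mode     # expect one leader and two followers across the three nodes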

3.2 Start Kafka

cd /usr/local/kafka/
bin/kafka-server-start.sh -daemon config/server.properties
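
Once all three brokers are up, a quick smoke test with the console tools (the topic name "test" is arbitrary; because auto.create.topics.enable=false, the topic has to be created explicitly):

bin/kafka-topics.sh --create --zookeeper 172.16.10.1:2181 --topic test --partitions 3 --replication-factor 2
bin/kafka-topics.sh --describe --zookeeper 172.16.10.1:2181 --topic test
echo "hello" | bin/kafka-console-producer.sh --broker-list kafka1v.xingkong.com:9092 --topic test
bin/kafka-console-consumer.sh --zookeeper 172.16.10.1:2181 --topic test --from-beginning    # Ctrl-C to stop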

3.3 Start LogKafka

cd /usr/local/logkafka/_install/bin
nohup ./logkafka -e ../conf/easylogging.conf -f ../conf/logkafka.conf >> /data/logkafka/log/nohup.out 2>&1 &

3.4 Start KafkaManager

cd /usr/local/KafkaManager/bin
nohup ./kafka-manager -Dconfig.file=../conf/application.conf -Dhttp.port=9090 >> /data/KafkaManager/log/nohup.out 2>&1 &
Check the log:
tail -f /data/KafkaManager/log/nohup.out
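
You can also check from the command line that the UI is responding (the /api/health path is the one excluded from basic auth in application.conf; whether that route exists depends on the Kafka Manager version):

curl -I http://kafka3v.xingkong.com:9090/
curl http://kafka3v.xingkong.com:9090/api/health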

Access the web UI:
http://kafka3v.xingkong.com:9090
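
3.5 Start KafkaOffsetMonitor

KafkaOffsetMonitor is launched by running the OffsetGetterWeb main class from its assembly jar. The jar file name below is an assumption (use the actual jar shipped under /usr/local/KafkaOffsetMonitor); the flags mirror the values in monitor.conf (2.5.2).

cd /usr/local/KafkaOffsetMonitor
nohup java -server -Xms256M -Xmx512M -cp KafkaOffsetMonitor-assembly-0.2.1.jar \
  com.quantifind.kafka.offsetapp.OffsetGetterWeb \
  --zk 172.16.10.1:2181,172.16.10.2:2181,172.16.10.3:2181 \
  --port 9099 --refresh 10.seconds --retain 1.days \
  >> /data/KafkaOffsetMonitor/log/nohup.out 2>&1 &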

4 Stop the Services
4.1 Stop KafkaManager, KafkaOffsetMonitor and LogKafka

Find the process ID with jps or ps -ef | grep logkafka (or the corresponding service name) and kill it.

4.2 Stop Kafka and ZooKeeper

cd /usr/local/kafka/
bin/kafka-server-stop.sh
bin/zookeeper-server-stop.sh

5 Wipe All Data (only after all services have been stopped)

sudo rm -rf /data/kafka/data/*
sudo rm -rf /data/kafka/data/.lock
sudo rm -rf /data/kafka/data/.kafka_cleanshutdown
sudo rm -rf /data/kafka/log/*
sudo rm -rf /data/zookeeper/data/version-2/*
sudo rm -rf /data/zookeeper/datalog/version-2/*
sudo rm -rf /data/zookeeper/log/*
sudo rm -rf /data/kafka/logs/*
sudo rm -rf /usr/local/KafkaManager/logs/*
sudo rm -rf /usr/local/KafkaOffsetMonitor/bin/offsetapp.db
sudo rm -rf /usr/local/KafkaOffsetMonitor/logs/*
sudo rm -rf /data/KafkaManager/log/*
sudo rm -rf /data/KafkaOffsetMonitor/log/*
sudo rm -rf /data/logkafka/data/*
sudo rm -rf /data/logkafka/log/*


Summary

ZooKeeper configuration:
Service     Host                    Version  myid  Client port  Quorum port  Election port  JVM -Xmx  logDir                 dataDir                 dataLogDir
xingkong    kafka1v.xingkong.com    3.4.6    1     2181         2888         3888           512MB     /data/zookeeper/log    /data/zookeeper/data    /data/zookeeper/datalog
            kafka2v.xingkong.com    3.4.6    2     2181         2888         3888           512MB     /data/zookeeper/log    /data/zookeeper/data    /data/zookeeper/datalog
            kafka3v.xingkong.com    3.4.6    3     2181         2888         3888           512MB     /data/zookeeper/log    /data/zookeeper/data    /data/zookeeper/datalog

Kafka configuration:
Service     Host                    Version          broker.id  Port  JMX_PORT  JVM Xms ~ Xmx  log.dirs            LOG_DIR            Partitions  Replicas
xingkong    kafka1v.xingkong.com    2.11-0.10.0.0    1          9092  7991      1GB ~ 4GB      /data/kafka/data    /data/kafka/log    3           2
            kafka2v.xingkong.com    2.11-0.10.0.0    2          9092  7991      1GB ~ 4GB      /data/kafka/data    /data/kafka/log    3           2
            kafka3v.xingkong.com    2.11-0.10.0.0    3          9092  7991      1GB ~ 4GB      /data/kafka/data    /data/kafka/log    3           2