kafka zookeeper 安装
192.168.x.127 zk1 kafka1
192.168.x.130 zk2 kafka2
192.168.x.135 zk3 kafka3
设置 /etc/hosts
192.168.x.127 zk1 kafka1
192.168.x.130 zk2 kafka2
192.168.x.135 zk3 kafka3
设置 主机名
# hostnamectl sets ONE hostname per machine — the original single command
# would not name three hosts. Run the matching command on each node:
ssh 192.168.x.127 "hostnamectl set-hostname zk1"
ssh 192.168.x.130 "hostnamectl set-hostname zk2"
ssh 192.168.x.135 "hostnamectl set-hostname zk3"
##ntp 时间同步服务
# Install and enable the NTP time-sync service on all three nodes.
# "$i" is quoted (SC2086) and the per-host steps are combined into one
# ssh session instead of two separate loops.
for i in zk1 zk2 zk3; do
  ssh "$i" "yum install -y ntp && systemctl enable ntpd && systemctl start ntpd"
done
###安装java 版本1.8
https://www.oracle.com/technetwork/java/javase/overview/index.html
jdk1.8.0_111.zip
vim /etc/profile 追加以下变量
# Java environment variables to append to /etc/profile
# (assumes the JDK is unpacked at /usr/local/java — TODO confirm path).
export JAVA_HOME="/usr/local/java"
export PATH="${JAVA_HOME}/bin:${PATH}"
export CLASSPATH=".:${JAVA_HOME}/lib/dt.jar:${JAVA_HOME}/lib/tools.jar"
# Reload the profile so the variables take effect in the current shell.
source /etc/profile
### Install ZooKeeper ## http://zookeeper.apache.org/
wget http://mirror.bit.edu.cn/apache/zookeeper/stable/zookeeper-3.4.12.tar.gz
# The tarball must be EXTRACTED, not just renamed — the original 'mv'
# only renamed the .tar.gz file, so the later scp copied a tarball
# instead of the ZooKeeper directory tree.
tar -xzf zookeeper-3.4.12.tar.gz
mv zookeeper-3.4.12 zookeeper
scp -r zookeeper root@zk1:/usr/local/
## Append ZooKeeper's bin directory to PATH and reload the profile.
echo -e "# append zk_env\nexport PATH=\$PATH:/usr/local/zookeeper/bin" >> /etc/profile
source /etc/profile
##设置 zoo.cfg
# Effective (non-comment) settings of zoo.cfg on zk1:
[root@zk1 zookeeper]# egrep -v '#|^$' /usr/local/zookeeper/conf/zoo.cfg
# Base time unit in milliseconds used by ZooKeeper heartbeats/timeouts.
tickTime=2000
# Ticks a follower may take to connect and sync with the leader at startup.
initLimit=10
# Ticks a follower may lag behind the leader before being dropped.
syncLimit=5
# Snapshot storage directory (created by the mkdir below).
dataDir=/usr/local/zookeeper/data
# Port clients connect to.
clientPort=2181
# Transaction log directory, kept separate from snapshots.
dataLogDir=/usr/local/zookeeper/logs
# Auto-purge old snapshots: retain 500, run the purge task every 24 hours.
autopurge.snapRetainCount=500
autopurge.purgeInterval=24
# Ensemble members: server.<myid>=<host>:<peer-port>:<leader-election-port>.
# The <myid> must match the number written to each node's data/myid file.
server.1=192.168.x.127:2888:3888
server.2=192.168.x.130:2888:3888
server.3=192.168.x.135:2888:3888
# Create the data and log directories referenced above.
mkdir -p /usr/local/zookeeper/{data,logs}
## Sync the ZooKeeper directory from zk1 to zk2 and zk3.
scp -r zookeeper root@zk2:/usr/local/
scp -r zookeeper root@zk3:/usr/local/
## Write each node's unique myid. The whole remote command is quoted so
## the redirection runs on the REMOTE host — unquoted (as originally
## written), ssh performs the redirection LOCALLY and the local myid
## gets overwritten three times while the remote files stay empty.
ssh zk1 'echo "1" > /usr/local/zookeeper/data/myid'
ssh zk2 'echo "2" > /usr/local/zookeeper/data/myid'
ssh zk3 'echo "3" > /usr/local/zookeeper/data/myid'
## Start the ensemble on all three nodes.
ssh zk1 "zkServer.sh start"
ssh zk2 "zkServer.sh start"
ssh zk3 "zkServer.sh start"
## Install Kafka — http://kafka.apache.org/downloads
wget http://mirror.bit.edu.cn/apache/kafka/2.1.0/kafka_2.11-2.1.0.tgz
# Extract first — the original 'mv' referenced a directory that does not
# exist until the .tgz is unpacked.
tar -xzf kafka_2.11-2.1.0.tgz
mv kafka_2.11-2.1.0 kafka
cp -r kafka /usr/local/
## Append Kafka's bin directory to PATH on every Kafka node, then reload.
echo -e "# append kafka_env\nexport PATH=\$PATH:/usr/local/kafka/bin" >> /etc/profile
source /etc/profile
## Create the Kafka message/log directory (-p: no error if it exists).
mkdir -p /usr/local/kafka/kafkalogs
## Edit /usr/local/kafka/config/server.properties — values below are for zk1:
# Unique broker id (2 on zk2, 3 on zk3 — see the per-node edits below).
broker.id=1
# NOTE(review): host.name/port are legacy settings; Kafka 2.x documents
# 'listeners=PLAINTEXT://192.168.x.127:9092' instead — confirm they still
# take effect on this broker version.
host.name=192.168.x.127
port=9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
# Message storage directory created during installation.
log.dirs=/usr/local/kafka/kafkalogs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
# Retain log segments for 7 days.
log.retention.hours=168
# Largest record batch the broker accepts. The correct property name is
# 'message.max.bytes' — the original 'message.max.byte' is not a Kafka
# property and was silently ignored.
message.max.bytes=5242880
default.replication.factor=2
# Must be >= message.max.bytes so replicas can fetch the largest message.
replica.fetch.max.bytes=5242880
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
# ZooKeeper ensemble connection string.
zookeeper.connect=192.168.x.127:2181,192.168.x.130:2181,192.168.x.135:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
# Sync the configured Kafka tree from zk1 to the other two brokers.
scp -r /usr/local/kafka root@zk2:/usr/local/
scp -r /usr/local/kafka root@zk3:/usr/local/
## Then edit server.properties on each copied node so broker.id and
## host.name are unique. On zk2 set:
broker.id=2
host.name=192.168.x.130
## and on zk3 set:
broker.id=3
host.name=192.168.x.135
## Start the broker on all three nodes; stdout and stderr go to kafka.log.
nohup kafka-server-start.sh /usr/local/kafka/config/server.properties > /usr/local/kafka/kafka.log 2>&1 &
##测试
[root@zk1 ~]# kafka-topics.sh --zookeeper 192.168.x.127:2181,192.168.x.130:2181,192.168.x.135:2181 --create --topic t-behavior --replication-factor 3 --partitions 3
Created topic "t-behavior".
[root@zk1 ~]# kafka-topics.sh --zookeeper 192.168.x.127:2181,192.168.x.130:2181,192.168.x.135:2181 --list
t-behavior
[root@zk1 ~]# kafka-topics.sh --zookeeper 192.168.x.127:2181,192.168.x.130:2181,192.168.x.135:2181 --describe --topic t-behavior
Topic:t-behavior PartitionCount:3 ReplicationFactor:3 Configs:
Topic: t-behavior Partition: 0 Leader: 1 Replicas: 1,2,3 Isr: 1,2,3
Topic: t-behavior Partition: 1 Leader: 2 Replicas: 2,3,1 Isr: 2,3,1
Topic: t-behavior Partition: 2 Leader: 3 Replicas: 3,1,2 Isr: 3,1,2
[root@zk1 ~]#