1、系统配置
apache-zookeeper-3.7.1-bin.tar.gz
jdk-11.0.19_linux-x64_bin.tar.gz
kafka_2.13-3.3.2.tgz
scala-2.12.17.tgz
#配置免密sudo(略)
#repo源配置(略)
#配置数据中心服务发现agent(略)
#时间设置(略)
#创建/data目录,挂载数据盘,用于存储kafka的数据
# Create /data, mount the data disk, and persist the mount for Kafka storage.
# Fix: the original had "sudo mdir" (typo for mkdir); -p makes it idempotent.
sudo mkdir -p /data
sudo mount /dev/sdb /data
sudo sh -c 'echo "/dev/sdb /data xfs defaults 0 0" >> /etc/fstab'
# 编辑文件描述符 1048576(nr_open默认值)
sudo vi /etc/security/limits.conf
* soft nofile 1048576
* soft nproc 1048576
* hard nofile 1048576
* hard nproc 1048576
sudo vi /etc/sysctl.conf
fs.file-max=99999999
fs.nr_open=1048576
2、下载解压所有软件包至/opt,并且创建软连接
# Extract the tarballs and move them under /opt
tar -zxvf apache-zookeeper-3.7.1-bin.tar.gz
tar -zxvf kafka_2.13-3.3.2.tgz
tar -zxvf scala-2.12.17.tgz
sudo mv apache-zookeeper-3.7.1-bin /opt/
sudo mv kafka_2.13-3.3.2 /opt/
sudo mv scala-2.12.17 /opt/
# Create version-independent symlinks INSIDE /opt.
# Fix: the original ran "ln -s" with relative targets from an unspecified cwd,
# which would create the links outside /opt; absolute paths make this explicit.
sudo ln -s /opt/apache-zookeeper-3.7.1-bin /opt/zookeeper
# NOTE(review): jdk-11.0.19_linux-x64_bin.tar.gz is never extracted/moved above,
# so /opt/jdk-11.0.19 may not exist — extract it to /opt first, or rely on the
# apt-installed openjdk (see the apt-get line below). TODO confirm.
sudo ln -s /opt/jdk-11.0.19 /opt/jdk
sudo ln -s /opt/kafka_2.13-3.3.2 /opt/kafka
sudo ln -s /opt/scala-2.12.17 /opt/scala
# 测试是否安装完成
sjzx1@node1:~$ scala -version
Scala code runner version 2.12.17 -- Copyright 2002-2022
sudo apt-get install openjdk-11-jdk
3、添加环境变量:/etc/profile 文件尾部添加如下内容
# Append the following to /etc/profile (system-wide, login shells)
sudo vi /etc/profile
# JAVA_HOME deliberately left commented out — presumably java comes from the
# apt-installed openjdk-11-jdk already on PATH, not /opt/jdk; TODO confirm.
# export JAVA_HOME=/opt/jdk
export SCALA_HOME=/opt/scala
export ZOOKEEPER_HOME=/opt/zookeeper
export KAFKA_HOME=/opt/kafka
export PATH=$SCALA_HOME/bin:$ZOOKEEPER_HOME/bin:$KAFKA_HOME/bin:$PATH
# Same variables for root's shell.
# Fix: the original PATH read "$SCALA_HOME:/bin" (stray colon), which prepends
# a bare /opt/scala and /bin instead of /opt/scala/bin.
sudo vi /root/.bashrc
export SCALA_HOME=/opt/scala
export ZOOKEEPER_HOME=/opt/zookeeper
export KAFKA_HOME=/opt/kafka
export PATH=$SCALA_HOME/bin:$ZOOKEEPER_HOME/bin:$KAFKA_HOME/bin:$PATH
重启验证所有配置是否正常
软件配置
zookeeper配置
编辑 conf/zoo.cfg
# Basic time unit: one tick = 2000 ms
tickTime=2000
# Followers may take up to initLimit*tickTime to sync with the leader at startup
initLimit=10
# Max ticks a follower may lag behind the leader before being dropped
syncLimit=5
# 0 = unlimited client connections per source IP
maxClientCnxns=0
# Disable the embedded AdminServer
admin.enableServer=false
dataDir=/opt/zookeeper/data
clientPort=2181
# Ensemble members: host:<peer-port>:<leader-election-port>
server.1=node1.kafka02.ctdc.com:2888:3888
server.2=node2.kafka02.ctdc.com:2888:3888
server.3=node3.kafka02.ctdc.com:2888:3888
# SASL authentication
authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
根据配置文件创建 数据目录,sasl认证文件以及启动参数
mkdir /opt/zookeeper/data
# Create the myid file
touch /opt/zookeeper/data/myid
# Each node writes a DIFFERENT id so the ensemble can tell its members apart.
# Run only the ONE echo line matching the node you are on — they overwrite
# each other (id must match the server.N entry in zoo.cfg for that host).
echo 1 > /opt/zookeeper/data/myid
echo 2 > /opt/zookeeper/data/myid
echo 3 > /opt/zookeeper/data/myid
编辑zookeeper启动服务
编辑配置文件:
sudo vi /lib/systemd/system/zookeeper.service
[Unit]
Description=Apache ZooKeeper service
# Start only after networking is up
Requires=network.target
After=network.target
[Service]
# zkServer.sh daemonizes itself, so systemd must treat the start as forking
Type=forking
WorkingDirectory=/opt/zookeeper
ExecStart=/opt/zookeeper/bin/zkServer.sh start /opt/zookeeper/conf/zoo.cfg
ExecStop=/opt/zookeeper/bin/zkServer.sh stop /opt/zookeeper/conf/zoo.cfg
ExecReload=/opt/zookeeper/bin/zkServer.sh restart /opt/zookeeper/conf/zoo.cfg
TimeoutSec=30
Restart=on-failure
[Install]
WantedBy=multi-user.target
重新加载配置文件:
sudo systemctl daemon-reload
sudo systemctl start zookeeper
# Manual start/stop/status — use these only when NOT managing the service
# through systemd, otherwise systemd loses track of the process
/opt/zookeeper/bin/zkServer.sh start
/opt/zookeeper/bin/zkServer.sh stop
/opt/zookeeper/bin/zkServer.sh status
# Verify connectivity with the CLI against one ensemble member
/opt/zookeeper/bin/zkCli.sh -server node1.kafka02.ctdc.com:2181
kafka配置
# Prerequisites: java installed and /data mounted (see section 1)
# Create the kafka log directory; -p creates the parent and is idempotent,
# replacing the original two-step mkdir
mkdir -p /data/kafka/kafka-logs
sudo vi /opt/kafka/config/server.properties
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=1
############################# Socket Server Settings ##########################
#
port=9092
listeners=SASL_PLAINTEXT://node1.kafka02.ctdc.com:9092
advertised.listeners=SASL_PLAINTEXT://node1.kafka02.ctdc.com:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
authorizer.class.name=kafka.security.authorizer.AclAuthorizer
sasl.enabled.mechanisms=PLAIN
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.mechanism.inter.broker.protocol=PLAIN
allow.everyone.if.no.acl.found=true
super.users=User:borkerAdmin
############################# Log Basics #############################
log.dirs=/data/kafka/kafka-logs
num.partitions=3
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings ########################
###
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Flush Policy #############################
############################# Log Retention Policy ############################
log.retention.hours=6
#log.retention.bytes=42949672960
#log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
zookeeper.connect=node1.kafka02.ctdc.com:2181,node2.kafka02.ctdc.com:2181,node3.kafka02.ctdc.com:2181
zookeeper.connection.timeout.ms=18000
############################# Group Coordinator Settings ######################
#####
group.initial.rebalance.delay.ms=0
# 消息大小最大20M
message.max.bytes=20971520
max.request.size=20971520
设置账号密码
sudo vi /opt/kafka/config/kafka_server_jaas.conf
KafkaServer {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="borkerAdmin"
password="borkerAdminPassword"
user_borkerAdmin="borkerAdminPassword"
user_borkerClient="borkerClientPassword"
user_tpcnkstmfsjy="tHsOeGhlCwXe"
user_ciewswbzxdst="n82Kf3TWEDTR"
user_cctornzcbyds="S9ckpbZYpFCH"
user_bdxxbylselie="F8PyNF15K9Qn"
user_kxuxgiftcvoe="Ssovamec6dUx"
user_myavkkoprslr="1L0JgPeK8EJd"
user_gqolffyfitzd="PWQrtBYPKjud"
user_hyigxwczqrgr="gQQXgnKtpEoP"
user_wydjtyzousmz="N45EYp1AA9qe"
user_nweivdldxlaa="LDBQyLXLzzcl"
user_qvvornqyugyi="veb1Vzs8yDTo"
user_xjkimjwvphqn="C8YhmsYEK4bi"
user_jkvbihxhvbhw="4b1260UsxO8b"
user_elrcisifkdvt="UGYi1Uh0u2re"
user_cjgavfpokdzo="NgA4wbWwFq77"
user_xtkvqmvniobu="ANaYtQDn1Tl8"
user_kjasubkudjoo="w6qYHgJcrmG7"
user_zzfryxiyqnsa="RPmAEGI2qeZW"
user_lrtxnbihnjoj="kGtxbmR8vQZZ"
user_hjvwjjcnsktx="BYe5VzkY4GaH"
user_readonly="pueqXCvWh4fH";
};
sudo vi /opt/kafka/bin/kafka-server-start.sh
添加 -Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx2G -Xms1G -Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf"
fi
客户端认证配置
配置客户端认证文件
sudo vi /opt/kafka/config/kafka_client_jaas.conf
KafkaClient {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="borkerClient"
password="borkerClientPassword";
};
• 配置中的username和password为kafka_server_jaas.conf中配置的user_开头的用户及对
应的密码
• KafkaClient这个名称不能变,保持大小写严格一致
• 同样两个“;”不能少且位置不能变
配置启动脚本
sudo vi /opt/kafka/bin/kafka-console-consumer.sh
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx512M -Djava.security.auth.login.config=/opt/kafka/config/kafka_client_jaas.conf"
fi
sudo vi /opt/kafka/bin/kafka-console-producer.sh
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx512M -Djava.security.auth.login.config=/opt/kafka/config/kafka_client_jaas.conf"
fi
修改配置文件
sudo vi /opt/kafka/config/consumer.properties
bootstrap.servers=node1.kafka02.ctdc.com:9092,node2.kafka02.ctdc.com:9092,node3.kafka02.ctdc.com:9092
security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN
sudo vi /opt/kafka/config/producer.properties
bootstrap.servers=node1.kafka02.ctdc.com:9092,node2.kafka02.ctdc.com:9092,node3.kafka02.ctdc.com:9092
security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN
创建命令认证配置文件
sudo vi /opt/kafka/config/command_config.properties
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="borkerClient" password="borkerClientPassword";
security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN
kafka服务创建
创建kafka服务文件
sudo vi /lib/systemd/system/kafka.service
注意事项:kafka需要在zookeeper后面启动
[Unit]
Description=kafka
After=network.target zookeeper.service
[Service]
Type=forking
ExecStart=/opt/kafka/bin/kafka-server-start.sh -daemon /opt/kafka/config/server.properties
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=/opt/kafka/bin/kafka-server-stop.sh
PrivateTmp=true
Restart=on-abnormal
[Install]
WantedBy=multi-user.target
守护启动
sudo systemctl start kafka
sudo systemctl stop kafka
#查看topic
sudo /opt/kafka/bin/kafka-topics.sh --bootstrap-server node1.kafka02.ctdc.com:9092 --list --command-config /opt/kafka/config/command_config.properties
#创建topic
sudo /opt/kafka/bin/kafka-topics.sh --bootstrap-server node1.kafka02.ctdc.com:9092 --create --topic zhangtaotest --partitions 3 --replication-factor 3 --command-config /opt/kafka/config/command_config.properties