概述
kafka单节点安装及命令行操作,官网链接:https://kafka.apache.org/quickstart
实践
配置
zookeeper
kafka 安装包自带 zookeeper(位于 bin/ 与 config/ 中);另外 kafka 2.8+ 支持 KRaft 模式,可不依赖 zookeeper 运行
# 创建目录
[root@hadoop02 kafka_2.12-3.7.1]# mkdir zookeeper
[root@hadoop02 kafka_2.12-3.7.1]# cd zookeeper/
[root@hadoop02 zookeeper]# pwd
/root/kafka_2.12-3.7.1/zookeeper
# config/server.properties(Broker 配置)
# broker的全局唯一编号,不能重复
broker.id=0
# 处理网络请求线程的数量
num.network.threads=3
# 处理磁盘IO线程的数量
num.io.threads=8
# 发送套接字的缓冲区域大小
socket.send.buffer.bytes=102400
# 接收套接字的缓冲区大小
socket.receive.buffer.bytes=102400
# 接收套接字的最大字节数
socket.request.max.bytes=104857600
# 日志目录(修改)
# 默认值(注释掉,避免同一 key 重复赋值):log.dirs=/tmp/kafka-logs
log.dirs=/root/kafka_2.12-3.7.1
# 每个 topic 默认分区数为 1
num.partitions=1
# 文件(数据)保留默认七天
log.retention.hours=168
安装
tar -xzf kafka_2.12-3.7.1.tgz
cd kafka_2.12-3.7.1
# kafka 安装包自带 zookeeper,直接编辑其配置即可
vi config/zookeeper.properties
dataDir=/root/kafka_2.12-3.7.1/zookeeper
bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
bin/kafka-server-start.sh -daemon config/server.properties
# KRaft 模式启动(与上面的 ZooKeeper 模式二选一,不可同时使用)
# bin/kafka-server-start.sh config/kraft/server.properties
[root@hadoop02 kafka_2.12-3.7.1]# jps
12260 QuorumPeerMain
14862 Kafka
16382 Jps
[root@hadoop02 kafka_2.12-3.7.1]#
命令行使用
kafka-topics.sh
kafka-topic 的使用:创建、删除、查看、修改
# REQUIRED kafka的Broker的地址: hostname:port
--bootstrap-server <String: server to REQUIRED: The Kafka server to connect
connect to> to.
--create Create a new topic.
--delete Delete a topic
--describe List details for the given topics.
--list List all available topics.
--alter Alter the number of partitions and
replica assignment. Update the
configuration of an existing topic
via --alter is no longer supported
here (the kafka-configs CLI supports
altering topic configs with a --
bootstrap-server option).
--partitions <Integer: # of partitions> The number of partitions for the topic
being created or altered (WARNING:
If partitions are increased for a
topic that has a key, the partition
logic or ordering of the messages
will be affected). If not supplied
for create, defaults to the cluster
default.
--replication-factor <Integer:           The replication factor for each
  replication factor>                      partition in the topic being
                                           created. If not supplied, defaults
                                           to the cluster default.
--config <String: name=value>            A topic configuration override for the
                                           topic being created or altered.
实践
# ./kafka-topics.sh --bootstrap-server localhost:9092 --create --topic test01
[root@hadoop02 bin]# ./kafka-topics.sh --bootstrap-server localhost:9092 --create --topic test01
Created topic test01.
# ./kafka-topics.sh --bootstrap-server localhost:9092 --list
[root@hadoop02 bin]# ./kafka-topics.sh --bootstrap-server localhost:9092 --list
test01
[root@hadoop02 bin]#
# ./kafka-topics.sh --bootstrap-server localhost:9092 --describe --topic test01
[root@hadoop02 bin]# ./kafka-topics.sh --bootstrap-server localhost:9092 --describe --topic test01
Topic: test01 TopicId: BvwZ-jNbTQ-eKcNya0jEdQ PartitionCount: 1 ReplicationFactor: 1 Configs:
Topic: test01 Partition: 0 Leader: 0 Replicas: 0 Isr: 0
[root@hadoop02 bin]#
命令行生产数据
# ./kafka-console-producer.sh --bootstrap-server localhost:9092 --topic test01
命令行消费数据
# ./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test01