# Change to the directory holding the downloaded archive. The original used
# the relative path "cd opt", which only works when run from / — use the
# absolute path and fail loudly if it is missing.
cd /opt || exit 1
tar -zxvf kafka_2.12-3.6.0.tgz
###### 2. 配置kafka
在kafka解压目录同一路径下创建
# Create the Kafka log directory and the ZooKeeper data/log directories.
# mkdir -p creates any missing parents, so one invocation covers all four
# paths the original created individually.
mkdir -p \
  /opt/software/kafka/log \
  /opt/software/kafka/zookeeper/log
###### 3 进入配置文件目录
# Enter the Kafka configuration directory (adjust if Kafka was unpacked elsewhere).
cd /opt/kafka_2.12-3.6.0/config/
###### 4 修改配置文件server.properties,添加下面内容
broker.id=0
# NOTE: Java .properties files do NOT support trailing "#" comments — text
# after the value becomes part of the value — so all comments below are on
# their own lines.
# Listener port. NOTE(review): port/host.name are legacy settings; Kafka 3.x
# normally uses listeners=PLAINTEXT://<ip>:9092 instead — verify before use.
port=9092
# Server IP address — change to your own server's IP
host.name=localhost
# Kafka log/data directory (the directory created earlier)
log.dirs=/opt/software/kafka/log
# ZooKeeper address and port; for a single-node deployment: localhost:2181
zookeeper.connect=localhost:2181
###### 5 配置zookeeper服务 zookeeper.properties
# ZooKeeper data directory. Comments are on their own lines because
# .properties files do not support trailing "#" comments.
dataDir=/opt/software/kafka/zookeeper
# ZooKeeper transaction-log directory
dataLogDir=/opt/software/kafka/zookeeper/log
clientPort=2181
maxClientCnxns=100
# Base time unit in milliseconds. The original had "tickTimes", which is not
# a valid ZooKeeper key — the correct key is "tickTime".
tickTime=2000
initLimit=10
syncLimit=5
###### 6 创建启动和关闭的 kafka 执行脚本
###### 6.1 创建启动脚本
cd /opt/kafka_2.12-3.6.0/
vi kafkaStart.sh
配置启动脚本 kafkaStart.sh
#!/bin/bash
# Start ZooKeeper first, then the Kafka broker.
KAFKA_HOME=/opt/kafka_2.12-3.6.0
# -daemon runs the service in the background with output going to the
# standard log directory instead of the terminal (more robust than a bare &,
# which ties the process to the shell session).
"$KAFKA_HOME/bin/zookeeper-server-start.sh" -daemon "$KAFKA_HOME/config/zookeeper.properties"
sleep 3  # give ZooKeeper a moment to come up before starting the broker
"$KAFKA_HOME/bin/kafka-server-start.sh" -daemon "$KAFKA_HOME/config/server.properties"
###### 6.2 创建关闭脚本 kafkaStop.sh
cd /opt/kafka_2.12-3.6.0/
vi kafkaStop.sh
配置关闭脚本 kafkaStop.sh
#!/bin/bash
# Stop the Kafka broker FIRST, then ZooKeeper — the broker needs ZooKeeper
# alive to complete a clean shutdown (the original stopped ZooKeeper first,
# which can leave the broker hanging). The *-stop.sh scripts take no
# arguments, so the config paths the original passed were ignored.
KAFKA_HOME=/opt/kafka_2.12-3.6.0
"$KAFKA_HOME/bin/kafka-server-stop.sh"
sleep 3  # allow the broker to finish shutting down
"$KAFKA_HOME/bin/zookeeper-server-stop.sh"
###### 7 启动脚本,关闭脚本赋予权限
# Make the scripts executable. 755 (owner rwx, group/other rx) is sufficient;
# the original 777 grants world-write, which is a needless security risk.
chmod 755 kafkaStart.sh
chmod 755 kafkaStop.sh
启动和关闭kafka
# Start and stop Kafka using the scripts created above (they are executable).
cd /opt/kafka_2.12-3.6.0/
./kafkaStart.sh  # start
./kafkaStop.sh   # stop
###### 8 创建生产者 topic 和 消费者 topic
---
cd /opt/kafka_2.12-3.6.0/bin/  # enter the Kafka bin directory
# Start a console producer ("test" is the topic name you want).
# NOTE: --broker-list is deprecated; --bootstrap-server is the supported
# option on Kafka 2.5+ and matches the consumer command below.
./kafka-console-producer.sh --bootstrap-server localhost:9092 --topic test
# Start a console consumer on the same topic.
./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test
查看 kafka 是否启动
[root@localhost kafka_2.12-3.6.0]# jps
21324 QuorumPeerMain
15211 Jps
21215 Kafka
里面有QuorumPeerMain和Kafka两个进程,说明启动成功了
查看当前的一些topic
cd /opt/kafka_2.12-3.6.0/bin/
# List existing topics. The --zookeeper option was removed in Kafka 3.0, so
# on 3.6.0 only the --bootstrap-server form works (the original's
# "--zookeeper localhost:2181" variant would fail).
./kafka-topics.sh --list --bootstrap-server localhost:9092
###### 9 Spring boot集成Kafka
1、pom依赖
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.4.1</version>
</dependency>
2. 生产者与消费者
@Autowired
private KafkaTemplate<String, String> kafkaTemplate;
@RequestMapping(“/userGets”)
public Object gets() {
// send 第一个参数为topic的名称,第二个参数为我们要发送的信息
kafkaTemplate.send(“topic.quick.default”,“1231235”);
return “发送成功”;
}
@KafkaListener(topics = {“topic1”})
public void onMessage(ConsumerRecord<?, ?> record) {
System.out.println(record.value());
}
@KafkaListener(topics = {“topic2”})
public void getMessage(ConsumerRecord<String, String> record) {
String key = record.key();
String value = record.value();
}
3. 测试
//生产者
public static void main(String[] args) {
Properties properties = new Properties();
properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, “localhost:9092”);
properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
String topic = "test-topic";
for (int i = 0; i < 10; i++) {
String message = "Message " + i;
producer.send(new ProducerRecord<>(topic, message));
System.out.println("Sent: " + message);
}
producer.close();
}
//消费者
// Standalone consumer demo: subscribes to "test-topic" and prints every record value.
public static void main(String[] args) {
Properties properties = new Properties();
properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
properties.put(ConsumerConfig.GROUP_ID_CONFIG, "group1");
properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
// Subscribe to the topic
consumer.subscribe(Collections.singletonList("test-topic"));
// Poll loop — runs forever; a real application would handle shutdown
while (true) {
// poll(Duration.ofMillis(1000)) waits UP TO 1 second for records — it is a
// timeout, NOT "1000 messages per poll" as the original comment claimed.
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
for (ConsumerRecord<String, String> record : records) {
System.out.println("=============> 消费kafka消息:"+ record.value());
}
}
}
网上学习资料一大堆,但如果学到的知识不成体系,遇到问题时只是浅尝辄止,不再深入研究,那么很难做到真正的技术提升。
一个人可以走的很快,但一群人才能走的更远!不论你是正从事IT行业的老鸟或是对IT行业感兴趣的新人,都欢迎加入我们的的圈子(技术交流、学习资源、职场吐槽、大厂内推、面试辅导),让我们一起学习成长!