Zookeeper + Kafka: Environment Setup and Usage
Part 1: Install JDK, Zookeeper, and Kafka
1. Upload the software packages to the Linux server
# Press Win + R and type cmd to open a Windows terminal
# In the commands below, > marks Windows commands and $ marks Linux commands
# Connect to the server from the Windows terminal first
> ssh root@192.168.10.20 -p 22
> yes
> ********
# Create the working directories
$ mkdir -p {/opt/software,/opt/module}
# Open a new Windows terminal and upload the packages
> scp apache-zookeeper-3.8.0-bin.tar.gz jdk-8u311-linux-x64.tar.gz kafka_2.13-3.1.0.tgz root@192.168.10.20:/opt/software
2. Extract the packages into the target directory
# Switch back to the Linux terminal connected in step 1
$ cd /opt/software
$ tar -zxvf apache-zookeeper-3.8.0-bin.tar.gz -C /opt/module
$ tar -zxvf jdk-8u311-linux-x64.tar.gz -C /opt/module
$ tar -zxvf kafka_2.13-3.1.0.tgz -C /opt/module
3. Rename the extracted directories
$ cd /opt/module
$ mv apache-zookeeper-3.8.0-bin zookeeper
# Note: the JDK tarball extracts to jdk1.8.0_311, not jdk-8u311-linux-x64
$ mv jdk1.8.0_311 jdk8
$ mv kafka_2.13-3.1.0 kafka
4. Configure environment variables
# Quote the heredoc delimiter so $JAVA_HOME and $PATH are written literally instead of being expanded now
$ cat << 'EOF' >> /etc/profile
# set java environment
export JAVA_HOME=/opt/module/jdk8
export JRE_HOME=$JAVA_HOME/jre
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
# set zookeeper environment
export ZOOKEEPER_HOME=/opt/module/zookeeper
export PATH=$ZOOKEEPER_HOME/bin:$PATH
# set kafka environment
export KAFKA_HOME=/opt/module/kafka
export PATH=$KAFKA_HOME/bin:$PATH
EOF
# Apply the changes to the current shell
$ source /etc/profile
# Check that the configuration took effect
# Verify Java
$ java -version
# Verify Zookeeper (zkServer.sh will only start cleanly once zoo.cfg is created in step 5)
$ zkServer.sh start
# Verify Kafka (needs a running Zookeeper; at this stage it mainly confirms the command is on the PATH)
$ kafka-server-start.sh /opt/module/kafka/config/server.properties
# Once verified, stop the services again (kafka-server-stop.sh takes no arguments)
$ zkServer.sh stop
$ kafka-server-stop.sh
5. Configure Zookeeper
$ cd /opt/module/zookeeper/conf
$ touch zoo.cfg
# Write the configuration file
$ cat << EOF >> ./zoo.cfg
tickTime=2000
dataDir=/opt/module/zookeeper/data
clientPort=2181
initLimit=5
syncLimit=2
EOF
# Create the data directory referenced above
$ mkdir -p /opt/module/zookeeper/data
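For reference: tickTime is Zookeeper's base time unit in milliseconds, and initLimit and syncLimit are measured in ticks, so followers in an ensemble would get 10 s (5 × 2000 ms) to connect to the leader and 4 s to catch up before being dropped. For this single-node setup the values above are more than enough.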
6. Configure Kafka
$ cd /opt/module/kafka/config
$ vi server.properties
# Uncomment line 36 and set it to this server's IP
36 advertised.listeners=PLAINTEXT://192.168.10.20:9092
7. Disable the firewall (fine for a lab machine; in production, open ports 2181 and 9092 instead)
$ systemctl stop firewalld && systemctl disable firewalld
Part 2: Start the services
1. Start Zookeeper, then Kafka
# Both commands run in the foreground, so use a separate terminal for each (or append &)
# Note: this uses the Zookeeper bundled with Kafka; the standalone install from Part 1 (zkServer.sh start) works just as well
$ zookeeper-server-start.sh /opt/module/kafka/config/zookeeper.properties
$ kafka-server-start.sh /opt/module/kafka/config/server.properties
# Verify: output like the following means both services are up
$ jps
13172 Jps
2491 QuorumPeerMain
2847 Kafka
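In the jps output above, QuorumPeerMain is the Zookeeper server process and Kafka is the broker (Jps is the jps tool itself); the leading numbers are process IDs and will differ on your machine.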
2. Create a topic and expand its partitions
$ kafka-topics.sh --bootstrap-server 192.168.10.20:9092 --create --topic order
# Expand to 2 partitions; within a consumer group each partition is consumed by at most one consumer, so more partitions allow more parallel consumers
$ kafka-topics.sh --alter --bootstrap-server 192.168.10.20:9092 --partitions 2 --topic order
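The same topic management can be done from Java with the AdminClient API in kafka-clients. Below is a minimal sketch under the same assumptions as the commands above (broker at 192.168.10.20:9092, topic order); the class name TopicAdmin is just for illustration.

package com.lemon.kafka;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.NewTopic;
import java.util.Collections;
import java.util.Properties;

public class TopicAdmin {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.10.20:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // Create topic "order" with 1 partition and replication factor 1
            admin.createTopics(Collections.singleton(new NewTopic("order", 1, (short) 1)))
                    .all().get();
            // Expand to 2 partitions (partition counts can only be increased)
            admin.createPartitions(Collections.singletonMap("order", NewPartitions.increaseTo(2)))
                    .all().get();
        }
    }
}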
3. Create a console producer and consumer
# Open two Linux terminals, one for each
# Producer
$ kafka-console-producer.sh --bootstrap-server 192.168.10.20:9092 --topic order
# Consumer (--from-beginning also replays messages sent before it started)
$ kafka-console-consumer.sh --bootstrap-server 192.168.10.20:9092 --topic order --from-beginning
# Send messages from the producer terminal and confirm the consumer receives them
4. How to run multiple brokers on a single node
# Create one config file per broker
$ cd /opt/module/kafka/config
$ cp server.properties server-one.properties
$ cp server.properties server-two.properties
# Edit server-one.properties
$ vi ./server-one.properties
# Change lines 21 and 36, and give this broker its own listener port and log directory
21 broker.id=1
36 advertised.listeners=PLAINTEXT://192.168.10.20:9093
# Also set, further down in the same file (otherwise all brokers try to bind 9092 and share one log dir):
listeners=PLAINTEXT://:9093
log.dirs=/tmp/kafka-logs-1
# Edit server-two.properties
$ vi ./server-two.properties
# Change lines 21 and 36, plus the listener port and log directory
21 broker.id=2
36 advertised.listeners=PLAINTEXT://192.168.10.20:9094
listeners=PLAINTEXT://:9094
log.dirs=/tmp/kafka-logs-2
# Start the three brokers, one per terminal
$ kafka-server-start.sh /opt/module/kafka/config/server.properties
$ kafka-server-start.sh /opt/module/kafka/config/server-one.properties
$ kafka-server-start.sh /opt/module/kafka/config/server-two.properties
# By extension, a multi-node multi-broker cluster needs only the same kind of config changes
# Tip: broker.id must be unique across the entire cluster
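To confirm that all three brokers joined the cluster, you can list the live nodes from Java; another small sketch under the same assumptions (the class name ClusterCheck is illustrative):

package com.lemon.kafka;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.common.Node;
import java.util.Collection;
import java.util.Properties;

public class ClusterCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.10.20:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // describeCluster() reports the brokers currently registered with the cluster
            Collection<Node> nodes = admin.describeCluster().nodes().get();
            for (Node node : nodes) {
                System.out.printf("broker.id=%d at %s:%d%n", node.id(), node.host(), node.port());
            }
        }
    }
}

With all three brokers up, this should print three entries (ids 0, 1, and 2).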
Part 3: Write a Java producer and consumer
1. Add the required Maven dependency
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>3.0.1</version>
</dependency>
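The client and broker versions need not match exactly: kafka-clients 3.0.1 works against the 3.1.0 broker installed above, though you can also align the dependency at 3.1.0.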
2. Write the producer code
package com.lemon.kafka;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;
/**
* @Classname SimpleKafkaProducer
* @Description TODO
* @Version 1.0.0
* @Date 4/25/2022 3:27 PM
* @Author LemonCoder
*/
public class SimpleKafkaProducer {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "192.168.10.20:9092");
        // "all": wait until every in-sync replica has acknowledged the record
        properties.put("acks", "all");
        properties.put("retries", 0);
        // Batch up to 16 KB of records per partition before sending
        properties.put("batch.size", 16384);
        // Wait up to 1 ms for more records to fill a batch
        properties.put("linger.ms", 1);
        // 32 MB of memory for buffering records not yet sent
        properties.put("buffer.memory", 33554432);
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        Producer<String, String> producer = null;
        try {
            producer = new KafkaProducer<>(properties);
            for (int i = 0; i < 5; i++) {
                String msg = "This is Message " + i;
                // Topic name: order
                producer.send(new ProducerRecord<>("order", msg));
                System.out.println("Sent:" + msg);
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // Guard against NPE in case the constructor itself threw
            if (producer != null) {
                producer.close();
            }
        }
    }
}
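send() is asynchronous, and the loop above fires and forgets. For per-record delivery confirmation you can pass a callback to send(); a minimal sketch under the same assumptions (the class name CallbackKafkaProducer is illustrative):

package com.lemon.kafka;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;

public class CallbackKafkaProducer {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "192.168.10.20:9092");
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        try (Producer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 5; i++) {
                String msg = "This is Message " + i;
                // The callback runs once the broker acknowledges (or rejects) the record
                producer.send(new ProducerRecord<>("order", msg), (metadata, exception) -> {
                    if (exception != null) {
                        exception.printStackTrace();
                    } else {
                        System.out.printf("Sent: %s -> partition=%d, offset=%d%n",
                                msg, metadata.partition(), metadata.offset());
                    }
                });
            }
        } // try-with-resources flushes pending records and closes the producer
    }
}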
3. Write the consumer code
package com.lemon.kafka;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
/**
* @Classname SimpleKafkaConsumer
* @Description TODO
* @Version 1.0.0
* @Date 4/26/2022 9:36 AM
* @Author LemonCoder
*/
public class SimpleKafkaConsumer {
    public static void main(String[] args) {
        // The topic name is hard-coded here
        String topicName = "order";
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.10.20:9092");
        // Consumers sharing a group.id split the topic's partitions between them
        props.put("group.id", "test");
        // Commit offsets automatically once per second
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList(topicName));
        System.out.println("Subscribed to topic " + topicName);
        while (true) {
            // poll(long) was removed in kafka-clients 3.x; use the Duration overload
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s%n",
                        record.offset(), record.key(), record.value());
            }
        }
    }
}
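With enable.auto.commit=true, offsets are committed on a timer, so a crash between commits can replay records on restart. A common alternative is committing manually after each processed batch; a minimal sketch under the same assumptions (the class name ManualCommitConsumer is illustrative):

package com.lemon.kafka;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

public class ManualCommitConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.10.20:9092");
        props.put("group.id", "test");
        // Take over offset management from the client library
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Arrays.asList("order"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, value = %s%n", record.offset(), record.value());
                }
                // Commit only after the whole batch has been processed
                consumer.commitSync();
            }
        }
    }
}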