Setting Up Kafka with Docker
First, install Docker for Windows (Docker Desktop).
Open a cmd window and search for the ZooKeeper and Kafka images; pick the ones with the most downloads:
docker search zookeeper
docker search kafka
We pick the first image in each list.
Next, pull the zookeeper and kafka images; for Kafka we use wurstmeister/kafka:
docker pull zookeeper
docker pull wurstmeister/kafka
Open Docker Desktop and you can see that the zookeeper image has been pulled.
The kafka image has now been pulled as well.
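You can also confirm both pulls from the command line:
docker images zookeeper
docker images wurstmeister/kafka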
Start cmd as administrator.
Start ZooKeeper, naming the container zk:
docker run -dit --name zk -p 2181:2181 zookeeper
ZooKeeper is now running.
To restart ZooKeeper:
docker restart zk
To follow the ZooKeeper logs:
docker logs -f zk
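As an extra health check, you can query ZooKeeper's status inside the container (this assumes the official zookeeper image, which puts zkServer.sh on the PATH):
docker exec -it zk zkServer.sh status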
Next, start three Kafka brokers to form a cluster. Each broker gets its own KAFKA_BROKER_ID, host port, and advertised listener; replace <host-ip> in the commands below with your machine's IP address:
docker run -d --name kafka01 -p 9092:9092 -e KAFKA_BROKER_ID=0 -e KAFKA_ZOOKEEPER_CONNECT=<host-ip>:2181 -e ALLOW_PLAINTEXT_LISTENER=yes -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://<host-ip>:9092 -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 wurstmeister/kafka
docker run -d --name kafka02 -p 9093:9093 -e KAFKA_BROKER_ID=1 -e KAFKA_ZOOKEEPER_CONNECT=<host-ip>:2181 -e ALLOW_PLAINTEXT_LISTENER=yes -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://<host-ip>:9093 -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9093 wurstmeister/kafka
docker run -d --name kafka03 -p 9094:9094 -e KAFKA_BROKER_ID=2 -e KAFKA_ZOOKEEPER_CONNECT=<host-ip>:2181 -e ALLOW_PLAINTEXT_LISTENER=yes -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://<host-ip>:9094 -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9094 wurstmeister/kafka
You can see that the three Kafka brokers have started.
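You can confirm this from the terminal as well by listing the running broker containers:
docker ps --filter "name=kafka"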
Test that the installation works and try basic producing and consuming.
Create a topic with a replication factor of 3 and 5 partitions to check that the cluster is configured correctly.
kafka01 will produce messages, and kafka02 and kafka03 will consume them:
docker exec -ti kafka01 kafka-topics.sh --create --zookeeper <host-ip>:2181 --replication-factor 3 --partitions 5 --topic TestTopic
The topic is created successfully.
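As an extra check, you can list all topics known to the cluster (using the same --zookeeper form as the create command above):
docker exec -ti kafka01 kafka-topics.sh --list --zookeeper <host-ip>:2181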
Inspect the newly created topic from each broker:
docker exec -ti kafka01 kafka-topics.sh --describe --bootstrap-server <host-ip>:9092 --topic TestTopic
docker exec -ti kafka02 kafka-topics.sh --describe --bootstrap-server <host-ip>:9093 --topic TestTopic
docker exec -ti kafka03 kafka-topics.sh --describe --bootstrap-server <host-ip>:9094 --topic TestTopic
Produce/consume test: messages produced on kafka01 should be visible on the other two nodes. Open three cmd terminals and run one of the following commands in each.
Producer:
docker exec -ti kafka01 kafka-console-producer.sh --broker-list <host-ip>:9092 --topic TestTopic
Consumers:
docker exec -ti kafka02 kafka-console-consumer.sh --bootstrap-server <host-ip>:9093 --topic TestTopic --from-beginning
docker exec -ti kafka03 kafka-console-consumer.sh --bootstrap-server <host-ip>:9094 --topic TestTopic --from-beginning
As shown, both consumers receive the messages sent by the producer.
Using Kafka from the Java API. The code below is for testing and reference only; the data files are not provided.
Maven dependencies:
<dependencies>
    <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>2.8.1</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/log4j/log4j -->
    <dependency>
        <groupId>log4j</groupId>
        <artifactId>log4j</artifactId>
        <version>1.2.17</version>
    </dependency>
    <dependency>
        <groupId>com.opencsv</groupId>
        <artifactId>opencsv</artifactId>
        <version>5.5.1</version>
    </dependency>
</dependencies>
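log4j 1.x also needs a configuration file on the classpath, otherwise the clients only emit a "no appenders" warning. A minimal log4j.properties sketch (placed under src/main/resources; the pattern below is just one reasonable choice):
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{HH:mm:ss} %-5p %c - %m%n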
Producer program:
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Properties;

public class ProducerDemo {
    // The LGJY topic must already exist with at least 3 partitions, because
    // the records below are sent to partitions 0, 1 and 2 explicitly.
    private static final String TOPIC = "LGJY";

    public static void main(String[] args) {
        String a_csvFilePath = "股票a.csv";
        String b_csvFilePath = "股票b.csv";
        String c_csvFilePath = "股票c.csv";

        // Producer configuration: it can be loaded from a properties file or,
        // as here, assembled in a Properties object.
        Properties properties = new Properties();
        // Cluster nodes; replace with your own broker addresses.
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.89.19:9092,192.168.89.19:9093,192.168.89.19:9094");
        // Keys and values are sent over the network, so both need a serializer.
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // acks=1: the partition leader must acknowledge every write.
        properties.put(ProducerConfig.ACKS_CONFIG, "1");

        long startTime = System.currentTimeMillis();
        // The producer is created in the same try-with-resources block as the
        // readers, so it is always closed (close() also flushes pending records).
        try (BufferedReader br0 = new BufferedReader(new InputStreamReader(new FileInputStream(a_csvFilePath), "GBK"));
             BufferedReader br1 = new BufferedReader(new InputStreamReader(new FileInputStream(b_csvFilePath), "GBK"));
             BufferedReader br2 = new BufferedReader(new InputStreamReader(new FileInputStream(c_csvFilePath), "GBK"));
             Producer<String, String> producer = new KafkaProducer<>(properties)) {
            String line0;
            String line1;
            String line2;
            while ((line0 = br0.readLine()) != null && (line1 = br1.readLine()) != null && (line2 = br2.readLine()) != null) {
                // Split each CSV line; keep the stock name (column 2) and trade volume (column 4).
                String[] data0 = line0.split(",");
                String[] data1 = line1.split(",");
                String[] data2 = line2.split(",");
                String[] transactionData = new String[3];
                transactionData[0] = data0[2] + "," + data0[4];
                transactionData[1] = data1[2] + "," + data1[4];
                transactionData[2] = data2[2] + "," + data2[4];
                // One fixed partition per stock: a -> 0, b -> 1, c -> 2. Note that
                // send() is asynchronous; the println only confirms the record was queued.
                producer.send(new ProducerRecord<>(TOPIC, 0, null, transactionData[0]));
                System.out.println("Message queued, stock a trade: " + data0[4]);
                producer.send(new ProducerRecord<>(TOPIC, 1, null, transactionData[1]));
                System.out.println("Message queued, stock b trade: " + data1[4]);
                producer.send(new ProducerRecord<>(TOPIC, 2, null, transactionData[2]));
                System.out.println("Message queued, stock c trade: " + data2[4]);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        long endTime = System.currentTimeMillis();
        System.out.println("Total producer send time: " + (endTime - startTime) + " ms");
    }
}
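Note that the producer writes to fixed partitions 0, 1 and 2 of the LGJY topic, so that topic must already exist with at least three partitions. One way to create it, following the same pattern as TestTopic above:
docker exec -ti kafka01 kafka-topics.sh --create --zookeeper <host-ip>:2181 --replication-factor 3 --partitions 3 --topic LGJY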
Consumer program:
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Objects;
import java.util.Properties;

public class ConsumerDemo {
    private static final String TOPIC = "LGJY";

    public static void main(String[] args) {
        Properties properties = new Properties();
        // Cluster nodes; replace with your own broker addresses.
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.89.19:9092,192.168.89.19:9093,192.168.89.19:9094");
        // Deserializers for keys and values.
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Disable auto-commit so offsets are only committed after processing,
        // which guards against losing messages.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Consumer group name.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "LGJY-GROUP1");

        // Create the consumer and subscribe to the topic.
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
        kafkaConsumer.subscribe(Collections.singletonList(TOPIC));

        int a = 0;
        int b = 0;
        int c = 0;
        while (true) {
            // poll() blocks for up to the given timeout waiting for records.
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(500));
            for (ConsumerRecord<String, String> record : records) {
                String[] re = record.value().split(",");
                if (Objects.equals(re[0], "股票a")) {
                    a += Integer.parseInt(re[1]);
                    System.out.println("Running trade volume of 股票a: " + a);
                }
                if (Objects.equals(re[0], "股票b")) {
                    b += Integer.parseInt(re[1]);
                    System.out.println("Running trade volume of 股票b: " + b);
                }
                if (Objects.equals(re[0], "股票c")) {
                    c += Integer.parseInt(re[1]);
                    System.out.println("Running trade volume of 股票c: " + c);
                }
            }
            // Manually commit the offsets of the processed batch, once per poll.
            kafkaConsumer.commitSync();
        }
    }
}
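While the consumer is running, you can check its committed offsets and lag with the consumer-groups tool that ships in the image:
docker exec -ti kafka01 kafka-consumer-groups.sh --bootstrap-server <host-ip>:9092 --describe --group LGJY-GROUP1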