kafka的设计中依赖了zookeeper。zookeeper本身是一个中心化的分布式协调服务,用来解决分布式一致性问题;kafka用它来管理broker集群的元数据(如broker注册、controller选举、topic配置等)。
网上概念说的很多了,很全也很好,话不多说,直接上代码。
一.导入依赖
之前看maven库里有两种依赖kafka-clients和kafka_2.11,经过一番查询才知道,kafka-clients比kafka_2.11依赖的jar少,对于消费者没有高版本api与低版本api的区分,所以选择了kafka-clients,另外Spring也有集成的,哈哈,又一次感觉到了Spring对于java开发的方便,真好(陆超脸)。
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>1.0.1</version>
</dependency>
二.生产者
package com.example.kafkademo;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;
/**
 * Kafka producer demo: publishes ten string records to the "message" topic,
 * pausing briefly between sends so batching behaviour can be observed.
 *
 * @author liucong
 */
public class KafkaProducerClient extends Thread {
    // Parameterized producer; the original used a raw type, which defeats
    // compile-time key/value type checking.
    private final KafkaProducer<String, String> kafkaProducer;

    // All configuration is done in the constructor.
    public KafkaProducerClient() {
        Properties properties = new Properties();
        // Kafka broker list (host:port, comma separated).
        properties.put("bootstrap.servers", "localhost:9092");
        // "all": wait for the full in-sync replica set to acknowledge each record.
        properties.put("acks", "all");
        // Number of retries on transient send failures.
        properties.put("retries", 0);
        // Default batch size in bytes.
        properties.put("batch.size", 16384);
        // Small linger so records have a chance to be batched together.
        properties.put("linger.ms", 1);
        // Upper bound (bytes) on memory used to buffer unsent records.
        properties.put("buffer.memory", 2000000);
        // Keys and values are both plain strings.
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        this.kafkaProducer = new KafkaProducer<>(properties);
    }

    @Override
    public void run() {
        try {
            for (int i = 0; i < 10; i++) {
                System.out.println("message " + " key = " + i + " value = " + i);
                kafkaProducer.send(new ProducerRecord<>("message", "key = " + i, "value = " + i));
                try {
                    sleep(10);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and stop producing instead of
                    // swallowing the interruption (original only printed the trace).
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        } finally {
            // close() flushes all pending records and releases network resources;
            // doing it in finally guarantees cleanup even on early exit.
            kafkaProducer.close();
        }
    }
}
三.消费者
package com.example.kafkademo;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.util.Arrays;
import java.util.Properties;
/**
 * Kafka consumer demo: subscribes to the "message" topic and prints every
 * record received. Polls forever until the process is stopped.
 *
 * @author liucong
 */
public class KafkaConsumerClient extends Thread {
    // Parameterized field to match the parameterized construction below;
    // the original declared it as a raw type.
    private final KafkaConsumer<String, String> kafkaConsumer;

    public KafkaConsumerClient() {
        Properties properties = new Properties();
        // Kafka broker list.
        properties.put("bootstrap.servers", "localhost:9092");
        // Consumer group this client belongs to.
        properties.put("group.id", "test");
        // Auto-commit offsets. NOTE: with the new consumer API (kafka-clients),
        // offsets are committed to Kafka's internal __consumer_offsets topic,
        // not to ZooKeeper as the original comment claimed.
        properties.put("enable.auto.commit", "true");
        // Auto-commit interval in milliseconds.
        properties.put("auto.commit.interval.ms", "1000");
        // Keys and values are both plain strings.
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        this.kafkaConsumer = new KafkaConsumer<String, String>(properties);
    }

    @Override
    public void run() {
        // subscribe() accepts multiple topics; only "message" is used here.
        kafkaConsumer.subscribe(Arrays.asList("message"));
        while (true) {
            // Block for up to 100 ms waiting for new records.
            ConsumerRecords<String, String> records = kafkaConsumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("topic = %s,offset = %d, key = %s, value = %s%n",
                        record.topic(), record.offset(), record.key(), record.value());
            }
        }
    }
}
四.测试
package com.example.kafkademo;
/**
 * Demo driver: launches one producer thread and one consumer thread so the
 * round trip through the broker can be watched on the console.
 *
 * @author liucong
 */
public class KafkaTest {
    public static void main(String[] args) {
        // Start producer and consumer concurrently and let them run.
        new KafkaProducerClient().start();
        new KafkaConsumerClient().start();
    }
}