In the previous article we covered how to manage Kafka from the command line; for details see Kafka 深入浅出——Kafka的命令行的管理使用以及参数详解.
Today we look at how to actually use Kafka from Java. Without further ado, straight to the code.
Producer code development
- Create a Maven project and add the dependencies
<dependencies>
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>1.0.1</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka -->
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka_2.11</artifactId>
        <version>1.1.0</version>
    </dependency>
</dependencies>
<build>
    <plugins>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-compiler-plugin</artifactId>
            <version>3.0</version>
            <configuration>
                <source>1.8</source>
                <target>1.8</target>
                <encoding>UTF-8</encoding>
                <!-- <verbose>true</verbose> -->
            </configuration>
        </plugin>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-shade-plugin</artifactId>
            <version>2.4.3</version>
            <executions>
                <execution>
                    <phase>package</phase>
                    <goals>
                        <goal>shade</goal>
                    </goals>
                    <configuration>
                        <filters>
                            <filter>
                                <artifact>*:*</artifact>
                                <excludes>
                                    <exclude>META-INF/*.SF</exclude>
                                    <exclude>META-INF/*.DSA</exclude>
                                    <exclude>META-INF/*.RSA</exclude>
                                </excludes>
                            </filter>
                        </filters>
                        <transformers>
                            <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                <mainClass></mainClass>
                            </transformer>
                        </transformers>
                    </configuration>
                </execution>
            </executions>
        </plugin>
    </plugins>
</build>
- Code development
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class KafkaProducerStudy {
    /**
     * Produce data into Kafka through the Java API
     * @param args
     */
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        // Acknowledgement mode for sent messages
        props.put("acks", "all");
        props.put("retries", 0);
        // Size of the send buffer, default 32MB
        props.put("buffer.memory", 33554432);
        // Batch size, i.e. how much data is written to the topic per batch, default 16KB
        props.put("batch.size", 16384);
        // How long to wait before sending a batch; default 0 means do not wait, send immediately
        props.put("linger.ms", 1);
        // Serializers for keys and values
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
        for (int i = 0; i < 100; i++) {
            // Neither a partition number nor a key is specified, so records are spread across partitions in a round-robin fashion
            ProducerRecord<String, String> record = new ProducerRecord<String, String>("test", "helloworld" + i);
            producer.send(record);
        }
        // Close the producer client
        producer.close();
    }
}
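If you do want to influence where a record lands, you can give it a key (records with the same key go to the same partition) or an explicit partition number, and you can attach a callback to check the result of each send. Below is a minimal sketch that would replace the body of the send loop above; it reuses the same producer and topic, and the key format "user-..." is just a placeholder:

// Sketch: send a keyed record and inspect the send result asynchronously
ProducerRecord<String, String> keyedRecord =
        new ProducerRecord<String, String>("test", "user-" + 1, "helloworld");
producer.send(keyedRecord, (metadata, exception) -> {
    if (exception != null) {
        // the send failed after exhausting retries
        exception.printStackTrace();
    } else {
        System.out.println("sent to partition " + metadata.partition() + " at offset " + metadata.offset());
    }
});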
Consumer code development
- Auto-commit offsets
package com.lijiawei.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Arrays;
import java.util.Properties;

public class KafkaConsumerStudy {
    public static void main(String[] args) {
        // Prepare the configuration properties
        Properties props = new Properties();
        // Kafka cluster address
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        // Consumer group id
        props.put("group.id", "consumer-test");
        // Commit offsets automatically
        props.put("enable.auto.commit", "true");
        // Interval between automatic offset commits
        props.put("auto.commit.interval.ms", "1000");
        // Default is latest
        // earliest: if a partition has a committed offset, consume from that offset; otherwise consume from the beginning
        // latest: if a partition has a committed offset, consume from that offset; otherwise consume only newly produced data
        // none: if every partition has a committed offset, consume from those offsets; if any partition has no committed offset, throw an exception
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        // Subscribe to the topics to consume
        consumer.subscribe(Arrays.asList("test"));
        while (true) {
            // Keep polling for data
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                // Partition the record came from
                int partition = record.partition();
                // Key of the record
                String key = record.key();
                // Offset of the record
                long offset = record.offset();
                // Value of the record
                String value = record.value();
                System.out.println("partition:" + partition + "\t key:" + key + "\toffset:" + offset + "\tvalue:" + value);
            }
        }
    }
}
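The loop above never exits, so the consumer is never closed. A common pattern is to call wakeup() from a shutdown hook, catch the resulting WakeupException in the poll loop, and close the consumer in a finally block. The sketch below shows the idea; the structure is illustrative and not part of the original example:

// Sketch: graceful shutdown of the poll loop (assumes consumer is already configured as above)
final Thread mainThread = Thread.currentThread();
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    consumer.wakeup();          // makes the blocked poll() throw WakeupException
    try {
        mainThread.join();      // wait for the poll loop to finish cleanly
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}));
try {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.value());
        }
    }
} catch (org.apache.kafka.common.errors.WakeupException e) {
    // expected during shutdown, nothing to do
} finally {
    consumer.close();
}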
- Manual offset commit
package com.kaikeba.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

// todo: requirement: a Kafka consumer that commits offsets manually
public class KafkaConsumerControllerOffset {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        props.put("group.id", "controllerOffset");
        // Disable auto commit and commit offsets manually instead
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        // Topics the consumer will consume
        consumer.subscribe(Arrays.asList("test"));
        // Number of records to accumulate before manually committing offsets
        final int minBatchSize = 20;
        // Buffer that holds a batch of records
        List<ConsumerRecord<String, String>> buffer = new ArrayList<ConsumerRecord<String, String>>();
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                buffer.add(record);
            }
            if (buffer.size() >= minBatchSize) {
                // insertIntoDb(buffer); process the batch here, e.g. write it to a database
                System.out.println("records in the buffer: " + buffer.size());
                System.out.println("finished processing this batch...");
                consumer.commitSync();
                buffer.clear();
            }
        }
    }
}
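commitSync() with no arguments commits the latest offsets returned by the last poll for all assigned partitions. For finer control, commitSync also accepts an explicit offset map, so you can commit per partition right after processing that partition's records. A minimal sketch of how the poll-loop body above could be rewritten (the println stands in for real processing; it additionally needs imports for OffsetAndMetadata, TopicPartition and java.util.Collections):

// Sketch: process and commit one partition at a time
for (TopicPartition partition : records.partitions()) {
    List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
    for (ConsumerRecord<String, String> record : partitionRecords) {
        System.out.println(record.offset() + ": " + record.value());
    }
    long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
    // the committed offset is the offset of the next record to read, hence +1
    consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
}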
- Consuming from specified partitions
Each topic may have multiple partitions. Instead of subscribing to a topic and letting the group assign partitions, a consumer can use assign() to read from specific partitions directly.
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import java.util.Arrays;
import java.util.Properties;
public class ConsumPartition {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("group.id", "test");
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        String topic = "foo";
        // Manually assign the partitions to consume --- start
        TopicPartition partition0 = new TopicPartition(topic, 0);
        TopicPartition partition1 = new TopicPartition(topic, 1);
        consumer.assign(Arrays.asList(partition0, partition1));
        // Manually assign the partitions to consume --- end
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        }
    }
}
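Once partitions are assigned manually, the consumer can also control where reading starts within each partition using seek(), seekToBeginning(), or seekToEnd(). A minimal sketch that could be placed right after the assign() call above (the offset value 10 is an arbitrary example):

// Sketch: position the consumer inside the manually assigned partitions before polling
consumer.seekToBeginning(Arrays.asList(partition0));   // re-read partition 0 from the start
consumer.seek(partition1, 10L);                        // start partition 1 at offset 10 (arbitrary example value)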