需求:将所有分区的数据存储到topic的第1分区
定义分区生成类:
package com.fengling;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import java.util.Map;
/**
 * Custom Kafka partitioner that routes every record of a topic to a single
 * fixed partition (partition 1 by default).
 *
 * <p>The target partition can optionally be overridden by setting the
 * {@code custom.partition} producer property; when absent the original
 * behavior (everything to partition 1) is preserved.
 */
public class CustomPartition implements Partitioner {

    // Partition every record is routed to; defaults to 1 for backward compatibility.
    private int targetPartition = 1;

    /**
     * Returns the configured target partition for every record.
     *
     * <p>Note: no per-record logging here — {@code partition()} is invoked on the
     * producer hot path for every single message, so a {@code System.out.println}
     * in this method would severely throttle throughput.
     */
    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        return targetPartition;
    }

    @Override
    public void close() {
        // No resources to release.
    }

    /**
     * Reads the optional {@code custom.partition} producer property. When it is
     * not set, the partitioner keeps its default of partition 1.
     */
    @Override
    public void configure(Map<String, ?> configs) {
        Object configured = configs.get("custom.partition");
        if (configured != null) {
            targetPartition = Integer.parseInt(configured.toString());
        }
    }
}
设置分区类(设置属性即可):
package com.fengling;
import org.apache.kafka.clients.producer.*;
import org.junit.Before;
import org.junit.Test;
import java.util.Properties;
/**
 * Demo test that produces messages to topic {@code we319} using the
 * {@link CustomPartition} partitioner, which sends everything to partition 1.
 */
public class KafkaCustomPartitionTest {

    // Topic all demo messages are published to.
    private static final String TOPIC = "we319";

    private Properties props;
    private Producer<String, String> producer;

    /**
     * Builds the producer configuration, including the custom partitioner
     * that routes every record to a single partition.
     */
    @Before
    public void init() {
        props = new Properties();
        // Kafka broker host:port list
        props.put("bootstrap.servers", "hadoop129:9092,hadoop130:9092,hadoop131:9092");
        // Wait for acknowledgement from all in-sync replicas
        props.put("acks", "all");
        // Maximum number of send retries
        props.put("retries", 0);
        // Batch size in bytes
        props.put("batch.size", 16384);
        // Linger before sending, to allow batching (ms)
        props.put("linger.ms", 1);
        // Total send buffer memory in bytes
        props.put("buffer.memory", 33554432);
        // Key serializer
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Value serializer
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Custom partitioner class
        props.put("partitioner.class", "com.fengling.CustomPartition");
    }

    @Test
    public void produce() {
        System.out.println("begin produce");
        connectKafka();
        try {
            sendMsg();
        } finally {
            // Close the producer so buffered records are flushed and network
            // resources are released (the original code leaked the producer).
            producer.close();
        }
        System.out.println("finish produce");
    }

    // Creates the KafkaProducer from the configuration built in init().
    private void connectKafka() {
        System.out.println("create a connection!");
        producer = new KafkaProducer<String, String>(props);
    }

    /**
     * Sends 4000 numbered messages, pausing 2 seconds between sends.
     * Stops early (with the interrupt flag restored) if the thread is interrupted.
     */
    private void sendMsg() {
        for (int i = 1000; i < 5000; i++) {
            String msg = "西藏318,2020我们一起走!!! 消息序号:" + i;
            // ProducerRecord(topic, key, value): the original passed (msg, "fengling"),
            // which made the human-readable payload the record KEY and the constant
            // the VALUE. Swapped so the message text is the record value.
            producer.send(new ProducerRecord<String, String>(TOPIC, "fengling", msg));
            System.out.println("send one msg = " + msg);
            try {
                Thread.sleep(1000 * 2);
            } catch (InterruptedException e) {
                // Restore the interrupt flag and stop sending instead of
                // swallowing the interruption.
                Thread.currentThread().interrupt();
                return;
            }
        }
    }
}
发送消息: