自定义分区器（Custom Partitioner）：将所有消息固定路由到分区 0
package my.test.Partitioner;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import java.util.Map;
/**
 * Custom Kafka partitioner that routes every record to partition 0,
 * regardless of topic, key, value, or cluster layout.
 *
 * <p>Registered on the producer via the {@code partitioner.class} property.
 */
public class MyPartitioner implements Partitioner {

    /**
     * Chooses the partition for a record.
     *
     * @param topic      destination topic (ignored)
     * @param key        record key object (ignored)
     * @param keyBytes   serialized key (ignored)
     * @param value      record value object (ignored)
     * @param valueBytes serialized value (ignored)
     * @param cluster    current cluster metadata (ignored)
     * @return always partition 0
     */
    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        // Fixed assignment: every record lands in partition 0.
        return 0;
    }

    /** Nothing to release — this partitioner holds no resources. */
    @Override
    public void close() {
        // no-op
    }

    /** No settings are consumed — this partitioner is not configurable. */
    @Override
    public void configure(Map<String, ?> configs) {
        // no-op
    }
}
使用自定义分区器的 Producer（通过 partitioner.class 属性指定 MyPartitioner）
package my.test.Partitioner;
import org.apache.kafka.clients.producer.*;
import java.util.Properties;
/**
 * Demo producer that sends ten string messages ("aaa0".."aaa9") to topic
 * {@code "first1"} using the custom {@link MyPartitioner} (wired in via the
 * {@code partitioner.class} property), printing the partition and offset of
 * each acknowledged record.
 */
public class PartitionProducer {

    public static void main(String[] args) {
        Properties prop = new Properties();
        // ProducerConfig constants carry the exact same string keys as the raw
        // literals but are compile-time checked against typos.
        prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop1:9092");
        // acks=all: wait for the full ISR to acknowledge each record.
        prop.put(ProducerConfig.ACKS_CONFIG, "all");
        prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        // Route records through the custom partitioner defined in this package.
        prop.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "my.test.Partitioner.MyPartitioner");

        // try-with-resources guarantees the producer is closed (flushing any
        // buffered records) even if send() throws — the original leaked the
        // producer on that path.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(prop)) {
            for (int i = 0; i < 10; i++) {
                System.out.println(i + 1);
                // No key supplied: partition choice is left entirely to MyPartitioner.
                producer.send(new ProducerRecord<String, String>("first1", "aaa" + i),
                        (RecordMetadata recordMetadata, Exception e) -> {
                            if (e == null) {
                                // Ack received: report where the record landed.
                                System.out.println(recordMetadata.partition() + "--" + recordMetadata.offset());
                            } else {
                                e.printStackTrace();
                            }
                        });
            }
        }
    }
}