import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;
//kafka-2.1.0
/**
 * Demo producer for Kafka 2.1.0: sends 20 numeric key/value string records
 * to topic "tp1", routed through a custom partitioner.
 */
public class Producer1 {
    public static void main(String[] args) {
        Properties config = new Properties();
        config.put("bootstrap.servers", "192.168.226.128:9092");
        // wait for full ISR acknowledgement of each record
        config.put("acks", "all");
        config.put("retries", 0);
        // maximum size (bytes) of a batch sent in one request
        config.put("batch.size", 16384);
        // how long (ms) to wait for more records before sending a batch
        config.put("linger.ms", 1);
        // total bytes of memory available for buffering unsent records
        config.put("buffer.memory", 33554432);
        // serialize both keys and values as UTF-8 strings
        config.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        config.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // custom partitioner: routes records by numeric key (see SimplePartition)
        config.put("partitioner.class", "cn.uincloud.kafka.SimplePartition");

        Producer<String, String> producer = new KafkaProducer<>(config);
        for (int n = 0; n < 20; n++) {
            String payload = "" + n;
            producer.send(new ProducerRecord<>("tp1", payload, payload));
        }
        // close() flushes any buffered records before returning
        producer.close();
    }
}
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
import java.util.List;
import java.util.Map;
/**
 * Custom partitioner that maps a numeric string key to partition
 * {@code key mod numPartitions}. Non-numeric keys fall back to a hash of the
 * key; null keys go to partition 0.
 */
public class SimplePartition implements Partitioner {
    /**
     * Chooses the partition for a record.
     *
     * @param topic      topic the record is being sent to
     * @param key        record key (expected to be a numeric string, may be null)
     * @param keyBytes   serialized key (unused)
     * @param value      record value (unused)
     * @param valueBytes serialized value (unused)
     * @param cluster    current cluster metadata, used to count partitions
     * @return a partition index in [0, numPartitions)
     */
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        // look up how many partitions the topic currently has
        List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
        int numPartitions = partitions.size();
        // the producer demo configures 3 partitions, but any count works here
        if (key == null) {
            // keyless records all land on partition 0 (original code would have NPE'd)
            return 0;
        }
        try {
            // floorMod instead of % so negative numeric keys still yield a valid index
            return Math.floorMod(Integer.parseInt(key.toString()), numPartitions);
        } catch (NumberFormatException e) {
            // non-numeric key: fall back to a hash-based assignment instead of crashing the producer
            return Math.floorMod(key.toString().hashCode(), numPartitions);
        }
    }

    public void close() {
    }

    public void configure(Map<String, ?> map) {
    }
}
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
/**
 * Demo consumer group: three consumers in group "3" subscribe to topic "tp1"
 * and print every record they receive, each on its own thread.
 */
public class ConsumerG implements Runnable {
    /** Shared consumer configuration; read-only after the static initializer runs. */
    private static final Properties props;
    static {
        props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.226.128:9092");
        // consumer group id: the three instances share partitions of the topic
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "3");
        // commit offsets automatically every second
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    }

    public static void main(String[] args) {
        // three consumers of the same group, one per pool thread
        ExecutorService executorService = Executors.newFixedThreadPool(3);
        executorService.submit(new ConsumerG());
        executorService.submit(new ConsumerG());
        executorService.submit(new ConsumerG());
        // no further tasks will be submitted; running poll loops are unaffected
        executorService.shutdown();
    }

    public void run() {
        // try-with-resources guarantees the consumer's network resources are
        // released if the poll loop ever exits (e.g. via an exception)
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Arrays.asList("tp1"));
            while (true) {
                // poll(Duration) replaces the long-deprecated poll(long) in Kafka 2.x
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("ThreadID:%s, offset = %d, key = %s, value = %s%n",
                            Thread.currentThread().getId(), record.offset(), record.key(), record.value());
                }
            }
        }
    }
}