Producer: the message and topic (optionally with a key or partition) are assembled into a ProducerRecord object, which is serialized and transmitted over the network to the appropriate broker; the broker returns a response containing the topic, the partition, and the record's offset within that partition.
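For reference, a quick sketch of the ProducerRecord constructors with and without an explicit key or partition, using the same "test" topic as the examples below:

// topic + value only: the partitioner spreads records across partitions
ProducerRecord<String, String> r1 = new ProducerRecord<String, String>("test", "value");
// topic + key + value: records with the same key always land in the same partition
ProducerRecord<String, String> r2 = new ProducerRecord<String, String>("test", "key", "value");
// topic + partition + key + value: the target partition is fixed explicitly
ProducerRecord<String, String> r3 = new ProducerRecord<String, String>("test", 0, "key", "value");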
public static KafkaProducer<String, String> createProducer() {
    Properties properties = new Properties();
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "129.211.14.137:9092");
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    KafkaProducer<String, String> producer = new KafkaProducer<String, String>(properties);
    /**
     * Commonly tuned producer settings:
     * acks specifies how many partition replicas must receive the message before the write counts as successful.
     * acks=0   do not wait for any server response; the write is assumed successful.
     * acks=1   the write succeeds as soon as the partition leader has received the message.
     * acks=all the write succeeds only after all in-sync replicas have received the message.
     *
     * pro.put("buffer.memory","1024") sets the size of the producer's memory buffer.
     * pro.put("compression.type","gzip") selects the compression codec; no compression by default.
     *   snappy uses little CPU and gives good performance and network savings; gzip costs more CPU
     *   but compresses better, which helps when bandwidth is the constraint. Compression reduces
     *   both network and storage overhead.
     * pro.put("retries","2") sets how many times the producer retries a failed send;
     *   retry.backoff.ms sets the wait between retries.
     * pro.put("batch.size","") messages bound for the same partition are grouped into one batch;
     *   this sets the memory available per batch, in bytes (it rarely needs changing in practice).
     * pro.put("linger.ms","") sets how long to wait for more messages to join a batch before sending it.
     *   By default the batch is sent as soon as a sender thread is available.
     * pro.put("client.id","sdasda") identifies the client; the server uses it to trace where messages come from.
     * pro.put("max.in.flight.requests.per.connection","")
     *   sets how many requests the producer may send before receiving responses from the server.
     */
    return producer;
}
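A minimal sketch of how these options might be applied when building the producer (createTunedProducer is a hypothetical name and the values are illustrative, not recommendations):

public static KafkaProducer<String, String> createTunedProducer() {
    Properties pro = new Properties();
    pro.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "129.211.14.137:9092");
    pro.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    pro.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
    pro.put(ProducerConfig.ACKS_CONFIG, "all");                // wait for all in-sync replicas
    pro.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy"); // low CPU cost, decent ratio
    pro.put(ProducerConfig.RETRIES_CONFIG, "2");               // retry a failed send twice
    pro.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, "200");    // wait 200 ms between retries
    pro.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384");        // 16 KB of memory per batch
    pro.put(ProducerConfig.LINGER_MS_CONFIG, "5");             // wait up to 5 ms for a fuller batch
    return new KafkaProducer<String, String>(pro);
}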
/**
 * Producing messages synchronously or asynchronously
 */
@Test
public void fire_and_forget() throws ExecutionException, InterruptedException {
    ProducerRecord<String, String> record =
            new ProducerRecord<String, String>("test", "hello world_fire_and_forget");
    KafkaProducer<String, String> producer = createProducer();
    // fire-and-forget: send without checking the result; flush so the buffered
    // record actually leaves the client before the test method returns
    producer.send(record);
    producer.flush();
}
@Test
public void synsend() {
ProducerRecord<String, String> record =
new ProducerRecord<String, String>("test", "hello world_synsend");
try {
RecordMetadata recordMetadata = createProducer().send(record).get();
String topic = recordMetadata.topic();
int partition = recordMetadata.partition();
long offset = recordMetadata.offset();
System.out.println("topic:" + topic + ",partition:" + partition + ",offset:" + offset);
} catch (InterruptedException e) {
e.printStackTrace();
} catch (ExecutionException e) {
e.printStackTrace();
}
}
@Test
public void asyncsend() throws InterruptedException {
    ProducerRecord<String, String> record =
            new ProducerRecord<String, String>("test", "hello world_asyncsend");
    createProducer().send(record, new Callback() {
        @Override
        public void onCompletion(RecordMetadata recordMetadata, Exception e) {
            // on failure the exception is non-null and recordMetadata is null,
            // so only read the metadata on the success path
            if (e == null) {
                String topic = recordMetadata.topic();
                int partition = recordMetadata.partition();
                long offset = recordMetadata.offset();
                System.out.println("send succeeded");
                System.out.println("topic:" + topic + ",partition:" + partition + ",offset:" + offset);
            } else {
                e.printStackTrace();
            }
        }
    });
    Thread.sleep(3000);
}
Consumer:
public static KafkaConsumer<String, String> createConsumer() {
    Properties properties = new Properties();
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "129.211.14.137:9092");
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, "myGroup");
    properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    /**
     * Commonly tuned consumer settings:
     * auto.offset.reset  two strategies when there is no valid offset: latest (start reading the
     *                    partition from the newest records) or earliest (start from the beginning)
     * max.poll.records   maximum number of records returned by a single poll
     * enable.auto.commit whether offsets are committed automatically; every auto.commit.interval.ms
     *                    (5 s by default, set to 1 s above) the largest offset returned by poll is committed
     */
    return new KafkaConsumer<String, String>(properties);
}
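A minimal sketch of a consumer with those options set explicitly (createTunedConsumer is a hypothetical name and the values are illustrative; enable.auto.commit is turned off here because manual commits, as in the subscribe() example below, should not overlap with automatic ones):

public static KafkaConsumer<String, String> createTunedConsumer() {
    Properties properties = new Properties();
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "129.211.14.137:9092");
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, "myGroup");
    properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");   // commit offsets manually instead
    properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // start from the beginning when no offset exists
    properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "100");       // at most 100 records per poll
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    return new KafkaConsumer<String, String>(properties);
}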
@Test
public void subscribe() {
    KafkaConsumer<String, String> consumer = createConsumer();
    consumer.subscribe(Collections.singletonList("test"));
    try {
        while (true) {
            /**
             * poll(timeout) waits up to the given time for records,
             * then returns control to the consuming thread.
             */
            ConsumerRecords<String, String> records = consumer.poll(10);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("topic:" + record.topic() + ",partition:" + record.partition() + ",offset:" + record.offset() + ",key:" + record.key() + ",value:" + record.value());
            }
            // commit the latest offsets returned by poll, once per batch rather than per record
            // (set enable.auto.commit to false when committing manually, so the two mechanisms don't overlap)
            // synchronous commit:
            //consumer.commitSync();
            // asynchronous commit:
            consumer.commitAsync(new OffsetCommitCallback() {
                @Override
                public void onComplete(Map<TopicPartition, OffsetAndMetadata> map, Exception e) {
                    if (e != null) {
                        System.out.println("fail");
                    } else {
                        System.out.println("success");
                    }
                }
            });
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        consumer.close();
    }
}
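A common refinement of this loop, shown here as a sketch (the method name subscribeWithSafeShutdown is hypothetical): commit asynchronously inside the loop for throughput, then commit synchronously once on shutdown so the final offsets are not lost if the last async commit fails.

@Test
public void subscribeWithSafeShutdown() {
    KafkaConsumer<String, String> consumer = createConsumer();
    consumer.subscribe(Collections.singletonList("test"));
    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(10);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("offset:" + record.offset() + ",value:" + record.value());
            }
            consumer.commitAsync(); // fast; does not retry on failure
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        try {
            consumer.commitSync(); // retries until it succeeds or hits an unrecoverable error
        } finally {
            consumer.close();
        }
    }
}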
Silencing Kafka's log output: turn off Kafka's own loggers in the logging configuration.
<logger name="org.apache.kafka" level="off" />
<logger name="org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer" level="off" />
<logger name="org.springframework.kafka.listener.adapter.RecordMessagingMessageListenerAdapter" level="off" />