Kafka学习笔记【三】-JAVA调用示例

1. pom.xml

<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>1.1.0</version>
</dependency>


2. TestProducer.java

package cn.gov.zjport.demo.kafka;

import java.util.Properties;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

/**
 * Minimal Kafka producer demo: sends the string "Hello" to topic "topicA"
 * once per second, either synchronously (blocking on the broker ack) or
 * asynchronously (fire-and-forget), until the thread is interrupted.
 */
public class TestProducer {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.3.9:9092,192.168.3.10:9092,192.168.3.18:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // acks: "-1"/"all" = wait for all in-sync replicas, "0" = no ack, "1" = leader only
        props.put("acks", "1");
        // number of retries on a failed send
        props.put("retries", 1);
        // a batch is flushed when either batch.size or linger.ms is reached,
        // bounded overall by buffer.memory
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        // FIX: the java client config key is "compression.type", not the legacy
        // "compression.codec" — the old key was silently ignored, so records
        // were actually sent uncompressed. gzip compresses better but costs more CPU.
        props.put("compression.type", "snappy");

        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        boolean sync = true; // true = block on each send, false = fire-and-forget
        int i = 0;
        try {
            while (true) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    // FIX: restore the interrupt flag and exit the loop instead of
                    // swallowing the interrupt in a broad catch(Exception)
                    Thread.currentThread().interrupt();
                    break;
                }
                try {
                    if (sync) {
                        // synchronous send: get() blocks until the broker acknowledges
                        producer.send(new ProducerRecord<String, String>("topicA", "Hello")).get();
                    } else {
                        // asynchronous send: returns immediately; errors are only
                        // visible via the returned Future or a send Callback
                        producer.send(new ProducerRecord<String, String>("topicA", "Hello"));
                    }
                    System.out.println("Success:" + i);
                } catch (Exception e) {
                    e.printStackTrace();
                }
                i++;
            }
        } finally {
            // FIX: flush/close were unreachable commented-out code after while(true);
            // now the producer's buffered records are drained and resources released
            // when the loop exits on interrupt.
            producer.flush();
            producer.close();
        }
    }
}


3. TestConsumer.java

package cn.gov.zjport.demo.kafka;

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

/**
 * Minimal Kafka consumer demo: joins the "TopicA-Consumers" group,
 * subscribes to topic "topicA", polls forever, prints each record and
 * commits offsets manually after every non-empty batch.
 */
public class TestConsumer {

    public static void main(String[] args) {
        Properties config = new Properties();
        config.put("bootstrap.servers", "192.168.3.9:9092,192.168.3.10:9092,192.168.3.18:9092");
        config.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        config.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // consumers that share the same group.id form one consumer group
        config.setProperty("group.id", "TopicA-Consumers");
        // offsets are committed manually below, not automatically
        config.setProperty("enable.auto.commit", "false");
        // when the group has no committed offset for a partition, start from the beginning
        config.setProperty("auto.offset.reset", "earliest");

        Consumer<String, String> consumer = new KafkaConsumer<String, String>(config);
        try {
            // subscribe() accepts a list, so several topics could be given at once
            consumer.subscribe(Arrays.asList("topicA"));

            while (true) {
                ConsumerRecords<String, String> batch = consumer.poll(1000);
                if (!batch.isEmpty()) {
                    for (ConsumerRecord<String, String> rec : batch) {
                        System.out.printf("offset = %d, key = %s, value = %s \r\n", rec.offset(), rec.key(), rec.value());
                    }
                    // persist the offsets of the batch just processed
                    consumer.commitSync();
                }
            }
        } finally {
            consumer.close();
        }
    }
}
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值