# Kafka API in Practice

### Dependencies

```xml
<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
<dependency>
  <groupId>org.apache.kafka</groupId>
  <artifactId>kafka-clients</artifactId>
  <version>2.2.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/log4j/log4j -->
<dependency>
  <groupId>log4j</groupId>
  <artifactId>log4j</artifactId>
  <version>1.2.17</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-api -->
<dependency>
  <groupId>org.slf4j</groupId>
  <artifactId>slf4j-api</artifactId>
  <version>1.7.25</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.slf4j/slf4j-log4j12 -->
<dependency>
  <groupId>org.slf4j</groupId>
  <artifactId>slf4j-log4j12</artifactId>
  <version>1.7.25</version>
</dependency>
```
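
The custom serializer shown later calls `SerializationUtils`; assuming that is the Apache commons-lang3 utility (the original does not say which library it comes from), the extra dependency would be:

```xml
<!-- assumed: provides org.apache.commons.lang3.SerializationUtils -->
<dependency>
  <groupId>org.apache.commons</groupId>
  <artifactId>commons-lang3</artifactId>
  <version>3.9</version>
</dependency>
```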

### log4j Configuration

```properties
log4j.rootLogger = info,console

log4j.appender.console = org.apache.log4j.ConsoleAppender
log4j.appender.console.Target = System.out
log4j.appender.console.layout = org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern = %p %d{yyyy-MM-dd HH:mm:ss} %c - %m%n
```

### Consumer

```java
//1. Configure the Kafka connection
Properties props=new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class.getName());
props.put(ConsumerConfig.GROUP_ID_CONFIG,"group01");

//2. Create the topic consumer
KafkaConsumer<String,String> consumer=new KafkaConsumer<String, String>(props);
//3. Subscribe to every topic whose name starts with "topic"
consumer.subscribe(Pattern.compile("^topic.*$"));

//4. Poll and iterate over the records
while (true){
  ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
  Iterator<ConsumerRecord<String, String>> recordIterator = consumerRecords.iterator();
  while (recordIterator.hasNext()){
    ConsumerRecord<String, String> record = recordIterator.next();
    String key = record.key();
    String value = record.value();
    long offset = record.offset();
    int partition = record.partition();
    System.out.println("key:"+key+",value:"+value+",partition:"+partition+",offset:"+offset);
  }
}
```
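
The poll loop above never exits cleanly. A common shutdown pattern (a sketch, not part of the original example) is to call `consumer.wakeup()` from a shutdown hook, which makes a blocked `poll()` throw `WakeupException`:

```java
//Sketch: clean shutdown for the poll loop above (assumes the same consumer setup).
final Thread mainThread = Thread.currentThread();
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    consumer.wakeup();// thread-safe; interrupts a blocked poll()
    try { mainThread.join(); } catch (InterruptedException ignored) {}
}));
try {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
        // ... process records ...
    }
} catch (WakeupException e) {
    // expected during shutdown
} finally {
    consumer.close();// leave the group and release resources
}
```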

### Producer

```java
//1. Configure the connection
Properties props=new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class.getName());

//2. Create the producer
KafkaProducer<String,String> producer=new KafkaProducer<String, String>(props);

//3. Build and send the records
for(int i=0;i<10;i++){
  ProducerRecord<String, String> record = new ProducerRecord<>("topic01", "key" + i, "value" + i);
  producer.send(record);
}

producer.close();
```
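
Note that `send()` is asynchronous and only hands the record to the producer's internal buffer. To observe the broker's acknowledgement per record, the standard `Callback` overload can be used; a minimal sketch against the same producer:

```java
//Sketch: asynchronous send with a callback; the callback runs on the producer's I/O thread.
producer.send(record, (metadata, exception) -> {
    if (exception != null) {
        exception.printStackTrace();// the send failed after all retries
    } else {
        System.out.println("acked: " + metadata.topic() + "-" + metadata.partition() + "@" + metadata.offset());
    }
});
```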

### Serialization

#### Deserializer

```java
public class ObjectDeserializer implements Deserializer<Object> {
    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        System.out.println("configure");
    }

    @Override
    public Object deserialize(String topic, byte[] data) {
        // plain Java deserialization, e.g. SerializationUtils from commons-lang3
        return SerializationUtils.deserialize(data);
    }

    @Override
    public void close() {
        System.out.println("close");
    }
}
```

#### Serializer

```java
public class ObjectSerializer implements Serializer<Object> {

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        System.out.println("configure");
    }

    @Override
    public byte[] serialize(String topic, Object data) {
        // the value must implement Serializable
        return SerializationUtils.serialize((Serializable) data);
    }

    @Override
    public void close() {
        System.out.println("close");
    }
}
```
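
The producer below sends a custom `User` object, but the original does not show that class. A minimal sketch consistent with its usage (fields matching `new User(i, "user" + i, new Date())`, and `Serializable` so `SerializationUtils` can handle it):

```java
//Hypothetical User class; the field names are assumptions inferred from the constructor call below.
public class User implements Serializable {
    private Integer id;
    private String name;
    private Date birthDay;

    public User(Integer id, String name, Date birthDay) {
        this.id = id;
        this.name = name;
        this.birthDay = birthDay;
    }

    @Override
    public String toString() {
        return "User{id=" + id + ", name='" + name + "', birthDay=" + birthDay + "}";
    }
}
```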

#### Producer

```java
//1. Configure the connection
Properties props=new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,ObjectSerializer.class.getName());

//2. Create the producer
KafkaProducer<String,User> producer=new KafkaProducer<String, User>(props);

//3. Build and send the records
for(int i=0;i<10;i++){
  ProducerRecord<String, User> record = new ProducerRecord<>("topic01", "key"+i, new User(i,"user"+i,new Date()));
  producer.send(record);
}
producer.close();
```

#### Consumer

```java
//1. Configure the Kafka connection
Properties props=new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,ObjectDeserializer.class.getName());
props.put(ConsumerConfig.GROUP_ID_CONFIG,"group01");

//2. Create the topic consumer
KafkaConsumer<String,User> consumer=new KafkaConsumer<String, User>(props);
//3. Subscribe to every topic whose name starts with "topic"
consumer.subscribe(Pattern.compile("^topic.*$"));

while (true){
  ConsumerRecords<String, User> consumerRecords = consumer.poll(Duration.ofSeconds(1));
  Iterator<ConsumerRecord<String, User>> recordIterator = consumerRecords.iterator();
  while (recordIterator.hasNext()){
    ConsumerRecord<String, User> record = recordIterator.next();
    String key = record.key();
    User value = record.value();
    long offset = record.offset();
    int partition = record.partition();
    System.out.println("key:"+key+",value:"+value+",partition:"+partition+",offset:"+offset);
  }
}
```

### Custom Partitioner

```java
public class UserDefinePartitioner implements Partitioner {
    private AtomicInteger atomicInteger=new AtomicInteger(0);

    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionsForTopic(topic).size();
        if(keyBytes==null || keyBytes.length==0){
            // no key: round-robin; mask the sign bit before taking the modulo
            return (atomicInteger.addAndGet(1) & Integer.MAX_VALUE) % numPartitions;
        } else {
            // keyed records: murmur2 hash, same scheme as Kafka's default partitioner
            return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
        }
    }

    @Override
    public void close() {
        System.out.println("close");
    }

    @Override
    public void configure(Map<String, ?> configs) {
        System.out.println("configure");
    }
}
```
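
Keyless records are spread round-robin by the counter, while keyed records are hashed with murmur2 just like Kafka's default partitioner, so keyed traffic keeps its partition affinity. The producer below exercises the round-robin path by sending keyless records: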
```java
//1. Configure the connection
Properties props=new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class.getName());
props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG,UserDefinePartitioner.class.getName());

//2. Create the producer
KafkaProducer<String,String> producer=new KafkaProducer<String, String>(props);

//3. Build and send the records (no key, so the custom partitioner round-robins)
for(int i=0;i<10;i++){
  ProducerRecord<String, String> record = new ProducerRecord<>("topic01", "value" + i);
  producer.send(record);
}

producer.close();
```

### Interceptor

```java
public class UserDefineProducerInterceptor implements ProducerInterceptor {
  @Override
  public ProducerRecord onSend(ProducerRecord record) {
    // copy topic/key/value into a new record and attach a custom header
    ProducerRecord wrapRecord = new ProducerRecord(record.topic(), record.key(), record.value());
    wrapRecord.headers().add("user","baizhi".getBytes());
    return wrapRecord;
  }

  @Override
  public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
    System.out.println("metadata:"+metadata+",exception:"+exception);
  }

  @Override
  public void close() {
    System.out.println("close");
  }

  @Override
  public void configure(Map<String, ?> configs) {
    System.out.println("configure");
  }
}
```
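
On the consuming side the injected header can be read back. A small sketch, assuming the consumer poll loop shown earlier:

```java
//Sketch: read the "user" header added by the interceptor (inside the record loop).
Header header = record.headers().lastHeader("user");
if (header != null) {
    System.out.println("user header: " + new String(header.value()));
}
```

Registering the interceptor on the producer is just another config entry: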
```java
//1. Configure the connection
Properties props=new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class.getName());
props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG,UserDefineProducerInterceptor.class.getName());

//2. Create the producer
KafkaProducer<String,String> producer=new KafkaProducer<String, String>(props);

//3. Build and send the records
for(int i=0;i<10;i++){
  ProducerRecord<String, String> record = new ProducerRecord<>("topic01", "key" + i, "value" + i);
  producer.send(record);
}

producer.close();
```

### Offset Control

```java
//1. Configure the Kafka connection
Properties props=new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class.getName());
props.put(ConsumerConfig.GROUP_ID_CONFIG,"group01");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,false);// commit offsets manually

//2. Create the topic consumer
KafkaConsumer<String,String> consumer=new KafkaConsumer<String, String>(props);
//3. Subscribe to every topic whose name starts with "topic"
consumer.subscribe(Pattern.compile("^topic.*$"));

while (true){
  ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
  Iterator<ConsumerRecord<String, String>> recordIterator = consumerRecords.iterator();
  while (recordIterator.hasNext()){
    ConsumerRecord<String, String> record = recordIterator.next();
    String key = record.key();
    String value = record.value();
    long offset = record.offset();
    int partition = record.partition();

    Map<TopicPartition, OffsetAndMetadata> offsets=new HashMap<TopicPartition, OffsetAndMetadata>();
    // commit offset+1: the committed value is the position of the *next* record to read
    offsets.put(new TopicPartition(record.topic(),partition),new OffsetAndMetadata(offset+1));
    consumer.commitAsync(offsets, new OffsetCommitCallback() {
      @Override
      public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        System.out.println("committed offset "+offset+"!");
      }
    });
    System.out.println("key:"+key+",value:"+value+",partition:"+partition+",offset:"+offset);
  }
}
```
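
Committing record-by-record as above is easy to follow but chatty. A common alternative (a sketch under the same consumer setup) is one synchronous commit per poll batch:

```java
//Sketch: one commitSync() per batch instead of one commitAsync() per record.
while (true) {
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
    for (ConsumerRecord<String, String> record : records) {
        // ... process record ...
    }
    if (!records.isEmpty()) {
        consumer.commitSync();// commits the positions after the records just returned
    }
}
```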


### Acks & Retries

After sending a message, the Kafka producer expects the broker to acknowledge it within a configured timeout. If no acknowledgement arrives in time, the producer resends the message, retrying up to n times.

![image-20191109120918615](assets/image-20191109120918615.png)

If some attempt N <= n succeeds, the message counts as sent; if all n attempts fail, the send is considered failed and an exception is thrown to the caller. Retries improve reliability, but they can leave duplicate copies of the message on the broker.

```java
//1. Configure the connection
Properties props=new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class.getName());
props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG,UserDefineProducerInterceptor.class.getName());
props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG,1);// deliberately tiny timeout to force retries
props.put(ProducerConfig.ACKS_CONFIG,"-1");// -1/all = wait for all ISR replicas; other options: 1, 0
props.put(ProducerConfig.RETRIES_CONFIG,10);// number of retries

//2. Create the producer
KafkaProducer<String,String> producer=new KafkaProducer<String, String>(props);

//3. Build and send the record
for(int i=0;i<1;i++){
  ProducerRecord<String, String> record = new ProducerRecord<>("topic01", "key" + i, "value" + i);
  producer.send(record);
}
producer.close();
```
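
With request.timeout.ms at 1 ms, virtually every request times out, so the producer resends the record up to ten times; since idempotence is off here, several copies of the same record can end up in topic01.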

### Idempotence

HTTP/1.1 defines idempotence as follows: one request and multiple identical requests for a resource should have the same effect on the resource itself (network timeouts and similar issues aside). In other words, executing the operation any number of times affects the resource the same way as executing it once.

Methods can also have the property of "idempotence" in that (aside from error or expiration issues) the side-effects of N > 0 identical requests is the same as for a single request.

Kafka added support for idempotence in version 0.11.0.0. Idempotence is a producer-side property: it guarantees that the messages a producer sends are neither lost nor duplicated. The key to implementing it is that the server must be able to recognize a duplicate request and filter it out, which requires two things:

A unique identifier: to tell requests apart, each request must carry a unique ID. In a payment system, for example, the order number plays this role.

A record of processed requests: a unique ID alone is not enough; the server must also remember which requests it has already handled, compare each incoming ID against that record, and reject any duplicate it has already seen.

Kafka may have many producers generating messages concurrently, but it only needs to guarantee idempotence within each individual producer, so it introduces a PID (producer ID) to tell producers apart.

What Kafka has to solve, then, is idempotence of the producer's sends: telling whether each message is a duplicate. Kafka does this by attaching a sequence number to every message. Each message belongs to exactly one partition, and messages in different partitions cannot duplicate one another, so sequence numbers are tracked per partition.

The broker caches the latest sequence number for each producer/partition pair. For every incoming message, it accepts the message only if its sequence number is exactly one greater than the cached one; otherwise it discards the message.

![image-20191109121427520](assets/image-20191109121427520.png)
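
Note that enable.idempotence constrains other settings: the producer requires acks=all (-1), retries greater than 0, and max.in.flight.requests.per.connection of at most 5, and it refuses to start with a conflicting configuration. The settings below comply: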

```java
//1. Configure the connection
Properties props=new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class.getName());
props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG,UserDefineProducerInterceptor.class.getName());
props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG,1);// tiny timeout to force retries
props.put(ProducerConfig.ACKS_CONFIG,"-1");
props.put(ProducerConfig.RETRIES_CONFIG,3);
props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG,true);// enable idempotence

//2. Create the producer
KafkaProducer<String,String> producer=new KafkaProducer<String, String>(props);

//3. Build and send the record
for(int i=0;i<1;i++){
  ProducerRecord<String, String> record = new ProducerRecord<>("topic01", "key" + i, "value" + i);
  producer.send(record);
}

producer.close();
```
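
The forced timeouts still trigger resends, but the broker now deduplicates them by PID and sequence number, so topic01 should end up with a single copy of the record.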

### Transactions

#### Producer Only

```java
//1. Configure the connection
Properties props=new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class.getName());
props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG,"transaction-id");

//2. Create the producer
KafkaProducer<String,String> producer=new KafkaProducer<String, String>(props);
producer.initTransactions();// initialize the transaction

try{
  producer.beginTransaction();// begin the transaction
  //3. Build and send the records
  for(int i=0;i<10;i++){
    Thread.sleep(10000);// pause between sends; the transaction stays open meanwhile
    ProducerRecord<String, String> record = new ProducerRecord<>("topic01", "key" + i, "value" + i);
    producer.send(record);
  }
  producer.commitTransaction();// commit the transaction
}catch (Exception e){
  producer.abortTransaction();// abort the transaction
}

producer.close();
```
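
Note that transactional.id must be stable and unique per logical producer: when a new producer instance starts with the same id, the broker bumps the producer epoch and fences out the older instance.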

#### Consumer & Producer

```java
//1. Build the producer and the consumer
KafkaProducer<String,String> producer=buildKafkaProducer();
KafkaConsumer<String, String> consumer = buildKafkaConsumer("group01");

consumer.subscribe(Arrays.asList("topic01"));
producer.initTransactions();// initialize the transaction

try{
  while(true){
    ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
    Iterator<ConsumerRecord<String, String>> consumerRecordIterator = consumerRecords.iterator();
    // begin the transaction
    producer.beginTransaction();
    Map<TopicPartition, OffsetAndMetadata> offsets=new HashMap<TopicPartition, OffsetAndMetadata>();
    while (consumerRecordIterator.hasNext()){
      ConsumerRecord<String, String> record = consumerRecordIterator.next();
      // build the output record
      ProducerRecord<String,String> producerRecord=new ProducerRecord<String,String>("topic02",record.key(),record.value());
      producer.send(producerRecord);
      // track the consumed offsets (+1: the next position to read)
      offsets.put(new TopicPartition(record.topic(),record.partition()),new OffsetAndMetadata(record.offset()+1));
    }
    // commit the consumed offsets and the produced records atomically
    producer.sendOffsetsToTransaction(offsets,"group01");
    producer.commitTransaction();
  }
}catch (Exception e){
  producer.abortTransaction();// abort the transaction
}finally {
  producer.close();
}

public static KafkaProducer<String,String> buildKafkaProducer(){
  Properties props=new Properties();
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class.getName());
  props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG,"transaction-id");
  return new KafkaProducer<String, String>(props);
}

public static KafkaConsumer<String,String> buildKafkaConsumer(String group){
  Properties props=new Properties();
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
  props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
  props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class.getName());
  props.put(ConsumerConfig.GROUP_ID_CONFIG,group);
  props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,false);
  props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG,"read_committed");// only read committed messages

  return new KafkaConsumer<String, String>(props);
}
```
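
Because the consumer runs with isolation.level=read_committed, it only ever sees messages from committed transactions, and committing offsets through producer.sendOffsetsToTransaction rather than through the consumer is what makes the consume-transform-produce cycle atomic. Newer clients (Kafka 2.5 and later) prefer the overload that takes consumer.groupMetadata() instead of the group id string, but with the 2.2.0 client used here the string overload is the one available.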