Using the Kafka Java Client

Adding the POM Dependency

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.5.1</version>
</dependency>

Producer Code

When configuring the producer, see ProducerConfig for the full set of options; it defines many default settings.
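Beyond the required settings shown in getKafkaConfig() below, a few commonly tuned options can be added to the same Properties object (a sketch; the values are illustrative, not recommendations):

//acks: how many replica acknowledgements count as a successful send ("0", "1", or "all")
properties.put(ProducerConfig.ACKS_CONFIG, "all");
//retries: how many times a transient send failure is retried
properties.put(ProducerConfig.RETRIES_CONFIG, 3);
//linger.ms: how long the producer waits to batch more records before sending
properties.put(ProducerConfig.LINGER_MS_CONFIG, 5);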

package com.study.spark.kafka;

import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

/**
 * Producer
 */
public class ProducerStart {

    private static final String brokerList = "192.168.1.1:9092,192.168.1.2:9092";
    private static final String topic = "topic-test";

    private static Properties getKafkaConfig(){

        Properties properties = new Properties();
        //serializer class for the message key type
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        //serializer class for the message value type
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        //broker address list
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
        //producer client ID
        properties.put(ProducerConfig.CLIENT_ID_CONFIG, "product.client.id.test");
        return properties;
    }

    private static void send(){
        KafkaProducer<String,String> producer = new KafkaProducer<>(getKafkaConfig());

        //create the message record
        ProducerRecord<String,String> record = new ProducerRecord<>(topic,"Hello World!");
        //fire-and-forget: send asynchronously without checking the result
        producer.send(record);
        //asynchronous send with a callback invoked when the send completes
        producer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                //exactly one of recordMetadata and e is non-null
                if (e != null) {
                    e.printStackTrace();
                } else {
                    System.out.println(recordMetadata.toString());
                }
            }
        });
        //synchronous send: block on the returned Future until the broker responds
        try{
            RecordMetadata recordMetadata = producer.send(record).get();
            System.out.println(recordMetadata.toString());
        }catch (InterruptedException | ExecutionException e){
            e.printStackTrace();
        }
        //close the producer, flushing any buffered records
        producer.close();
    }
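
    //minimal entry point to run the example (not part of the original)
    public static void main(String[] args) {
        send();
    }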
}


ProducerRecord (producer message record)

public class ProducerRecord<K, V> {
    //topic
    private final String topic;
    //target partition (optional; null lets the partitioner decide)
    private final Integer partition;
    //message headers
    private final Headers headers;
    //key
    private final K key;
    //value
    private final V value;
    //message timestamp
    private final Long timestamp;
    //... constructors and accessors omitted
}
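
Besides the topic/value form used above, the public constructors can also pin the key and the partition explicitly (a sketch using the topic constant from the producer example):

//topic + key + value: the key is fed to the partitioner
ProducerRecord<String,String> keyed = new ProducerRecord<>(topic, "user-1", "Hello World!");
//topic + partition + key + value: bypasses the partitioner entirely
ProducerRecord<String,String> pinned = new ProducerRecord<>(topic, 0, "user-1", "Hello World!");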

RecordMetadata (record metadata)

Returned to the producer after a message has been sent successfully.

public final class RecordMetadata {
    public static final int UNKNOWN_PARTITION = -1;
    //message offset
    private final long offset;
    //timestamp
    private final long timestamp;
    //serialized key length
    private final int serializedKeySize;
    //serialized value length
    private final int serializedValueSize;
    //topic and partition info
    private final TopicPartition topicPartition;
    private volatile Long checksum;
    //... accessors omitted
}
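
The fields are read through accessors; for example, the synchronous send shown earlier could log where the record landed:

RecordMetadata metadata = producer.send(record).get();
//topic, partition, and offset the record was written to
System.out.println(metadata.topic() + "-" + metadata.partition() + "@" + metadata.offset());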

Consumer Code

A partition can be assigned to only one consumer in a group; unless that consumer fails, no other consumer in the group may consume that partition's data. A single consumer, however, can consume multiple partitions.

  1. Determine which topics and partitions have been assigned to this consumer
  2. Determine where consumption previously stopped, and seek each partition to that position
  3. Poll for messages and process them
  4. Commit the consumed offsets back to the broker, so the next poll, or recovery after a consumer failure, knows where to resume
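
A minimal sketch of that loop, assuming the same brokerList/topic/groupId configuration as the full class below, and using synchronous commits for simplicity (step 2 happens implicitly here: poll resumes from the group's last committed offset):

KafkaConsumer<String,String> consumer = new KafkaConsumer<>(getKafkaConfig());
consumer.subscribe(Collections.singletonList(topic));   //step 1: join the group, get assignments
while (true) {
    //step 3: fetch and process messages
    ConsumerRecords<String,String> records = consumer.poll(Duration.ofMillis(100));
    for (ConsumerRecord<String,String> record : records) {
        System.out.println(record.offset() + ": " + record.value());
    }
    //step 4: commit the consumed offsets back to the broker
    consumer.commitSync();
}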

When configuring the consumer, see ConsumerConfig for the full set of options; it defines many default settings.

package com.study.spark.kafka;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.*;

/**
 * Consumer
 */
public class ConsumerStart {
    private static final String brokerList = "192.168.1.2:9092";
    private static final String topic = "topic-test";
    private static final String groupId = "group.test";


    //highest processed offset per partition; consulted when retrying failed commits
    private final Map<TopicPartition, OffsetAndMetadata> indexMap = new HashMap<>();
    
    private KafkaConsumer<String,String> consumer;

    private Properties getKafkaConfig(){
        Properties properties = new Properties();
        //deserializer class for the message key
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        //deserializer class for the message value
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        //broker address list
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
        //consumer group ID
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        return properties;
    }


    private void receive(){
        consumer = new KafkaConsumer<String, String>(getKafkaConfig());
        //subscribe to the topic
        consumer.subscribe(Collections.singletonList(topic));
        //wait until partitions have actually been assigned to this consumer
        Set<TopicPartition> assignment = new HashSet<>();
        while (assignment.isEmpty()){
            consumer.poll(Duration.ofMillis(100));
            assignment = consumer.assignment();
        }
        //seek each assigned partition to a specific offset:
        //1. look up the last processed offset for the topic and partition
        //2. seek there, so consumption resumes from that point
        for (TopicPartition topicPartition : assignment){
            long offset = 10; //placeholder; in practice, load the stored offset
            consumer.seek(topicPartition, offset);
        }
        //alternatively, consume from the beginning of each partition...
        consumer.seekToBeginning(assignment);
        //...or skip all existing messages and consume only new ones
        //(use only one of these seek strategies; the last call wins)
        consumer.seekToEnd(assignment);

        //poll loop
        while (true){
            //block up to the timeout waiting for new records
            ConsumerRecords<String,String> records = consumer.poll(Duration.ofMinutes(1));
            //partitions that contributed records to this batch
            Set<TopicPartition> topicPartitions = records.partitions();

            //records can be grouped by topic...
            Iterable<ConsumerRecord<String,String>> byTopic = records.records(topic);
            //...or by partition
            for (TopicPartition topicPartition : topicPartitions){
                List<ConsumerRecord<String,String>> byPartition = records.records(topicPartition);
            }

            //print the fetched records
            for (ConsumerRecord<String,String> record : records) {
                System.out.println(record.value());
                //track the highest processed offset per partition;
                //the commit callback consults this when retrying failed commits
                indexMap.put(new TopicPartition(record.topic(), record.partition()),
                        new OffsetAndMetadata(record.offset() + 1));
            }
            //commit offsets asynchronously, without a callback...
            consumer.commitAsync();
            //...or with a callback to react to commit failures
            consumer.commitAsync(new OffsetCommitCallBackImpl());
        }
    }
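
    //minimal entry point to run the example (not part of the original)
    public static void main(String[] args) {
        new ConsumerStart().receive();
    }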
    
    public class OffsetCommitCallBackImpl implements OffsetCommitCallback{
        @Override
        public void onComplete(Map<TopicPartition, OffsetAndMetadata> map, Exception e) {
            //check whether the commit failed
            if(e == null){
                System.out.println("Commit succeeded!");
            }else {
                //Before retrying, check whether we have already processed past the
                //failed offset; only offsets that are still current are re-committed.
                System.out.println("Offset commit failed, retrying!");
                Iterator<Map.Entry<TopicPartition, OffsetAndMetadata>> iterator = map.entrySet().iterator();
                //offsets that still need to be re-committed
                Map<TopicPartition, OffsetAndMetadata> sendMap = new HashMap<>();
                while (iterator.hasNext()){
                    Map.Entry<TopicPartition, OffsetAndMetadata> entry =  iterator.next();
                    OffsetAndMetadata indexOffset = indexMap.get(entry.getKey());
                    if(indexOffset != null && indexOffset.offset() <= entry.getValue().offset()){
                       sendMap.put(entry.getKey(),entry.getValue());
                    }
                }
                //a short sleep could be added here to avoid re-committing too aggressively
                if(!sendMap.isEmpty()){
                    consumer.commitAsync(sendMap,new OffsetCommitCallBackImpl());
                }
            }
        }
    }
}
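
One gap in the loop above: asynchronous commits may still be in flight when the consumer stops. A common pattern (a sketch, not part of the original) is a final synchronous commit before closing:

try {
    //poll/process loop as above
} finally {
    try {
        consumer.commitSync(); //blocking commit of the final offsets
    } finally {
        consumer.close();
    }
}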

ConsumerRecord (consumer record)

public class ConsumerRecord<K, V> {
    public static final long NO_TIMESTAMP = -1L;
    public static final int NULL_SIZE = -1;
    public static final int NULL_CHECKSUM = -1;
    //topic
    private final String topic;
    //partition
    private final int partition;
    //message offset
    private final long offset;
    //timestamp
    private final long timestamp;
    private final TimestampType timestampType;
    //serialized key size
    private final int serializedKeySize;
    //serialized value size
    private final int serializedValueSize;
    //headers
    private final Headers headers;
    //key
    private final K key;
    //value
    private final V value;
    private final Optional<Integer> leaderEpoch;
    private volatile Long checksum;
    //... accessors omitted
}

Partitioner

Used on the producer side: if a record does not specify a partition, the partitioner computes one, typically from the key. The partitioner's job is to assign a partition to every message.
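
A custom partitioner is plugged in through the producer configuration; MyPartitioner here is a hypothetical class (a sketch implementation follows the DefaultPartitioner source below):

properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, MyPartitioner.class.getName());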

In the ProducerConfig source, the default partitioner is DefaultPartitioner:

  static {
        CONFIG = (new ConfigDef()).define("partitioner.class", Type.CLASS, DefaultPartitioner.class, Importance.MEDIUM, "Partitioner class that implements the <code>org.apache.kafka.clients.producer.Partitioner</code> interface.");
    }
    

DefaultPartitioner (default partitioner)

If the key is not null, the partition is computed by hashing the key with the murmur2 algorithm, modulo the number of partitions.
If the key is null, the message goes to a "sticky" partition chosen per batch from the topic's available partitions (earlier client versions used plain round-robin).

public class DefaultPartitioner implements Partitioner {
    private final StickyPartitionCache stickyPartitionCache = new StickyPartitionCache();

    //compute the partition for a record
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        if (keyBytes == null) {
            return this.stickyPartitionCache.partition(topic, cluster);
        } else {
            List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
            int numPartitions = partitions.size();
            return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
        }
    }
    //called when a new batch is created, to move the sticky partition forward
    public void onNewBatch(String topic, Cluster cluster, int prevPartition) {
        this.stickyPartitionCache.nextPartition(topic, cluster, prevPartition);
    }
}
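
For reference, a minimal custom Partitioner sketch (the hypothetical MyPartitioner class referenced above, not from the original) that routes all null-key records to partition 0:

import java.util.Arrays;
import java.util.Map;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

public class MyPartitioner implements Partitioner {
    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionsForTopic(topic).size();
        if (keyBytes == null) {
            return 0; //all null-key records go to partition 0
        }
        //non-negative hash of the serialized key, modulo the partition count
        return (Arrays.hashCode(keyBytes) & Integer.MAX_VALUE) % numPartitions;
    }

    @Override
    public void close() { }

    @Override
    public void configure(Map<String, ?> configs) { }
}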

ProducerInterceptor (producer interceptor)

You can implement this interface to process outgoing messages in one place: uniformly modifying them, adding fields, collecting statistics, and so on.

public interface ProducerInterceptor<K, V> extends Configurable {
    //invoked before the message is sent
    ProducerRecord<K, V> onSend(ProducerRecord<K, V> var1);
    //invoked when the send is acknowledged or fails, before the user callback runs
    void onAcknowledgement(RecordMetadata var1, Exception var2);

    void close();
}
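
A sketch of a concrete interceptor (hypothetical class, not from the original) that prefixes every value and counts successful acknowledgements, plus its registration with the producer:

import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class PrefixInterceptor implements ProducerInterceptor<String, String> {
    private final AtomicLong acked = new AtomicLong();

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        //records are immutable, so return a new one with the modified value
        return new ProducerRecord<>(record.topic(), record.partition(),
                record.key(), "prefix-" + record.value());
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception e) {
        if (e == null) {
            acked.incrementAndGet(); //count successful sends
        }
    }

    @Override
    public void close() { }

    @Override
    public void configure(Map<String, ?> configs) { }
}

//registration in the producer configuration (a comma-separated list of classes is allowed):
properties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, PrefixInterceptor.class.getName());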

Kerberos (Krb) Authentication

To enable Kerberos authentication, add the following settings to the producer, consumer, or topic-administration configuration:

//set the security protocol
properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
//point the JVM at the JAAS configuration file
System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, "/cib/f021l0/jaas.config");
//point the JVM at the Kerberos configuration file
System.setProperty("java.security.krb5.conf", "/cib/f021l0/krb5.conf");

jaas.config contents

KafkaClient {
   com.sun.security.auth.module.Krb5LoginModule required
   useKeyTab=true
   storeKey=true
   keyTab="/cib/f021l0/f021l0.keytab"
   principal="f021l0@BDBP.CIB"
}