Reading and writing Kafka messages with the Java API

An example of driving Kafka (version kafka_2.11-0.11.0.1) from Java:

Test conclusions:

1. consumer.assign() assigns partitions manually, and the consumer's group.id has no effect. This is the more flexible style: a single consumer can listen to arbitrary partitions of arbitrary topics (both styles are sketched right after this list), e.g.

consumer1 -> (topic1.partition2, topic2.partition3, topic0.partition0)

consumer0 -> (topic1.partition1, topic2.partition1, topic0.partition3)

2. consumer.subscribe() assigns partitions automatically and requires the consumer's group.id.

When consumers sit in different group.ids, Kafka's default behavior is publish/subscribe: each group independently receives every message.

When consumers share the same group.id, Kafka's default behavior is queueing within the group: each consumer is assigned zero or more partitions, so each message is consumed by exactly one member of the group.
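To make both conclusions concrete, here is a minimal sketch of the two styles (topic0/topic1/topic2 are the placeholder names from the mapping above; the full property set lives in KafkaAdminUtils.createConsumer below):

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.Arrays;
import java.util.Properties;

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

// 1. Manual assignment: group.id is irrelevant; any mix of topics and partitions works.
KafkaConsumer<String, String> consumer1 = new KafkaConsumer<>(props);
consumer1.assign(Arrays.asList(
        new TopicPartition("topic1", 2),
        new TopicPartition("topic2", 3),
        new TopicPartition("topic0", 0)));

// 2. Automatic assignment: members of the same group split the topic's partitions.
props.put("group.id", "group-1");
KafkaConsumer<String, String> c0 = new KafkaConsumer<>(props);
c0.subscribe(Arrays.asList("jiqimao"));
KafkaConsumer<String, String> c1 = new KafkaConsumer<>(props);
c1.subscribe(Arrays.asList("jiqimao"));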

 

Producer code:

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.time.LocalTime;

public class Producer {

    private final KafkaProducer<String, String> producer;

    public final static String TOPIC = "jiqimao";

    private Producer() {
        producer = KafkaAdminUtils.createProducer();
    }

    public void produce() {
        int messageNo = 0;
        final int messageNum = 9;

        while (messageNo < messageNum) {
            String key = String.valueOf(messageNo);
            String data = String.format("%s, message %s.", LocalTime.now(), key);

            try {
                // Spread the 9 records across partitions 0-2 and report each result asynchronously.
                producer.send(new ProducerRecord<>(TOPIC, messageNo % 3, key, data), new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                        if (e != null) {
                            e.printStackTrace();    // metadata is null when the send failed
                            return;
                        }
                        System.out.println("Callback --> offset = " + recordMetadata.offset()
                                + ", partition = " + recordMetadata.partition());
                    }
                });
            } catch (Exception e) {
                e.printStackTrace();
            }

            messageNo++;
        }

        producer.close();
    }

    public static void main(String[] args) {
        KafkaAdminUtils.init();

        new Producer().produce();
    }
}
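The send above is asynchronous (fire-and-forget plus callback). If you would rather block until the broker acknowledges a record, the Future returned by send() can be awaited; a minimal sketch, placed inside produce() where the exceptions are already caught:

// Synchronous variant: get() blocks until acknowledgement and throws if the send failed.
RecordMetadata meta = producer.send(new ProducerRecord<>(TOPIC, "0", "hello")).get();
System.out.println("offset = " + meta.offset() + ", partition = " + meta.partition());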

 

Consumer code:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Arrays;
import java.util.List;

public class Consumer implements Runnable {

    private final int no;
    private final KafkaConsumer<String, String> consumer;

    public final static List<String> TOPIC = Arrays.asList("jiqimao");

    private Consumer(int no) {
        // Every instance joins the same group; pass "group-" + no instead to give each consumer its own group.
        consumer = KafkaAdminUtils.createConsumer(TOPIC, "group-1", no);
        this.no = no;
    }

    @Override
    public void run() {
        this.consume();
    }

    public void consume() {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(1000);   // timeout in ms (0.11 API)
            System.out.printf("Consumer %d polled %d record(s).%n", no, records.count());
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("consumer: " + no + ", topic = " + record.topic()
                        + ", partition = " + record.partition()
                        + ", offset = " + record.offset()
                        + ", value = " + record.value());
            }
        }
    }

    public static void main(String[] args) {
        KafkaAdminUtils.init();

        // Two consumers in the same group: the three partitions of "jiqimao" are split between them.
        new Thread(new Consumer(0)).start();
        new Thread(new Consumer(1)).start();
//        new Thread(new Consumer(2)).start();   // with 3 partitions, a third member gets exactly one partition
    }
}
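The class above relies on enable.auto.commit=true, so offsets are committed on a timer whether or not the records were fully handled. If at-least-once processing matters, a manual-commit loop is a small change; a sketch, assuming enable.auto.commit is set to "false" in createConsumer and process() is a hypothetical handler:

// Manual offset management: commit only after the whole batch is processed.
while (true) {
    ConsumerRecords<String, String> records = consumer.poll(1000);
    for (ConsumerRecord<String, String> record : records) {
        process(record);        // hypothetical application logic
    }
    consumer.commitSync();      // a crash before this line re-delivers the batch
}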

 

Kafka utility class:

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicListing;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionInfo;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;

public class KafkaAdminUtils {

    private static final String topic = "jiqimao";

    private static AdminClient admin;

    public static void initClient() {
        if (admin == null) {
            Properties props = new Properties();
            props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            admin = AdminClient.create(props);
        }
    }

    public static void init() {
        try {
            initClient();
//            listTopics();
//            deleteTopics(Arrays.asList(topic));
//            listTopics();
//            createTopic(Arrays.asList(topic));
            listTopics();
            showTopicsDetail(Arrays.asList(topic));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static KafkaProducer<String, String> createProducer() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092");  // broker address(es)
        properties.put("acks", "all");          // a record is "committed" only once all in-sync replicas have acknowledged it
        properties.put("retries", 0);           // do not retry failed sends; raise this for resilience
        properties.put("batch.size", 16384);    // batch up to this many bytes per partition to cut request count; a full batch is sent immediately, ignoring linger.ms
        properties.put("linger.ms", 1);         // wait up to 1 ms for more records to join a batch instead of sending each one at once
        properties.put("buffer.memory", 33554432);  // total memory the producer may use to buffer unsent records
        properties.put("key.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");

        return new KafkaProducer<>(properties);
    }

    public static KafkaConsumer<String, String> createConsumer(List<String> topics, String groupId, int partition) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092");
        properties.put("group.id", groupId);
        properties.put("enable.auto.commit", "true");
        properties.put("auto.commit.interval.ms", "1000");
        properties.put("auto.offset.reset", "latest");
        properties.put("session.timeout.ms", "30000");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(topics);                                                          // automatic partition assignment
//        consumer.assign(Arrays.asList(new TopicPartition(topics.get(0), partition)));       // manual partition assignment
        return consumer;
    }

    public static void createTopic(List<String> topics) {
        List<NewTopic> newTopicList = new ArrayList<>();
        topics.forEach(topic -> {
            NewTopic newTopic = new NewTopic(topic, 3, (short) 1);   // 3 partitions, replication factor 1
            newTopicList.add(newTopic);
        });
        admin.createTopics(newTopicList);                            // asynchronous; see the blocking sketch below
    }

    public static void deleteTopics(List<String> topics) throws InterruptedException, ExecutionException, TimeoutException {
        admin.deleteTopics(topics).all().get(10, TimeUnit.SECONDS);
    }

    public static Collection<String> listTopics() throws InterruptedException, ExecutionException {
        Collection<String> list = admin.listTopics().listings().get().stream()
                .map(TopicListing::name).collect(Collectors.toList());
        System.out.println(list);
        return list;
    }

    public static void showTopicsDetail(List<String> topics) throws InterruptedException, ExecutionException {
        admin.describeTopics(topics).all().get().forEach((topic, description) -> {
            System.out.println("==== Topic " + topic + " Begin ====");
            for (TopicPartitionInfo partition : description.partitions()) {
                System.out.println(partition.toString());
            }
            System.out.println("==== Topic " + topic + " End ====");
        });
    }
}
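createTopics() returns without waiting for the brokers, so if init() is switched to delete and recreate the topic, a producer started immediately afterwards can race the creation. A blocking variant that also tolerates an already-existing topic, as a sketch (createTopicBlocking is a name introduced here, not part of the original class):

import org.apache.kafka.common.errors.TopicExistsException;

public static void createTopicBlocking(List<String> topics)
        throws InterruptedException, ExecutionException, TimeoutException {
    List<NewTopic> newTopics = topics.stream()
            .map(t -> new NewTopic(t, 3, (short) 1))
            .collect(Collectors.toList());
    try {
        admin.createTopics(newTopics).all().get(10, TimeUnit.SECONDS);
    } catch (ExecutionException e) {
        if (!(e.getCause() instanceof TopicExistsException)) {
            throw e;            // an existing topic is fine for this demo; rethrow anything else
        }
    }
}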