(三) Kafka的生产者消费者Api使用以及参数配置详解

  • 生产者代码

/**
 * Minimal Kafka producer example: asynchronously sends 10000 string records
 * to topic "test01" and waits for every send callback before closing.
 */
public class HelloKafkaProducer {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties properties = new Properties();
        /* Required settings: broker list plus key/value serializers.
         * Use ProducerConfig constants consistently instead of mixing
         * raw string keys with constants. */
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "s227:9092,s228:9092,s229:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
        // One count per record; await() below blocks until every callback has fired.
        CountDownLatch countDownLatch = new CountDownLatch(10000);
        for (int i = 0; i < 10000; i++) {
            ProducerRecord<String, String> record =
                    new ProducerRecord<>("test01", "test_key" + i, "test_value" + i);
            // Asynchronous send; the callback runs on the producer's I/O thread.
            producer.send(record, (recordMetadata, exception) -> {
                if (null != exception) {
                    exception.printStackTrace();
                }
                if (recordMetadata != null) {
                    System.out.println(recordMetadata.offset() + "=>" + recordMetadata.partition());
                }
                countDownLatch.countDown();
            });
        }
        countDownLatch.await();
        System.out.println("消息已经发送!");
        // Flushes any buffered records and releases network resources.
        producer.close();
    }
}
  • 消费者代码

/**
 * Minimal Kafka consumer example: joins group "test01_group", subscribes
 * to topic "test01" and prints every record it receives, forever.
 */
public class HelloKafkaConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        /* Required settings: brokers, key/value deserializers, consumer group id. */
        props.put("bootstrap.servers", "s227:9092,s228:9092,s229:9092");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("group.id", "test01_group");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Collections.singleton("test01"));

        // Poll loop: fetch a batch every 500 ms and dump each record's metadata.
        while (true) {
            ConsumerRecords<String, String> batch = consumer.poll(500);
            batch.forEach(rec -> {
                System.out.println("分区:" + rec.partition());
                System.out.println("消费的分区偏移量:" + rec.offset());
                System.out.println("key:" + rec.key());
                System.out.println("value:" + rec.value());
                System.out.println("===========================================");
            });
        }
    }
}
  • 多线程模式下生产者代码

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * 〈功能简述〉<br> 
 * 〈〉
 *
 * @author 张晓文
 * @create 2019/5/8
 * @since 1.0.0
 */
/**
 * Multi-threaded producer example: a fixed thread pool of workers shares a
 * single KafkaProducer (which is thread-safe) to send 1000 records.
 */
public class KafkaConcurrentProducer {
    /** Worker pool sized to the number of available CPU cores. */
    private static ExecutorService executorService =
            Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    /** One count per record so main() can wait until every callback has run. */
    private static CountDownLatch countDownLatch = new CountDownLatch(1000);

    /** Builds a demo user whose name embeds its id. */
    private static DemoUser makeUser(int id) {
        DemoUser demoUser = new DemoUser(id);
        demoUser.setName("xiaowen" + id);
        return demoUser;
    }

    public static void main(String[] args) throws InterruptedException {
        Properties properties = new Properties();
        /* Required settings; use ProducerConfig constants consistently. */
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "s227:9092,s228:9092,s229:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

        // KafkaProducer is a thread-safe class: one instance serves all workers.
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
        try {
            for (int i = 0; i < 1000; i++) {
                DemoUser demoUser = makeUser(i);
                ProducerRecord<String, String> record = new ProducerRecord<>(
                        "test01",
                        null,                        // partition: let the partitioner decide
                        System.currentTimeMillis(),  // explicit record timestamp
                        demoUser.getId() + "",
                        demoUser.toString());
                executorService.submit(new ProducerWorker(record, producer));
            }
            countDownLatch.await();
        } finally {
            executorService.shutdown();
            // Fix: the original leaked the producer. close() flushes buffered
            // records and releases the producer's network threads.
            producer.close();
        }
    }

    /** Runnable that sends one record through the shared producer. */
    private static class ProducerWorker implements Runnable {
        private ProducerRecord<String, String> record;
        private KafkaProducer<String, String> producer;

        public ProducerWorker(ProducerRecord<String, String> record, KafkaProducer<String, String> producer) {
            this.record = record;
            this.producer = producer;
        }

        @Override
        public void run() {
            // Tag log output with the worker thread and producer identity.
            String id = Thread.currentThread().getId() + "-" + System.identityHashCode(producer);
            producer.send(record, (recordMetadata, exception) -> {
                if (null != exception) {
                    exception.printStackTrace();
                }
                if (recordMetadata != null) {
                    System.out.println("偏移量:" + recordMetadata.offset() + "=>分区:" + recordMetadata.partition());
                }
                System.out.println(id + ":[数据-" + record + "-]已经发送完成");
                countDownLatch.countDown();
            });
        }
    }

    /** Simple value object used as the message payload. */
    public static class DemoUser {
        private int id;
        private String name;

        @Override
        public String toString() {
            return "DemoUser{" +
                    "id=" + id +
                    ", name='" + name + '\'' +
                    '}';
        }

        public String getName() {
            return name;
        }

        public void setName(String name) {
            this.name = name;
        }

        public DemoUser(int id) {
            this.id = id;
        }

        public int getId() {
            return id;
        }

        public void setId(int id) {
            this.id = id;
        }
    }

}

 

  • Kafka生产者参数

1.acks 指定必须有多少个分区副本收到消息,生产者才认为这次写入成功

   0       1 (默认)       all 可靠性最高,延迟高

 2.buffer.memory 生产者缓冲区大小 (生产太快会导致阻塞或抛出异常)

   32M(默认)

3.max.block.ms  调用send()时获取元数据(分区信息)或缓冲区已满时,允许阻塞等待的最长时间

   默认60000ms 60秒

4.retries  生产者重试次数  消息发送失败最大尝试次数

  默认为0 不重试 
5.retry.backoff.ms  和4搭配使用

  上一次重发和下次重发间隔时间100ms(默认)

6.batch.size 发往同一分区的一个消息批次可占用的内存大小,达到该值即发送

  16k(默认)

7.linger.ms  指明发送消息batchSize的时间 和6组合起作用 

   默认为0 (来一条发一条)

   6和7搭配使用 哪个先到就发送

8.compression.type 

   none(默认),gzip,snappy

9.client.id

   标识符 设置任意字符串,用于做消息追踪 broker查看是从哪个生产者发送来的消息

10.max.in.flight.requests.per.connection

   默认为5(注:默认并不是1)

   设为1时,生产者必须等到broker对上一条消息的响应后才发送下一条,可保证单分区内消息严格有序

11.max.request.size

  控制生产者生产消息的最大大小

  1M (默认) 1个请求 一个批次的消息的大小      和server.properties的message.max.bytes最好一样

  • 0
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值