Kafka native API message-sending configuration

package com.liuze.learnkafka;

import org.apache.kafka.clients.producer.*;
import org.junit.jupiter.api.Test;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

/**
 * @ClassName kafkaTest
 * @Date 2022/4/6 09:59
 * @Version 1.0
 */
public class kafkaTest {
    private static final String KAFKA_TOPIC = "myTopic";
    private static final String KAFKA_TOPIC5 = "myTopic5";

    /**
     * Producer property configuration.
     *
     * @return the configured producer Properties
     */
    public static Properties getProperties() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "ip:port");
        //props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "ip:port");
        // When the producer sends data to the partition leader, the acks setting controls the durability level: 0, 1, or all.
        props.put("acks", "all");
        //props.put(ProducerConfig.ACKS_CONFIG, "all");
        // On request failure the producer retries automatically; here it is set to 0 retries. If retries are enabled, duplicate messages become possible.
        props.put("retries", 0);
        //props.put(ProducerConfig.RETRIES_CONFIG, 0);
        // The producer buffers unsent messages per partition; the buffer size is set via batch.size, default 16KB.
        props.put("batch.size", 16384);
        /**
         * The default is 0: messages are sent immediately, even if the batch.size buffer is not yet full.
         * To reduce the number of requests, set linger.ms greater than 0: a message then stays in the
         * buffer for up to that many milliseconds before being submitted to the server.
         * Put plainly, messages that could have gone out earlier wait up to linger.ms, so more messages
         * accumulate in that window and go out as one batch, reducing the request count.
         * A batch is sent as soon as either condition is met: the batch fills up, or linger.ms elapses.
         */
        props.put("linger.ms", 1);
        /**
         * buffer.memory caps the total memory buffer the Kafka producer may use; the default is 32MB.
         * If buffer.memory is too small, messages may be written into the buffer faster than the Sender
         * thread can ship them to the Kafka brokers, so the buffer fills up quickly. Once it is full,
         * user threads are blocked and can no longer write messages to Kafka.
         * buffer.memory must be larger than batch.size, otherwise allocation-failure errors are reported.
         * Do not exceed physical memory; adjust to your situation.
         * Size it with load tests against your actual workload.
         */
        props.put("buffer.memory", 33554432);
        /**
         * Key serializer: serializes the key and value objects the user supplies in the ProducerRecord.
         * key.serializer must be set even if the record specifies no key. The serializer must be a class
         * implementing the org.apache.kafka.common.serialization.Serializer interface,
         * which turns the key into a byte array.
         */
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        return props;
    }
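
    /*
     * A minimal sketch of the same configuration using the ProducerConfig constants
     * hinted at by the commented-out lines above; the constants name the same keys
     * but are checked by the compiler. The helper name getPropertiesTypeSafe and the
     * "ip:port" placeholder are assumptions, not from the original post.
     */
    public static Properties getPropertiesTypeSafe() {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "ip:port");
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        return props;
    }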

    /**
     * Send messages synchronously: Future.get() blocks until each record is acknowledged.
     */
    @Test
    public void toSend() {
        Properties properties = getProperties();
        Producer<String, String> producer = new KafkaProducer<String, String>(properties);
        for (int i = 0; i < 10; i++) {
            Future<RecordMetadata> send = producer.send(new ProducerRecord<String, String>(KAFKA_TOPIC, "liuze-test" + i, "" + i));
            try {
                // Send result, e.g. "myTopic-2@0", read as topic-partitionNumber@offset
                RecordMetadata recordMetadata = send.get();
                System.out.println("Send result: " + recordMetadata);
            } catch (InterruptedException | ExecutionException e) {
                e.printStackTrace();
            }
        }
        producer.close();
    }
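
    /*
     * A sketch: RecordMetadata also exposes the fields behind the "topic-partition@offset"
     * string individually. The helper name describe is an assumption, not from the original.
     */
    private static String describe(RecordMetadata recordMetadata) {
        return "topic=" + recordMetadata.topic()
                + " partition=" + recordMetadata.partition()
                + " offset=" + recordMetadata.offset();
    }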

    /**
     * Send messages with a callback: the producer invokes it once the send completes or fails.
     *
     * @date 2022/4/6 10:46
     */
    @Test
    public void toSendCallback() {
        Properties properties = getProperties();
        Producer<String, String> producer = new KafkaProducer<String, String>(properties);
        for (int i = 0; i < 10; i++) {
            Future<RecordMetadata> send = producer.send(new ProducerRecord<String, String>(KAFKA_TOPIC, "liuze-test" + i, "" + i), (recordMetadata, e) -> {
                if (e == null) {
                    System.out.println("Send result: " + recordMetadata);
                } else {
                    e.printStackTrace();
                }
            });
        }
        producer.close();
    }
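
    /*
     * Note: the callback runs on the producer's I/O thread once the broker responds, so the
     * send loop itself never blocks; close() then waits for in-flight sends to finish. To push
     * buffered batches out without closing, producer.flush() blocks until every record sent
     * so far has completed.
     */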

    /**
     * Send to a specific partition: the ProducerRecord names partition 3 of myTopic5 explicitly.
     *
     * @date 2022/4/6 10:47
     */
    @Test
    public void toSendSpecifyPartition() {
        Producer<String, String> producer = null;
        try {
            Properties properties = getProperties();
            producer = new KafkaProducer<>(properties);
            for (int i = 0; i < 10; i++) {
                producer.send(new ProducerRecord<>(KAFKA_TOPIC5, 3, "myTopic5" + i, "" + i), (metadata, exception) -> {
                    if (exception == null) {
                        System.out.println("Send result: " + metadata);
                    } else {
                        exception.printStackTrace();
                    }
                });
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // null-check instead of assert: the constructor may throw before producer is assigned
            if (producer != null) {
                producer.close();
            }
        }
    }
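
    /*
     * A minimal sketch, not from the original: sending to partition 3 requires myTopic5 to
     * already have at least 4 partitions. The AdminClient in the same kafka-clients dependency
     * can pre-create it; the partition count 5 and replication factor 1 are assumptions.
     */
    public void createTopic5() throws Exception {
        Properties props = new Properties();
        props.put(org.apache.kafka.clients.admin.AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "ip:port");
        try (org.apache.kafka.clients.admin.AdminClient admin =
                     org.apache.kafka.clients.admin.AdminClient.create(props)) {
            admin.createTopics(java.util.Collections.singletonList(
                    new org.apache.kafka.clients.admin.NewTopic(KAFKA_TOPIC5, 5, (short) 1))).all().get();
        }
    }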

    /**
     * Custom partitioner configuration: plug a Partitioner implementation in via partitioner.class.
     */
    @Test
    public void toSendSpecifyPartition1() {
        Producer<String, String> producer = null;
        try {
            Properties properties = getProperties();
            // fully qualified class name of the custom partitioning rule, starting from com
            properties.put("partitioner.class", "com...........");
            producer = new KafkaProducer<>(properties);
            for (int i = 0; i < 10; i++) {
                producer.send(new ProducerRecord<>(KAFKA_TOPIC5, "liuze", "" + i), (metadata, exception) -> {
                    if (exception == null) {
                        System.out.println("Send result: " + metadata);
                    } else {
                        exception.printStackTrace();
                    }
                });
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (producer != null) {
                producer.close();
            }
        }
    }
}
LiuzePartitioner.java

package com.liuze.learnkafka.config;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.clients.producer.internals.StickyPartitionCache;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.utils.Utils;

import java.util.List;
import java.util.Map;

/**
 * Custom partitioner: records whose key is "liuze" go to partition 0; records without a key
 * use the sticky partition cache; all other keys are hashed with murmur2.
 *
 * @ClassName LiuzePartitioner
 * @Date 2022/4/6 11:04
 * @Version 1.0
 */
public class LiuzePartitioner implements Partitioner {
    private final StickyPartitionCache stickyPartitionCache = new StickyPartitionCache();

    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        if (keyBytes == null) {
            return stickyPartitionCache.partition(topic, cluster);
        } else if ("liuze".equals(key)) {
            return 0;
        }
        List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
        int numPartitions = partitions.size();
        // hash the keyBytes to choose a partition
        return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
    }

    @Override
    public void close() {

    }

    @Override
    public void configure(Map<String, ?> configs) {

    }
}
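
With this class on the classpath, the elided partitioner.class value in toSendSpecifyPartition1 resolves to com.liuze.learnkafka.config.LiuzePartitioner (the package and class name above). A minimal sketch of wiring it up; the class-literal form avoids typos in the string:

properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, LiuzePartitioner.class.getName());
// equivalent string form:
// properties.put("partitioner.class", "com.liuze.learnkafka.config.LiuzePartitioner");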

pom

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.4.0</version>
</dependency>
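
Note: the StickyPartitionCache used by LiuzePartitioner was introduced in kafka-clients 2.4.0 (KIP-480), so 2.4.0 is the minimum version for the code above.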