Hand-Rolling the Kafka Producer Partitioner API

Introduction: this post walks through the Kafka producer partitioner API in Java.
It covers the partitioning rules, how partitioners are used, the partitioner code and the pom.xml configuration, and finally the run results.

Benefits of using a Kafka producer partitioner:

1. It makes it easy to scale out across the cluster.
2. It can improve concurrency.

Partitioning rules


1. If a partition is specified explicitly, that value is used directly as the partition.
2. If no partition is specified but a key is present, the partition is the key's hash modulo the topic's partition count.
3. If neither a partition nor a key is given, Kafka uses the Sticky Partitioner: it randomly picks a partition and keeps using it until that partition's batch is full or completed, then randomly picks another. (Records used to be round-robined one at a time; now they are round-robined a batch at a time.) See the sketch below for all three cases.
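To make the three cases concrete, here is a minimal sketch (the class name PartitionRuleSketch is made up for illustration; it assumes the broker address bdc112:9092 and the topic first01 used later in this post) showing the ProducerRecord constructor that triggers each rule:

package com.lqs.kafka.partitioner;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class PartitionRuleSketch {

    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "bdc112:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);

        // Rule 1: partition given explicitly -> the record goes to partition 1
        producer.send(new ProducerRecord<>("first01", 1, "k", "explicit partition"));

        // Rule 2: no partition, but a key -> partition = hash(key) % numPartitions
        producer.send(new ProducerRecord<>("first01", "myKey", "key-hashed partition"));

        // Rule 3: neither partition nor key -> the sticky partitioner picks one per batch
        producer.send(new ProducerRecord<>("first01", "sticky partition"));

        producer.close();
    }
}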

The Partitioner API

Setup

1. Create a Maven project for the Kafka API code.
2. Configure pom.xml as follows:

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>kafkaDemmo</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>2.4.1</version>
        </dependency>

        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-nop</artifactId>
            <version>1.7.2</version>
        </dependency>

    </dependencies>
    
</project>

3. Add a log4j.properties file under main/resources with the following content (note: with slf4j-nop on the classpath, SLF4J output from the Kafka clients is discarded, so this file only takes effect if a log4j binding is actually present):

log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n

Writing the code

Consumer code

The code is as follows:

package com.lqs.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Properties;

/**
 * @author qingSong liu
 * @version 1.0
 * @time 2021/12/28 21:58
 */

public class CustomConsumerDemo {

    public static void main(String[] args) {

        Properties properties = new Properties();

        // Add connection parameters to the consumer configuration
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "bdc112:9092");

        // Configure the key and value deserializers (required)
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        // Configure the consumer group id (also required)
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "demo");

        // Create the consumer
        KafkaConsumer<String, String> stringStringKafkaConsumer = new KafkaConsumer<>(properties);

        // Subscribe to the topic
        ArrayList<String> strings = new ArrayList<>();
        strings.add("first01");
        stringStringKafkaConsumer.subscribe(strings);

        // Poll for records and print them
        while (true) {
            ConsumerRecords<String, String> consumerRecords = stringStringKafkaConsumer.poll(Duration.ofSeconds(1));

            for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                System.out.println(consumerRecord);
            }
        }
    }

}

Custom partitioner code

package com.lqs.kafka.partitioner;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

import java.util.Map;

/**
 * @author qingSong liu
 * @version 1.0
 * @time 2021/12/28 20:12
 * <p>
 * This is a custom partitioner:
 * 1. Implement the Partitioner interface
 * 2. Implement its 3 methods: partition, close, configure
 * 3. Write the partition method so that it returns the partition number
 */

public class PartitionerDemo implements Partitioner {

    /**
     * Returns the partition number for the given record.
     *
     * @param topic      the topic
     * @param key        the message key
     * @param keyBytes   the serialized key bytes
     * @param value      the message value
     * @param valueBytes the serialized value bytes
     * @param cluster    cluster metadata, usable to look up partition info
     * @return the chosen partition number
     */
    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        // Get the message value as a string (assumes a non-null value)
        String s1 = value.toString();
        // The partition number to return
        int partition;
        if (s1.contains("lqs")) {
            // Messages containing "lqs" go to partition 0
            partition = 0;
        } else if (s1.contains("test")) {
            // Messages containing "test" go to partition 1
            partition = 1;
        } else {
            // Everything else goes to partition 2
            partition = 2;
        }

        return partition;
    }

    /**
     * Close any resources held by the partitioner (nothing to do here)
     */
    @Override
    public void close() {

    }

    /**
     * Configuration hook (no configuration is needed here)
     *
     * @param configs the supplied configs
     */
    @Override
    public void configure(Map<String, ?> configs) {

    }
}
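Before wiring the partitioner into a producer, its routing logic can be sanity-checked in isolation. A small hypothetical sketch (the class name is made up; the Cluster argument is never read by this implementation, so passing null works purely for illustration):

package com.lqs.kafka.partitioner;

public class PartitionerDemoCheck {

    public static void main(String[] args) {
        PartitionerDemo partitioner = new PartitionerDemo();

        // Cluster metadata is never read by PartitionerDemo, so null suffices here
        System.out.println(partitioner.partition("first01", null, null, "lqs0", null, null));  // prints 0
        System.out.println(partitioner.partition("first01", null, null, "test1", null, null)); // prints 1
        System.out.println(partitioner.partition("first01", null, null, "sfa2", null, null));  // prints 2
    }
}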

Create a CustomProducerCallBackDemo class that uses the custom partitioner

package com.lqs.kafka.partitioner;

import org.apache.kafka.clients.producer.*;

import java.util.Properties;

/**
 * @author qingSong liu
 * @version 1.0
 * @time 2021/12/28 20:20
 */

public class CustomProducerCallBackDemo {

    public static void main(String[] args) {

        // Create the configuration object
        Properties properties = new Properties();

        // Add the broker connection address
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "bdc112:9092");

        // Set the batch size (16 KB is the default)
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);

        // Set the linger time to 1 ms
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 1);

        // Set the RecordAccumulator buffer size to its default of 32 MB
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);

        // Set the key and value serializers (required)
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        // Register the custom partitioner
        properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "com.lqs.kafka.partitioner.PartitionerDemo");

        // Create the Kafka producer
        KafkaProducer<String, String> stringStringKafkaProducer = new KafkaProducer<>(properties);

        for (int i = 0; i < 12; i++) {
            if (i % 2 == 0) {
                // Send a message, with a callback to report the result
                stringStringKafkaProducer.send(new ProducerRecord<>("first01", "lqs" + i), new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        // Check whether the send succeeded
                        if (exception != null) {
                            exception.printStackTrace();
                        } else {
                            System.out.println(metadata.toString());
                        }
                    }
                });
            } else {
                stringStringKafkaProducer.send(new ProducerRecord<>("first01", "sfa" + i), new Callback() {
                    @Override
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        if (exception != null) {
                            exception.printStackTrace();
                        } else {
                            System.out.println(metadata.toString());
                        }
                    }
                });
            }
        }

        // Close the producer and release its resources
        stringStringKafkaProducer.close();

    }

}
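As a complement to the callback style above, here is a minimal hypothetical sketch of a synchronous send: calling get() on the Future returned by send() blocks until the broker acknowledges the record, and the resulting RecordMetadata carries the partition the custom partitioner chose (same broker address, topic, and partitioner class assumed):

package com.lqs.kafka.partitioner;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Properties;
import java.util.concurrent.ExecutionException;

public class CustomProducerSyncDemo {

    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "bdc112:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "com.lqs.kafka.partitioner.PartitionerDemo");

        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);

        // get() blocks until the broker acknowledges the record
        RecordMetadata metadata = producer.send(new ProducerRecord<>("first01", "lqs-sync")).get();
        System.out.println("partition = " + metadata.partition() + ", offset = " + metadata.offset());

        producer.close();
    }
}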

Run results

Note: remember to start Zookeeper and Kafka before running! Also, since PartitionerDemo returns partitions 0 through 2, the topic first01 needs at least 3 partitions.

1. Run the consumer code first.
2. Then run the producer code.
3. Check the consumer's output again; new records appear.
If you see output like the following, the hand-written partitioner works: every "lqs" message landed on partition 0, and every "sfa" message (containing neither "lqs" nor "test") landed on partition 2, exactly as PartitionerDemo dictates.

(IntelliJ's JVM launch command and classpath line omitted)
ConsumerRecord(topic = first01, partition = 2, leaderEpoch = 2, offset = 24, CreateTime = 1640754679164, serialized key size = -1, serialized value size = 4, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = sfa1)
ConsumerRecord(topic = first01, partition = 2, leaderEpoch = 2, offset = 25, CreateTime = 1640754679165, serialized key size = -1, serialized value size = 4, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = sfa3)
ConsumerRecord(topic = first01, partition = 2, leaderEpoch = 2, offset = 26, CreateTime = 1640754679165, serialized key size = -1, serialized value size = 4, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = sfa5)
ConsumerRecord(topic = first01, partition = 2, leaderEpoch = 2, offset = 27, CreateTime = 1640754679165, serialized key size = -1, serialized value size = 4, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = sfa7)
ConsumerRecord(topic = first01, partition = 2, leaderEpoch = 2, offset = 28, CreateTime = 1640754679165, serialized key size = -1, serialized value size = 4, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = sfa9)
ConsumerRecord(topic = first01, partition = 2, leaderEpoch = 2, offset = 29, CreateTime = 1640754679166, serialized key size = -1, serialized value size = 5, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = sfa11)
ConsumerRecord(topic = first01, partition = 0, leaderEpoch = 2, offset = 213, CreateTime = 1640754679156, serialized key size = -1, serialized value size = 4, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = lqs0)
ConsumerRecord(topic = first01, partition = 0, leaderEpoch = 2, offset = 214, CreateTime = 1640754679165, serialized key size = -1, serialized value size = 4, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = lqs2)
ConsumerRecord(topic = first01, partition = 0, leaderEpoch = 2, offset = 215, CreateTime = 1640754679165, serialized key size = -1, serialized value size = 4, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = lqs4)
ConsumerRecord(topic = first01, partition = 0, leaderEpoch = 2, offset = 216, CreateTime = 1640754679165, serialized key size = -1, serialized value size = 4, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = lqs6)
ConsumerRecord(topic = first01, partition = 0, leaderEpoch = 2, offset = 217, CreateTime = 1640754679165, serialized key size = -1, serialized value size = 4, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = lqs8)
ConsumerRecord(topic = first01, partition = 0, leaderEpoch = 2, offset = 218, CreateTime = 1640754679166, serialized key size = -1, serialized value size = 5, headers = RecordHeaders(headers = [], isReadOnly = false), key = null, value = lqs10)
