How does KafkaTemplate send messages?

To send messages with KafkaTemplate, the bean has to be instantiated first. The configuration is as follows:

<!-- producer configuration properties -->
	<bean id="producerProperties" class="java.util.HashMap">
		<constructor-arg>
			<map>
				<entry key="bootstrap.servers" value="${kafka.bootstrap.servers}" />
				<entry key="group.id" value="0" />
				<entry key="retries" value="2" />
				<entry key="batch.size" value="16384" />
				<entry key="linger.ms" value="1" />
				<entry key="buffer.memory" value="33554432" />
				<entry key="max.request.size" value="10000000"></entry>
				<entry key="send.buffer.bytes" value="10000000"></entry>
				<entry key="key.serializer"
					value="org.apache.kafka.common.serialization.StringSerializer" />
				<entry key="value.serializer"
					value="org.apache.kafka.common.serialization.StringSerializer" />
			</map>
		</constructor-arg>
	</bean>

	<!-- the ProducerFactory bean required to create the KafkaTemplate -->
	<bean id="producerFactory"
		class="org.springframework.kafka.core.DefaultKafkaProducerFactory">
		<constructor-arg>
			<ref bean="producerProperties" />
		</constructor-arg>
	</bean>

	<!-- the KafkaTemplate bean; inject it wherever needed and call its send methods -->
	<bean id="kafkaTemplate" class="org.springframework.kafka.core.KafkaTemplate">
		<constructor-arg ref="producerFactory" />
		<constructor-arg name="autoFlush" value="true" />
		<property name="defaultTopic" value="mhb-test" />
	</bean>

To use it, just inject the bean:

@Autowired
private KafkaTemplate<String, String> kafkaTemplate;

// send() is asynchronous and returns a future; get() blocks until the broker acknowledges
ListenableFuture<SendResult<String, String>> future =
        kafkaTemplate.send(topic, key, JSON.toJSONString(obj));
future.get();
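
Blocking on future.get() throws away the asynchronous design. A non-blocking alternative is to register a callback on the returned ListenableFuture, sketched below with Spring's addCallback; the success branch reads the broker-assigned partition and offset from the SendResult:

// non-blocking alternative: react once the broker acknowledges the record
future.addCallback(
        result -> System.out.println("Sent to partition "
                + result.getRecordMetadata().partition()
                + " at offset " + result.getRecordMetadata().offset()),
        ex -> System.err.println("Send failed: " + ex.getMessage()));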

This is the usage through Spring's wrapper. Spring adds a ProducerFactory that creates the Producer object, layers transaction support on top of it, wraps the arguments into a ProducerRecord, and finally calls the send method of the Producer class in the kafka-clients package.
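
For reference, the send(topic, key, data) overload used above is only a thin wrapper around doSend; paraphrased from the spring-kafka source, it roughly does this:

// roughly what KafkaTemplate.send(String topic, K key, V data) does internally
public ListenableFuture<SendResult<K, V>> send(String topic, K key, V data) {
    ProducerRecord<K, V> producerRecord = new ProducerRecord<>(topic, key, data);
    return this.doSend(producerRecord);
}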

protected ListenableFuture<SendResult<K, V>> doSend(ProducerRecord<K, V> producerRecord) {
        if (this.transactional) {
            Assert.state(this.inTransaction(), "No transaction is in process; possible solutions: run the template operation within the scope of a template.executeInTransaction() operation, start a transaction with @Transactional before invoking the template method, run in a transaction started by a listener container when consuming a record");
        }
        // transaction support: obtain the producer from the factory
        Producer<K, V> producer = this.getTheProducer();
        this.logger.trace(() -> {
            return "Sending: " + producerRecord;
        });
        SettableListenableFuture<SendResult<K, V>> future = new SettableListenableFuture();
        // hand the record to kafka-clients and wrap the outcome in a future
        producer.send(producerRecord, this.buildCallback(producerRecord, producer, future));
        if (this.autoFlush) {
            this.flush();
        }

        this.logger.trace(() -> {
            return "Sent: " + producerRecord;
        });
        return future;
}
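
The assertion at the top of doSend only fires for a transactional template. A minimal sketch of a transactional send, assuming the DefaultKafkaProducerFactory was configured with a transactionIdPrefix (which the XML above does not set):

// assumes producerFactory.setTransactionIdPrefix("tx-") was configured,
// which makes the template transactional
kafkaTemplate.executeInTransaction(ops -> {
    ops.send("mhb-test", "key", "value-1");
    ops.send("mhb-test", "key", "value-2");
    return null; // both sends commit or abort together
});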

From here on, everything lives in the kafka-clients package. KafkaProducer implements the Producer interface, and before sending it runs the ProducerInterceptor chain, which can intercept and even mutate the record. The official description reads:

A plugin interface that allows you to intercept (and possibly mutate) the records received by the producer before they are published to the Kafka cluster.
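
A minimal interceptor sketch that mutates the value before publishing; it would be registered through the standard interceptor.classes producer property:

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

// register via producer config: interceptor.classes=...PrefixingInterceptor
public class PrefixingInterceptor implements ProducerInterceptor<String, String> {

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        // called before serialization and partition assignment; may mutate the record
        return new ProducerRecord<>(record.topic(), record.partition(),
                record.timestamp(), record.key(),
                "intercepted-" + record.value(), record.headers());
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        // called when the broker acknowledges the record, or when the send fails
    }

    @Override
    public void close() { }

    @Override
    public void configure(Map<String, ?> configs) { }
}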

Once the interceptors have run, the record is actually sent to Kafka via org.apache.kafka.clients.producer.KafkaProducer#doSend. The source is as follows:

private Future<RecordMetadata> doSend(ProducerRecord<K, V> record, Callback callback) {
        TopicPartition tp = null;

        try {
            // 0. preliminary checks: wait for cluster metadata and compute the remaining
            // wait time, since Kafka buffers records and sends them in batches
            this.throwIfProducerClosed();

            KafkaProducer.ClusterAndWaitTime clusterAndWaitTime;
            try {
                clusterAndWaitTime = this.waitOnMetadata(record.topic(), record.partition(), this.maxBlockTimeMs);
            } catch (KafkaException var19) {
                if (this.metadata.isClosed()) {
                    throw new KafkaException("Producer closed while send in progress", var19);
                }

                throw var19;
            }

            long remainingWaitMs = Math.max(0L, this.maxBlockTimeMs - clusterAndWaitTime.waitedOnMetadataMs);
            Cluster cluster = clusterAndWaitTime.cluster;

            byte[] serializedKey;
            try { // 1. serialize the key
                serializedKey = this.keySerializer.serialize(record.topic(), record.headers(), record.key());
            } catch (ClassCastException var18) {
                throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() + " to class " + this.producerConfig.getClass("key.serializer").getName() + " specified in key.serializer", var18);
            }

            byte[] serializedValue;
            try { // 2. serialize the value
                serializedValue = this.valueSerializer.serialize(record.topic(), record.headers(), record.value());
            } catch (ClassCastException var17) {
                throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() + " to class " + this.producerConfig.getClass("value.serializer").getName() + " specified in value.serializer", var17);
            }

            // 3. compute the target partition from the key
            int partition = this.partition(record, serializedKey, serializedValue, cluster);
            // build the TopicPartition object
            tp = new TopicPartition(record.topic(), partition);
            this.setReadOnly(record.headers());
            Header[] headers = record.headers().toArray();
            int serializedSize = AbstractRecords.estimateSizeInBytesUpperBound(this.apiVersions.maxUsableProduceMagic(), this.compressionType, serializedKey, serializedValue, headers);
            this.ensureValidRecordSize(serializedSize);
            long timestamp = record.timestamp() == null ? this.time.milliseconds() : record.timestamp();
            this.log.trace("Sending record {} with callback {} to topic {} partition {}", new Object[]{record, callback, record.topic(), partition});
            // build the Callback object (wraps the user callback and the interceptors)
            Callback interceptCallback = new KafkaProducer.InterceptorCallback(callback, this.interceptors, tp);
            if (this.transactionManager != null && this.transactionManager.isTransactional()) {
                this.transactionManager.maybeAddPartitionToTransaction(tp);
            }

            // 4. append the record to the RecordAccumulator, where it waits to be batched
            RecordAppendResult result = this.accumulator.append(tp, timestamp, serializedKey, serializedValue, headers, interceptCallback, remainingWaitMs);
            if (result.batchIsFull || result.newBatchCreated) {
                this.log.trace("Waking up the sender since topic {} partition {} is either full or getting a new batch", record.topic(), partition);
                this.sender.wakeup();
            }

            // 5. return the future
            return result.future;
        } catch (ApiException var20) {
            this.log.debug("Exception occurred during message send:", var20);
            if (callback != null) {
                callback.onCompletion((RecordMetadata)null, var20);
            }

            this.errors.record();
            this.interceptors.onSendError(record, tp, var20);
            return new KafkaProducer.FutureFailure(var20);
        } catch (InterruptedException var21) {
            this.errors.record();
            this.interceptors.onSendError(record, tp, var21);
            throw new InterruptException(var21);
        } catch (BufferExhaustedException var22) {
            this.errors.record();
            this.metrics.sensor("buffer-exhausted-records").record();
            this.interceptors.onSendError(record, tp, var22);
            throw var22;
        } catch (KafkaException var23) {
            this.errors.record();
            this.interceptors.onSendError(record, tp, var23);
            throw var23;
        } catch (Exception var24) {
            this.interceptors.onSendError(record, tp, var24);
            throw var24;
        }
 }

This is the core logic of sending a Kafka message, and it is worth studying carefully, especially the partition computation and the batching logic.

By default the partition is computed by hashing the serialized key with the murmur2 algorithm, converting the result to a positive 32-bit integer, and taking the remainder modulo the total number of partitions. (Records with no key fall back to round-robin distribution in older clients, or the sticky partitioner in newer ones.)
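
A sketch of that key-based branch, using the real hash helpers from org.apache.kafka.common.utils.Utils (this mirrors DefaultPartitioner but omits the keyless branch):

import org.apache.kafka.common.utils.Utils;

// mirrors the key-based branch of
// org.apache.kafka.clients.producer.internals.DefaultPartitioner
static int partitionForKey(byte[] serializedKey, int numPartitions) {
    // murmur2 hashes the key bytes; toPositive clears the sign bit so the
    // modulo always yields a valid partition index
    return Utils.toPositive(Utils.murmur2(serializedKey)) % numPartitions;
}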

Sending is batched: the client first buffers records locally and ships them to the broker once a batch fills up or the linger time expires. The key method is org.apache.kafka.clients.producer.internals.RecordAccumulator#append, shown below; its internals will be examined in detail in the next post.

public RecordAccumulator.RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException {
        this.appendsInProgress.incrementAndGet();
        ByteBuffer buffer = null;
        if (headers == null) {
            headers = Record.EMPTY_HEADERS;
        }

        RecordAccumulator.RecordAppendResult var16;
        try {
            // 1. get the deque of batches for this topic-partition, creating it if absent
            Deque<ProducerBatch> dq = this.getOrCreateDeque(tp);
            synchronized(dq) {
                if (this.closed) {
                    throw new KafkaException("Producer closed while send in progress");
                }

                // try to append the record to the last batch in the deque;
                // returns null if there is no batch with room for it
                RecordAccumulator.RecordAppendResult appendResult = this.tryAppend(timestamp, key, value, headers, callback, dq);
                if (appendResult != null) {
                    RecordAccumulator.RecordAppendResult var14 = appendResult;
                    return var14;
                }
            }

            byte maxUsableMagic = this.apiVersions.maxUsableProduceMagic();
            int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, this.compression, key, value, headers));
            this.log.trace("Allocating a new {} byte message buffer for topic {} partition {}", new Object[]{size, tp.topic(), tp.partition()});
            buffer = this.free.allocate(size, maxTimeToBlock);
            synchronized(dq) {
                if (this.closed) {
                    throw new KafkaException("Producer closed while send in progress");
                }

                RecordAccumulator.RecordAppendResult appendResult = this.tryAppend(timestamp, key, value, headers, callback, dq);
                if (appendResult == null) {
                    // 2. wrap the freshly allocated buffer in a MemoryRecordsBuilder
                    MemoryRecordsBuilder recordsBuilder = this.recordsBuilder(buffer, maxUsableMagic);
                    // create a new batch backed by that builder
                    ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, this.time.milliseconds());
                    // append the record to the new batch
                    FutureRecordMetadata future = (FutureRecordMetadata)Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, this.time.milliseconds()));
                    // add the batch to the tail of the deque
                    dq.addLast(batch);
                    this.incomplete.add(batch);
                    buffer = null;
                    RecordAccumulator.RecordAppendResult var19 = new RecordAccumulator.RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true);
                    return var19;
                }

                var16 = appendResult;
            }
        } finally {
            if (buffer != null) {
                this.free.deallocate(buffer);
            }

            this.appendsInProgress.decrementAndGet();
        }

        return var16;
    }
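
The buffering above is driven by the batch.size and linger.ms values from the configuration at the top of this post. A standalone sketch with plain kafka-clients (hypothetical broker address) that exercises the same accumulator path:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class BatchingDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // hypothetical address
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        props.put("batch.size", 16384); // a batch is sent once it reaches 16 KB...
        props.put("linger.ms", 1);      // ...or after waiting at most 1 ms for more records

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 0; i < 100; i++) {
                // every send() here goes through RecordAccumulator#append above
                producer.send(new ProducerRecord<>("mhb-test", "key-" + i, "value-" + i));
            }
        } // close() flushes any batch still sitting in the accumulator
    }
}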

 
