Kafka source code analysis: consuming messages

Example code for consuming messages

package com.example.demo.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;

public class ConsumerAnalysis {

    public static final AtomicBoolean IS_RUNNING = new AtomicBoolean(true);

    public static Properties initConfig() {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CommonHelper.BROKER_LIST);
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "ConsumerAnalysisGroup-1");
        properties.put(ConsumerConfig.CLIENT_ID_CONFIG, "demo-consumer-client-1");
        return properties;
    }

    public static void main(String[] args) {
        Properties properties = initConfig();
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(Collections.singletonList(CommonHelper.TOPIC));

        try {
            while (IS_RUNNING.get()) {
                ConsumerRecords<String, String> records = consumer.poll(10000);
                System.out.println("records count is " + records.count());
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println("topic=" + record.topic()
                            + ", partition = " + record.partition()
                            + ", offset=" + record.offset());

                    System.out.println("key=" + record.key()
                            + ", value=" + record.value());
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            consumer.close();
        }

    }
}

Process steps

  1. Configure the consumer client parameters and create the consumer instance
  2. Subscribe to the topic(s)
  3. Poll messages and consume them
  4. Commit the consumed offsets (a manual-commit sketch follows this list)
  5. Close the consumer instance
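
Step 4 in the example at the top of this article is handled by the default automatic commit. As a contrast, here is a minimal sketch of committing offsets manually, assuming the same imports as the example above; the broker address, group id and topic name are placeholders:

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");      // placeholder address
props.put(ConsumerConfig.GROUP_ID_CONFIG, "manual-commit-group");          // hypothetical group id
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");              // turn off auto-commit

KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
try {
    consumer.subscribe(Collections.singletonList("demo-topic"));           // hypothetical topic
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(1000);
        for (ConsumerRecord<String, String> record : records) {
            // process the record here
            System.out.println(record.offset() + " -> " + record.value());
        }
        // commit the offsets of the records returned by the last poll (step 4)
        consumer.commitSync();
    }
} finally {
    consumer.close();                                                       // step 5
}

commitSync() blocks until the broker acknowledges the commit; commitAsync() is the non-blocking variant and comes up again in the offset section near the end of this article.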

Parameter descriptions

  • ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG: corresponds to bootstrap.servers, the list of host/port pairs used to establish the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping; this list only affects the initial hosts used to discover the full set of servers. It should be in the form host1:port1,host2:port2,…. Since these servers are only used for the initial connection to discover the full cluster membership (which may change dynamically), the list need not contain the full set of servers (though you may want more than one, in case a server is down).

  • ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG: corresponds to key.deserializer, the deserializer class for keys, implementing the org.apache.kafka.common.serialization.Deserializer interface.

  • ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG: corresponds to value.deserializer, the deserializer class for values, implementing the org.apache.kafka.common.serialization.Deserializer interface.

  • ConsumerConfig.GROUP_ID_CONFIG: corresponds to group.id, a unique string that identifies the consumer group this consumer belongs to. This property is required if the consumer uses either the group management functionality via subscribe(topic) or the Kafka-based offset management strategy.

  • ConsumerConfig.CLIENT_ID_CONFIG: corresponds to client.id, an id string passed to the server when making requests. Its purpose is to make it possible to track the source of requests beyond just ip/port, by allowing a logical application name to be included in server-side request logging.

    Note: creating the consumer relies on the ConsumerConfig class, which uses a static {} block to initialize a set of default configurations. The remaining consumer-related configuration keys can also be found in ConsumerConfig (see the sketch below).
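
As a quick way to see which configuration keys ConsumerConfig registers (and therefore which entries its static block sets defaults for), here is a small sketch; it assumes the static ConsumerConfig.configNames() method is available, as it is in recent kafka-clients releases:

import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.util.TreeSet;

public class ListConsumerConfigKeys {
    public static void main(String[] args) {
        // Print every configuration key known to ConsumerConfig, sorted alphabetically.
        // The default values and documentation strings live in the underlying ConfigDef.
        new TreeSet<>(ConsumerConfig.configNames()).forEach(System.out::println);
    }
}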

Main flow of creating the consumer instance

(Flow chart to be added.)

private KafkaConsumer(ConsumerConfig config,
                      Deserializer<K> keyDeserializer,
                      Deserializer<V> valueDeserializer) {
    try {
        // If no client.id is configured, generate a default one
        String clientId = config.getString(ConsumerConfig.CLIENT_ID_CONFIG);
        if (clientId.isEmpty())
            clientId = "consumer-" + CONSUMER_CLIENT_ID_SEQUENCE.getAndIncrement();
        this.clientId = clientId;
        // Read the consumer group id from the configuration
        String groupId = config.getString(ConsumerConfig.GROUP_ID_CONFIG);

        LogContext logContext = new LogContext("[Consumer clientId=" + clientId + ", groupId=" + groupId + "] ");
        this.log = logContext.logger(getClass());

        log.debug("Initializing the Kafka consumer");
        // Request timeout: if there is still no response after the timeout (and retries), the request is treated as failed
        this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG);
        // Session timeout for group heartbeats; once exceeded, the consumer leaves the group and a rebalance is triggered
        int sessionTimeOutMs = config.getInt(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG);
        // The maximum time the server blocks before answering a fetch request when fetch.min.bytes has not been reached
        int fetchMaxWaitMs = config.getInt(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG);
        // The request timeout must be greater than both the session timeout and the maximum fetch wait time
        if (this.requestTimeoutMs <= sessionTimeOutMs || this.requestTimeoutMs <= fetchMaxWaitMs)
            throw new ConfigException(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG + " should be greater than " + ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG + " and " + ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG);
        this.time = Time.SYSTEM;

        // Metrics collection for the consumer client
        Map<String, String> metricsTags = Collections.singletonMap("client-id", clientId);
        MetricConfig metricConfig = new MetricConfig().samples(config.getInt(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG))
                .timeWindow(config.getLong(ConsumerConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
                .recordLevel(Sensor.RecordingLevel.forName(config.getString(ConsumerConfig.METRICS_RECORDING_LEVEL_CONFIG)))
                .tags(metricsTags);
        List<MetricsReporter> reporters = config.getConfiguredInstances(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG,
                MetricsReporter.class);
        reporters.add(new JmxReporter(JMX_PREFIX));
        this.metrics = new Metrics(metricConfig, reporters, time);
        // Back-off time between retries after a failed request
        this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG);

        // load interceptors and make sure they get clientId
        Map<String, Object> userProvidedConfigs = config.originals();
        userProvidedConfigs.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
        List<ConsumerInterceptor<K, V>> interceptorList = (List) (new ConsumerConfig(userProvidedConfigs, false)).getConfiguredInstances(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG,
                ConsumerInterceptor.class);
        this.interceptors = new ConsumerInterceptors<>(interceptorList);
        // Resolve the key and value deserializers: if instances were passed to the constructor, the corresponding config keys are ignored; otherwise they are instantiated from the configuration
        if (keyDeserializer == null) {
            this.keyDeserializer = config.getConfiguredInstance(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                    Deserializer.class);
            this.keyDeserializer.configure(config.originals(), true);
        } else {
            config.ignore(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG);
            this.keyDeserializer = keyDeserializer;
        }
        if (valueDeserializer == null) {
            this.valueDeserializer = config.getConfiguredInstance(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                    Deserializer.class);
            this.valueDeserializer.configure(config.originals(), false);
        } else {
            config.ignore(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG);
            this.valueDeserializer = valueDeserializer;
        }
        // Build the cluster resource listeners
        ClusterResourceListeners clusterResourceListeners = configureClusterResourceListeners(keyDeserializer, valueDeserializer, reporters, interceptorList);
        // Set up the cluster metadata; besides error-triggered updates, it is also refreshed periodically
        this.metadata = new Metadata(retryBackoffMs, config.getLong(ConsumerConfig.METADATA_MAX_AGE_CONFIG),
                true, false, clusterResourceListeners);
        // Parse and validate the bootstrap server addresses
        List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(config.getList(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG));
        this.metadata.update(Cluster.bootstrap(addresses), Collections.<String>emptySet(), 0);
        String metricGrpPrefix = "consumer";
        ConsumerMetrics metricsRegistry = new ConsumerMetrics(metricsTags.keySet(), "consumer");
        ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config);

        // Configure the transactional isolation level
        IsolationLevel isolationLevel = IsolationLevel.valueOf(
                config.getString(ConsumerConfig.ISOLATION_LEVEL_CONFIG).toUpperCase(Locale.ROOT));
        Sensor throttleTimeSensor = Fetcher.throttleTimeSensor(metrics, metricsRegistry.fetcherMetrics);

        // Heartbeat interval for the consumer group. It must be set lower than session.timeout.ms, and normally no higher than 1/3 of that value; it can be set even lower to tune the expected time for normal rebalances
        int heartbeatIntervalMs = config.getInt(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG);

        // Create the underlying network client
        NetworkClient netClient = new NetworkClient(
                new Selector(config.getLong(ConsumerConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, channelBuilder, logContext),
                this.metadata,
                clientId,
                100, // a fixed large enough value will suffice for max in-flight requests
                config.getLong(ConsumerConfig.RECONNECT_BACKOFF_MS_CONFIG),
                config.getLong(ConsumerConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG),
                config.getInt(ConsumerConfig.SEND_BUFFER_CONFIG),
                config.getInt(ConsumerConfig.RECEIVE_BUFFER_CONFIG),
                config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG),
                time,
                true,
                new ApiVersions(),
                throttleTimeSensor,
                logContext);
        // Wrap the network client into a ConsumerNetworkClient
        this.client = new ConsumerNetworkClient(
                logContext,
                netClient,
                metadata,
                time,
                retryBackoffMs,
                config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG),
                heartbeatIntervalMs); //Will avoid blocking an extended period of time to prevent heartbeat thread starvation
        // Configure the offset reset strategy (auto.offset.reset)
        OffsetResetStrategy offsetResetStrategy = OffsetResetStrategy.valueOf(config.getString(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).toUpperCase(Locale.ROOT));
        // Tracks the subscription state: topics, partitions and consumed offsets
        this.subscriptions = new SubscriptionState(offsetResetStrategy);
        // Configure the partition assignors
        this.assignors = config.getConfiguredInstances(
                ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
                PartitionAssignor.class);
        // Create the consumer group coordinator, which also registers a metadata listener
        this.coordinator = new ConsumerCoordinator(logContext,
                this.client,
                groupId,
                config.getInt(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG),
                config.getInt(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG),
                heartbeatIntervalMs,
                assignors,
                this.metadata,
                this.subscriptions,
                metrics,
                metricGrpPrefix,
                this.time,
                retryBackoffMs,
                config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG),
                config.getInt(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG),
                this.interceptors,
                config.getBoolean(ConsumerConfig.EXCLUDE_INTERNAL_TOPICS_CONFIG),
                config.getBoolean(ConsumerConfig.LEAVE_GROUP_ON_CLOSE_CONFIG));
        // The fetcher sends fetch requests to the brokers and collects the fetched data
        this.fetcher = new Fetcher<>(
                logContext,
                this.client,
                config.getInt(ConsumerConfig.FETCH_MIN_BYTES_CONFIG),
                config.getInt(ConsumerConfig.FETCH_MAX_BYTES_CONFIG),
                config.getInt(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG),
                config.getInt(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG),
                config.getInt(ConsumerConfig.MAX_POLL_RECORDS_CONFIG),
                config.getBoolean(ConsumerConfig.CHECK_CRCS_CONFIG),
                this.keyDeserializer,
                this.valueDeserializer,
                this.metadata,
                this.subscriptions,
                metrics,
                metricsRegistry.fetcherMetrics,
                this.time,
                this.retryBackoffMs,
                this.requestTimeoutMs,
                isolationLevel);

        config.logUnused();
        AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics);

        log.debug("Kafka consumer initialized");
    } catch (Throwable t) {
        // call close methods if internal objects are already constructed
        // this is to prevent resource leak. see KAFKA-2121
        close(0, true);
        // now propagate the exception
        throw new KafkaException("Failed to construct kafka consumer", t);
    }
}
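
One consequence of the deserializer branch above: when deserializer instances are passed to the constructor directly, the key.deserializer / value.deserializer configuration entries are ignored. A minimal sketch of that variant, reusing the imports from the example at the top; the broker address and group id are placeholders:

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // placeholder address
props.put(ConsumerConfig.GROUP_ID_CONFIG, "deserializer-demo-group");   // hypothetical group id

// No key.deserializer / value.deserializer entries are needed:
// the instances passed below take precedence and the config keys, if present, are ignored.
KafkaConsumer<String, String> consumer =
        new KafkaConsumer<>(props, new StringDeserializer(), new StringDeserializer());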

Subscribing to topics

Only the topics from the most recent subscribe call are kept. A subscription can be a single topic, a collection of topics, or a regex pattern (whose matched topics can change dynamically).

@Override
public void subscribe(Collection<String> topics, ConsumerRebalanceListener listener) {
    // Acquire the light lock and make sure the consumer has not been closed
    acquireAndEnsureOpen();
    try {
        if (topics == null) {
            throw new IllegalArgumentException("Topic collection to subscribe to cannot be null");
        } else if (topics.isEmpty()) {
            // treat subscribing to empty topic list as the same as unsubscribing
            this.unsubscribe();
        } else {
            for (String topic : topics) {
                if (topic == null || topic.trim().isEmpty())
                    throw new IllegalArgumentException("Topic collection to subscribe to cannot contain null or empty topic");
            }
            // Make sure at least one partition assignor is configured
            throwIfNoAssignorsConfigured();

            log.debug("Subscribed to topic(s): {}", Utils.join(topics, ", "));
            // Update the subscription
            this.subscriptions.subscribe(new HashSet<>(topics), listener);
            // If the metadata does not yet contain the group's topics, request a metadata update for them
            metadata.setTopics(subscriptions.groupSubscription());
        }
    } finally {
        release();
    }
}
public void subscribe(Set<String> topics, ConsumerRebalanceListener listener) {
    if (listener == null)
        throw new IllegalArgumentException("RebalanceListener cannot be null");
    // Set the subscription type (AUTO_TOPICS)
    setSubscriptionType(SubscriptionType.AUTO_TOPICS);
    // Register the consumer rebalance listener
    this.rebalanceListener = listener;
    // Change this consumer's subscription and add the topics to the group subscription
    changeSubscription(topics);
}
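
A short usage sketch of these overloads (the topic names, the pattern and the listener bodies are illustrative only; imports for Pattern, Collection, TopicPartition and ConsumerRebalanceListener are assumed). Because only the most recent subscribe() call is kept, the second call below completely replaces the first:

consumer.subscribe(Collections.singletonList("demo-topic-a"));        // hypothetical topic

// Replaces the previous subscription entirely and switches to pattern-based subscription.
consumer.subscribe(Pattern.compile("demo-.*"), new ConsumerRebalanceListener() {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Called before a rebalance takes these partitions away from this consumer,
        // e.g. a good place to commit their offsets.
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // Called after the rebalance with the partitions now owned by this consumer.
    }
});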

Consuming data

Messages are fetched in batches through the poll method; timeout is the maximum time a single poll may spend waiting for messages.

@Override
public ConsumerRecords<K, V> poll(long timeout) {
    acquireAndEnsureOpen();
    try {
        // A timeout of 0 returns immediately; negative timeouts are not allowed
        if (timeout < 0)
            throw new IllegalArgumentException("Timeout must not be negative");

        // The consumer has not subscribed to any topics or been assigned any partitions
        if (this.subscriptions.hasNoSubscriptionOrUserAssignment())
            throw new IllegalStateException("Consumer is not subscribed to any topics or assigned any partitions");

        // poll for new data until the timeout expires
        long start = time.milliseconds();
        long remaining = timeout;
        do {
            // One round of polling for data
            Map<TopicPartition, List<ConsumerRecord<K, V>>> records = pollOnce(remaining);
            if (!records.isEmpty()) {
                // before returning the fetched records, we can send off the next round of fetches
                // and avoid block waiting for  their responses to enable pipelining while the user
                // is handling the fetched records.
                //
                // NOTE: since the consumed position has already been updated, we must not allow
                // wakeups or any other errors to be triggered prior to returning the fetched records.
                if (fetcher.sendFetches() > 0 || client.hasPendingRequests())
                    client.pollNoWakeup();

                // Run the consumer interceptors on the fetched records
                return this.interceptors.onConsume(new ConsumerRecords<>(records));
            }

            long elapsed = time.milliseconds() - start;
            remaining = timeout - elapsed;
        } while (remaining > 0);

        return ConsumerRecords.empty();
    } finally {
        release();
    }
}
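
Because poll() may block for up to the given timeout, a common shutdown pattern is to call consumer.wakeup() from another thread, which makes the blocked poll() throw a WakeupException (from org.apache.kafka.common.errors). A sketch that mirrors the loop from the example at the top:

try {
    while (IS_RUNNING.get()) {
        ConsumerRecords<String, String> records = consumer.poll(10000);
        // process records ...
    }
} catch (WakeupException e) {
    // Expected when another thread calls consumer.wakeup() to break out of poll();
    // safe to ignore if we are shutting down.
} finally {
    consumer.close();
}

// From a shutdown hook or another thread:
// IS_RUNNING.set(false);
// consumer.wakeup();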

Consumer offsets

Following the consumer's polling method KafkaConsumer.pollOnce(long timeout), we can see that offset handling is delegated to the consumer coordinator, ConsumerCoordinator.

/**
 * Do one round of polling. In addition to checking for new data, this does any needed offset commits
 * (if auto-commit is enabled), and offset resets (if an offset reset policy is defined).
 * @param timeout The maximum time to block in the underlying call to {@link ConsumerNetworkClient#poll(long)}.
 * @return The fetched records (may be empty)
 */
private Map<TopicPartition, List<ConsumerRecord<K, V>>> pollOnce(long timeout) {
    client.maybeTriggerWakeup();

    long startMs = time.milliseconds();
    // Let the consumer coordinator do its round of work (coordinator checks, heartbeats, auto-commit)
    coordinator.poll(startMs, timeout);

    // Lookup positions of assigned partitions
    boolean hasAllFetchPositions = updateFetchPositions();

    // if data is available already, return it immediately
    Map<TopicPartition, List<ConsumerRecord<K, V>>> records = fetcher.fetchedRecords();
    if (!records.isEmpty())
        return records;

    // send any new fetches (won't resend pending fetches)
    fetcher.sendFetches();

    long nowMs = time.milliseconds();
    long remainingTimeMs = Math.max(0, timeout - (nowMs - startMs));
    long pollTimeout = Math.min(coordinator.timeToNextPoll(nowMs), remainingTimeMs);

    // We do not want to be stuck blocking in poll if we are missing some positions
    // since the offset lookup may be backing off after a failure
    if (!hasAllFetchPositions && pollTimeout > retryBackoffMs)
        pollTimeout = retryBackoffMs;

    client.poll(pollTimeout, nowMs, new PollCondition() {
        @Override
        public boolean shouldBlock() {
            // since a fetch might be completed by the background thread, we need this poll condition
            // to ensure that we do not block unnecessarily in poll()
            return !fetcher.hasCompletedFetches();
        }
    });

    // after the long poll, we should check whether the group needs to rebalance
    // prior to returning data so that the group can stabilize faster
    if (coordinator.needRejoin())
        return Collections.emptyMap();

    return fetcher.fetchedRecords();
}

The coordinator's poll method mainly ensures that the group coordinator is known and that the consumer is an active member of its group; it also drives the automatic offset commits.

public void poll(long now, long remainingMs) {
    // Invoke the callbacks of completed offset commits and log any exceptions they carry
    invokeCompletedOffsetCommitCallbacks();

    // For AUTO_TOPICS or AUTO_PATTERN subscriptions, make sure the group coordinator is known; if the client needs to rejoin the group and uses a pattern subscription, refresh the metadata before joining
    if (subscriptions.partitionsAutoAssigned()) {
        if (coordinatorUnknown()) {
            ensureCoordinatorReady();
            now = time.milliseconds();
        }

        if (needRejoin()) {
            // due to a race condition between the initial metadata fetch and the initial rebalance,
            // we need to ensure that the metadata is fresh before joining initially. This ensures
            // that we have matched the pattern against the cluster's topics at least once before joining.
            if (subscriptions.hasPatternSubscription())
                client.ensureFreshMetadata();

            ensureActiveGroup();
            now = time.milliseconds();
        }

        pollHeartbeat(now);
    } else {
        // For manually assigned partitions, if there are no ready nodes, await metadata.
        // If connections to all nodes fail, wakeups triggered while attempting to send fetch
        // requests result in polls returning immediately, causing a tight loop of polls. Without
        // the wakeup, poll() with no channels would block for the timeout, delaying re-connection.
        // awaitMetadataUpdate() initiates new connections with configured backoff and avoids the busy loop.
        // When group management is used, metadata wait is already performed for this scenario as
        // coordinator is unknown, hence this check is not required.
        // For manually assigned partitions: if a metadata update was requested and no nodes are ready, wait for the update; if there are still no ready nodes afterwards, skip this round of polling
        if (metadata.updateRequested() && !client.hasReadyNodes()) {
            boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs);
            if (!metadataUpdated && !client.hasReadyNodes())
                return;
            now = time.milliseconds();
        }
    }
    // Try an asynchronous auto-commit of offsets if it is due
    maybeAutoCommitOffsetsAsync(now);
}

If enable.auto.commit is set to true and the interval configured by auto.commit.interval.ms (5 seconds by default) has elapsed, the offsets are auto-committed asynchronously.
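
A minimal sketch of the client-side settings that drive this path (both values shown are the defaults, so this only makes them explicit):

Properties props = new Properties();
// enable.auto.commit: let the coordinator commit offsets in the background
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
// auto.commit.interval.ms: auto-commit at most once every 5 seconds (the default)
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "5000");

With these settings in place, the coordinator calls the method below on each poll: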

private void doAutoCommitOffsetsAsync() {
    // Collect the consumed offsets of all assigned partitions (entries without a position are skipped)
    Map<TopicPartition, OffsetAndMetadata> allConsumedOffsets = subscriptions.allConsumed();
    log.debug("Sending asynchronous auto-commit of offsets {}", allConsumedOffsets);

    commitOffsetsAsync(allConsumedOffsets, new OffsetCommitCallback() {
        // The callback logs the result; for retriable errors it also moves the next auto-commit deadline earlier
        @Override
        public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
            if (exception != null) {
                if (exception instanceof RetriableException) {
                    log.debug("Asynchronous auto-commit of offsets {} failed due to retriable error: {}", offsets,
                            exception);
                    nextAutoCommitDeadline = Math.min(time.milliseconds() + retryBackoffMs, nextAutoCommitDeadline);
                } else {
                    log.warn("Asynchronous auto-commit of offsets {} failed: {}", offsets, exception.getMessage());
                }
            } else {
                log.debug("Completed asynchronous auto-commit of offsets {}", offsets);
            }
        }
    });
}
public void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback) {
    invokeCompletedOffsetCommitCallbacks();

    if (!coordinatorUnknown()) {
        doCommitOffsetsAsync(offsets, callback);
    } else {
        // we don't know the current coordinator, so try to find it and then send the commit
        // or fail (we don't want recursive retries which can cause offset commits to arrive
        // out of order). Note that there may be multiple offset commits chained to the same
        // coordinator lookup request. This is fine because the listeners will be invoked in
        // the same order that they were added. Note also that AbstractCoordinator prevents
        // multiple concurrent coordinator lookup requests.
        pendingAsyncCommits.incrementAndGet();
        lookupCoordinator().addListener(new RequestFutureListener<Void>() {
            @Override
            public void onSuccess(Void value) {
                pendingAsyncCommits.decrementAndGet();
                doCommitOffsetsAsync(offsets, callback);
                client.pollNoWakeup();
            }

            @Override
            public void onFailure(RuntimeException e) {
                pendingAsyncCommits.decrementAndGet();
                completedOffsetCommits.add(new OffsetCommitCompletion(callback, offsets,
                        new RetriableCommitFailedException(e)));
            }
        });
    }

    // ensure the commit has a chance to be transmitted (without blocking on its completion).
    // Note that commits are treated as heartbeats by the coordinator, so there is no need to
    // explicitly allow heartbeats through delayed task execution.
    client.pollNoWakeup();
}

When the coordinator is known, the offset commit request is sent directly.

private void doCommitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback) {
    RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
    final OffsetCommitCallback cb = callback == null ? defaultOffsetCommitCallback : callback;
    future.addListener(new RequestFutureListener<Void>() {
        @Override
        public void onSuccess(Void value) {
            if (interceptors != null)
                interceptors.onCommit(offsets);

            completedOffsetCommits.add(new OffsetCommitCompletion(cb, offsets, null));
        }

        @Override
        public void onFailure(RuntimeException e) {
            Exception commitException = e;

            if (e instanceof RetriableException)
                commitException = new RetriableCommitFailedException(e);

            completedOffsetCommits.add(new OffsetCommitCompletion(cb, offsets, commitException));
        }
    });
}

When the coordinator is unknown, the client first tries to look it up and then either sends the commit or fails (recursive retries are avoided because they could cause offset commits to arrive out of order). Note that several offset commits may be chained to the same coordinator lookup request; this is fine because the listeners are invoked in the order in which they were added. Also note that AbstractCoordinator prevents multiple concurrent coordinator lookup requests.
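
From the application side, the same asynchronous commit path can be exercised through KafkaConsumer.commitAsync with an OffsetCommitCallback. A short sketch using the consumer from the first example (imports for Map, TopicPartition, OffsetAndMetadata and OffsetCommitCallback are assumed; the error handling is illustrative only):

consumer.commitAsync(new OffsetCommitCallback() {
    @Override
    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        if (exception != null) {
            // Asynchronous commits are not retried here, to avoid out-of-order commits;
            // log the failure, or fall back to a final commitSync() during shutdown.
            System.err.println("Async offset commit failed for " + offsets + ": " + exception);
        }
    }
});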
