1:RecordAccumulator
Location: org.apache.kafka.clients.producer.internals
Role: an in-memory buffer that accumulates records waiting to be sent to the server; it acts as a message accumulator on the producer side.
1.1: Fields
private final int batchSize; /* target size of each batch (batch.size, default 16KB) */
private final CompressionType compression; /* compression type */
private final long lingerMs; /* how long a batch may wait after creation before it must be sent */
private final long retryBackoffMs; /* interval to wait before retrying a failed batch send */
private final BufferPool free; /* memory pool (buffer.memory, default 32MB) from which batch buffers are allocated; a buffer is returned to the pool once its batch has been sent */
private final Time time;
private final ApiVersions apiVersions;
private final ConcurrentMap<TopicPartition, Deque<ProducerBatch>> batches; /* thread-safe map of topicPartition -> [batch1, batch2, ...] */
private final IncompleteBatches incomplete; /* batches that have been created but not yet completed */
// The following variables are only accessed by the sender thread, so we don't need to protect them.
/* Partitions that currently have a batch in flight; muting them preserves per-partition message ordering (required for idempotence). */
private final Set<TopicPartition> muted;
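Most of these fields are populated straight from producer configuration. As a minimal sketch, here is a producer configured with the corresponding settings (the broker address and serializers are placeholders; the defaults in the comments are the standard Kafka client defaults):

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

public class AccumulatorConfigDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);        // -> batchSize (16KB is the default)
        props.put(ProducerConfig.LINGER_MS_CONFIG, 5);             // -> lingerMs (default 0)
        props.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, 100);    // -> retryBackoffMs (default 100)
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432L); // -> free (BufferPool, 32MB default)
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "lz4");  // -> compression
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // The producer's internal RecordAccumulator is now built from the settings above.
        }
    }
}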
1.2: Methods
The main method for appending data:
public RecordAppendResult append(TopicPartition tp,
                                 long timestamp,
                                 byte[] key,
                                 byte[] value,
                                 Header[] headers,
                                 Callback callback,
                                 long maxTimeToBlock) throws InterruptedException {
    // We keep track of the number of appending threads to make sure we do not miss batches in
    // abortIncompleteBatches().
    appendsInProgress.incrementAndGet();
    ByteBuffer buffer = null; /* the producer is multi-threaded, so the deque is protected below with synchronized blocks plus a double-check via tryAppend */
    if (headers == null) headers = Record.EMPTY_HEADERS;
    try {
        // Check if we have an in-progress batch: get the Deque for this partition; it may hold several unsent batches.
        Deque<ProducerBatch> dq = getOrCreateDeque(tp);
        /* only one thread at a time enters this block */
        synchronized (dq) { /* append to the deque under the lock to stay thread-safe */
            if (closed)
                throw new IllegalStateException("Cannot send after the producer is closed.");
            /* Case 1: if no existing batch in this partition's deque can accept the record, the append fails and appendResult == null, meaning a new batch must be created. */
            RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq);
            if (appendResult != null)
                return appendResult;
        }
        // Case 2: no in-progress batch accepted the record, so create a new batch and append to it.
        byte maxUsableMagic = apiVersions.maxUsableProduceMagic();
        /* Take the larger of the configured batch size (default 16KB) and the estimated record size (messages are capped at 1MB by default), so both usually need tuning together. */
        int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers));
        log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition());
        /* Allocate memory: the larger of 16KB and the record size. */
        buffer = free.allocate(size, maxTimeToBlock);
        synchronized (dq) {
            // Need to check if producer is closed again after grabbing the dequeue lock.
            if (closed)
                throw new IllegalStateException("Cannot send after the producer is closed.");
            RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq);
            if (appendResult != null) {
                // Somebody else found us a batch, return the one we waited for! Hopefully this doesn't happen often...
                return appendResult;
            }
            /* The three layers of wrapping below each resolve their own attributes once instead of redefining them, keeping the layers decoupled. */
            /* Wrap the allocated buffer in a MemoryRecordsBuilder, which writes record data into it. */
            MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic);
            /* Create the ProducerBatch, which wraps the MemoryRecordsBuilder. */
            ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds());
            /* batch.tryAppend: serializes the record in the binary record format into the MemoryRecordsBuilder, and thus into the ProducerBatch. */
            FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds()));
            /* Add the newly created batch to the deque. */
            dq.addLast(batch);
            incomplete.add(batch);
            // Don't deallocate this buffer in the finally block as it's being used in the record batch
            buffer = null;
            // Report whether the batch is full or newly created, so the caller can decide whether to wake the sender.
            return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true);
        }
    } finally {
        if (buffer != null)
            free.deallocate(buffer);
        appendsInProgress.decrementAndGet();
    }
}
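The two booleans in the returned RecordAppendResult drive the sender wakeup on the caller side. A condensed sketch of that call site, based on KafkaProducer.doSend() in the same client version (variable names follow the Kafka source; surrounding error handling is omitted):

RecordAccumulator.RecordAppendResult result = accumulator.append(
        tp, timestamp, serializedKey, serializedValue, headers, interceptCallback, remainingWaitMs);
if (result.batchIsFull || result.newBatchCreated) {
    // A full deque or a freshly created batch means there is (or soon will be) data worth sending.
    this.sender.wakeup();
}
return result.future;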
private RecordAppendResult tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers,
                                     Callback callback, Deque<ProducerBatch> deque) {
    /* Take the most recently created (last) batch among the unsent batches. */
    ProducerBatch last = deque.peekLast();
    if (last != null) {
        FutureRecordMetadata future = last.tryAppend(timestamp, key, value, headers, callback, time.milliseconds());
        if (future == null)
            /* The batch has no room left; close it so no further appends are attempted. */
            last.closeForRecordAppends();
        else
            return new RecordAppendResult(future, deque.size() > 1 || last.isFull(), false);
    }
    return null;
}
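The try-lock-try pattern above (append under the lock, allocate outside the lock, then try again before creating a new batch) is worth seeing in isolation. Below is a self-contained, hypothetical sketch of the same concurrency pattern with a toy Batch class; it is not the Kafka implementation, only an illustration of the idea:

import java.util.ArrayDeque;
import java.util.Deque;

// Toy illustration of RecordAccumulator's double-checked append pattern (all types here are hypothetical).
class ToyAccumulator {
    static class Batch {
        private final byte[] buf;
        private int used;
        Batch(int size) { this.buf = new byte[size]; }
        // Returns false when the record does not fit, mirroring ProducerBatch.tryAppend returning null.
        boolean tryAppend(byte[] record) {
            if (used + record.length > buf.length) return false;
            System.arraycopy(record, 0, buf, used, record.length);
            used += record.length;
            return true;
        }
    }

    private final Deque<Batch> dq = new ArrayDeque<>();
    private final int batchSize = 16 * 1024;

    void append(byte[] record) {
        synchronized (dq) {                       // first attempt: reuse the last open batch
            Batch last = dq.peekLast();
            if (last != null && last.tryAppend(record)) return;
        }
        // Allocate outside the lock: allocation may block, and holding the lock here would stall other appenders.
        Batch fresh = new Batch(Math.max(batchSize, record.length));
        synchronized (dq) {                       // second attempt: another thread may have added a batch meanwhile
            Batch last = dq.peekLast();
            if (last != null && last.tryAppend(record)) return; // "somebody else found us a batch"
            fresh.tryAppend(record);
            dq.addLast(fresh);
        }
    }
}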
The algorithm that decides whether a batch is ready to send; it returns the nodes that are leaders for partitions with sendable data:
public ReadyCheckResult ready(Cluster cluster, long nowMs) {
    Set<Node> readyNodes = new HashSet<>();
    long nextReadyCheckDelayMs = Long.MAX_VALUE;
    Set<String> unknownLeaderTopics = new HashSet<>();
    /* true if any thread is blocked waiting on the BufferPool, i.e. producer memory is exhausted */
    boolean exhausted = this.free.queued() > 0;
    /* Walk every partition's deque and collect the leaders of partitions with a sendable batch. */
    for (Map.Entry<TopicPartition, Deque<ProducerBatch>> entry : this.batches.entrySet()) {
        TopicPartition part = entry.getKey();
        Deque<ProducerBatch> deque = entry.getValue();
        /* Find the leader for this partition. */
        Node leader = cluster.leaderFor(part);
        /* Concurrency: reading the deque is locked just like writing, to stay thread-safe. */
        synchronized (deque) {
            if (leader == null && !deque.isEmpty()) {
                // This is a partition for which leader is not known, but messages are available to send.
                // Note that entries are currently not removed from batches when deque is empty.
                unknownLeaderTopics.add(part.topic());
            } else if (!readyNodes.contains(leader) && !muted.contains(part)) {
                /* Each pass inspects only the first (oldest) batch of each partition, not every batch in the deque. */
                ProducerBatch batch = deque.peekFirst();
                if (batch != null) {
                    long waitedTimeMs = batch.waitedTimeMs(nowMs);
                    // backingOff: the batch has been retried at least once (attempts starts at 0) and has not yet waited out retryBackoffMs.
                    boolean backingOff = batch.attempts() > 0 && waitedTimeMs < retryBackoffMs;
                    /* The wait target is retryBackoffMs while backing off, otherwise lingerMs.
                     * lingerMs defaults to 0, meaning records are sent as soon as possible without waiting for the batch to fill. */
                    long timeToWaitMs = backingOff ? retryBackoffMs : lingerMs;
                    boolean full = deque.size() > 1 || batch.isFull();
                    /* expired: the batch has waited at least timeToWaitMs (with the default lingerMs = 0 this is immediately true). */
                    boolean expired = waitedTimeMs >= timeToWaitMs;
                    /* A batch is sendable when it is full, expired, memory is exhausted, the producer is closed, or a flush is in progress. */
                    boolean sendable = full || expired || exhausted || closed || flushInProgress();
                    if (sendable && !backingOff) {
                        readyNodes.add(leader);
                    } else { // no batch on this partition is sendable yet
                        /* timeToWaitMs - waitedTimeMs = how much longer this batch may wait before it must be sent */
                        long timeLeftMs = Math.max(timeToWaitMs - waitedTimeMs, 0);
                        // Note that this results in a conservative estimate since an un-sendable partition may have
                        // a leader that will later be found to have sendable data. However, this is good enough
                        // since we'll just wake up and then sleep again for the remaining time.
                        /* Keep the minimum timeLeftMs over all partitions as the delay until the next ready check. */
                        nextReadyCheckDelayMs = Math.min(timeLeftMs, nextReadyCheckDelayMs);
                    }
                }
            }
        }
    }
    return new ReadyCheckResult(readyNodes, nextReadyCheckDelayMs, unknownLeaderTopics);
}
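To make the decision logic concrete, here is a small self-contained sketch that recomputes the same booleans for one hypothetical batch (all values are made up for illustration):

// Hypothetical walk-through of the readiness booleans for one batch.
public class ReadyCheckDemo {
    public static void main(String[] args) {
        long lingerMs = 5;          // linger.ms (the default is 0)
        long retryBackoffMs = 100;  // retry.backoff.ms (default 100)
        int attempts = 0;           // this batch has never been retried
        long waitedTimeMs = 7;      // the batch was created 7 ms ago
        boolean batchIsFull = false;
        int dequeSize = 1;          // only this one batch in the partition's deque

        boolean backingOff = attempts > 0 && waitedTimeMs < retryBackoffMs; // false
        long timeToWaitMs = backingOff ? retryBackoffMs : lingerMs;         // 5
        boolean full = dequeSize > 1 || batchIsFull;                        // false
        boolean expired = waitedTimeMs >= timeToWaitMs;                     // true: 7 >= 5
        boolean sendable = full || expired;                                 // true (ignoring exhausted/closed/flush)
        System.out.println("sendable=" + sendable + ", backingOff=" + backingOff);
        // Prints: sendable=true, backingOff=false -> this partition's leader joins readyNodes.
    }
}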
Get or create the Deque for a partition:
private Deque<ProducerBatch> getOrCreateDeque(TopicPartition tp) {
    Deque<ProducerBatch> d = this.batches.get(tp);
    if (d != null)
        return d;
    d = new ArrayDeque<>();
    /* putIfAbsent is atomic, guaranteeing exactly one deque per partition even under concurrent calls */
    Deque<ProducerBatch> previous = this.batches.putIfAbsent(tp, d);
    if (previous == null)
        return d;
    else
        return previous;
}
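The get-then-putIfAbsent shape avoids allocating an ArrayDeque on the hot path when the deque already exists. On Java 8+ the same single-deque-per-partition guarantee could be written more compactly; a hypothetical equivalent, not the actual Kafka code:

// Hypothetical Java 8+ equivalent: computeIfAbsent is also atomic per key on ConcurrentHashMap,
// though it may hold an internal bin lock while running the factory function.
private Deque<ProducerBatch> getOrCreateDeque(TopicPartition tp) {
    return this.batches.computeIfAbsent(tp, k -> new ArrayDeque<>());
}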