private boolean doFlush(boolean applyAllDeletes) throws IOException {
  if (tragedy.get() != null) {
    throw new IllegalStateException("this writer hit an unrecoverable error; cannot flush", tragedy.get());
  }

  doBeforeFlush();
  testPoint("startDoFlush");
  boolean success = false;
  try {
    if (infoStream.isEnabled("IW")) {
      infoStream.message("IW", "  start flush: applyAllDeletes=" + applyAllDeletes);
      infoStream.message("IW", "  index before flush " + segString());
    }
    boolean anyChanges = false;

    synchronized (fullFlushLock) {
      boolean flushSuccess = false;
      try {
        // flush every DWPT (DocumentsWriterPerThread)
        long seqNo = docWriter.flushAllThreads();
        if (seqNo < 0) {
          seqNo = -seqNo;
          anyChanges = true;
        } else {
          anyChanges = false;
        }
        if (!anyChanges) {
          // flushCount is incremented in flushAllThreads
          flushCount.incrementAndGet();
        }
        // publish the flushed segments (and their deletes) to the IndexWriter
        publishFlushedSegments(true);
        flushSuccess = true;
      } finally {
        assert holdsFullFlushLock();
        // update the state in flushControl and push the delete info onto the ticketQueue
        docWriter.finishFullFlush(flushSuccess);
        // drain the pending entries in the eventQueue
        processEvents(false);
      }
    }

    if (applyAllDeletes) {
      // wait until all buffered deletes and updates have been applied
      applyAllDeletesAndUpdates();
    }
    anyChanges |= maybeMerge.getAndSet(false);

    synchronized (this) {
      // ensure that all changes in the reader-pool are written to disk
      writeReaderPool(applyAllDeletes);
      doAfterFlush();
      success = true;
      return anyChanges;
    }
  } catch (VirtualMachineError tragedy) {
    tragicEvent(tragedy, "doFlush");
    throw tragedy;
  } finally {
    if (!success) {
      if (infoStream.isEnabled("IW")) {
        infoStream.message("IW", "hit exception during flush");
      }
      maybeCloseOnTragicEvent();
    }
  }
}
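
For orientation, here is a minimal sketch of how this path is reached from user code (assuming a recent Lucene release; the directory, analyzer and field name below are illustrative and not part of the source being discussed): IndexWriter.flush() delegates to flush(true, true), which ends up in doFlush(true) above.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class FlushDemo {
  public static void main(String[] args) throws Exception {
    try (Directory dir = new ByteBuffersDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      Document doc = new Document();
      doc.add(new StringField("id", "1", Field.Store.YES));
      writer.addDocument(doc);
      // Explicit flush: moves the buffered document into a new segment.
      // Internally flush() calls flush(true, true), which invokes doFlush(true).
      writer.flush();
    }
  }
}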
org.apache.lucene.index.FrozenBufferedUpdates#apply
/**
 * Applies pending delete-by-term, delete-by-query and doc values updates to all segments in the
 * index, returning the number of new deleted or updated documents.
 */
long apply(BufferedUpdatesStream.SegmentState[] segStates) throws IOException {
  assert applyLock.isHeldByCurrentThread();
  if (delGen == -1) {
    // we were not yet pushed
    throw new IllegalArgumentException(
        "gen is not yet set; call BufferedUpdatesStream.push first");
  }
  assert applied.getCount() != 0;

  if (privateSegment != null) {
    assert segStates.length == 1;
    assert privateSegment == segStates[0].reader.getOriginalSegmentInfo();
  }

  totalDelCount += applyTermDeletes(segStates);
  totalDelCount += applyQueryDeletes(segStates);
  totalDelCount += applyDocValuesUpdates(segStates);
  return totalDelCount;
}
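
The three apply* calls at the end map one-to-one onto the user-facing IndexWriter operations that buffer these updates. A hedged sketch of that mapping (the field names "id", "price" and "views" are made up for illustration):

import java.io.IOException;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;

// Buffer one update of each kind; they sit in memory until a flush freezes
// them into a FrozenBufferedUpdates whose apply() replays them per segment.
static void bufferOneOfEach(IndexWriter writer) throws IOException {
  writer.deleteDocuments(new Term("id", "1"));                     // -> applyTermDeletes
  writer.deleteDocuments(LongPoint.newRangeQuery("price", 0, 10)); // -> applyQueryDeletes
  writer.updateNumericDocValue(new Term("id", "2"), "views", 42L); // -> applyDocValuesUpdates
}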
org.apache.lucene.index.IndexWriter#openSegmentStates
/** Opens SegmentReader and inits SegmentState for each segment. */
private BufferedUpdatesStream.SegmentState[] openSegmentStates(
    List<SegmentCommitInfo> infos, Set<SegmentCommitInfo> alreadySeenSegments, long delGen)
    throws IOException {
  List<BufferedUpdatesStream.SegmentState> segStates = new ArrayList<>();
  try {
    for (SegmentCommitInfo info : infos) {
      if (info.getBufferedDeletesGen() <= delGen && alreadySeenSegments.contains(info) == false) {
        segStates.add(
            new BufferedUpdatesStream.SegmentState(
                getPooledInstance(info, true), this::release, info));
        alreadySeenSegments.add(info);
      }
    }
  } catch (Throwable t) {
    try {
      IOUtils.close(segStates);
    } catch (Throwable t1) {
      t.addSuppressed(t1);
    }
    throw t;
  }
  return segStates.toArray(new BufferedUpdatesStream.SegmentState[0]);
}
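
The catch block is a standard partial-failure cleanup idiom: close whatever was opened before the failure, and attach any exception thrown during cleanup to the primary one via addSuppressed so neither is lost. A generic sketch of the same idiom, assuming a hypothetical openAll helper (not Lucene API):

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.util.IOSupplier;
import org.apache.lucene.util.IOUtils;

// Hypothetical helper: open a batch of resources, or none at all.
static List<Closeable> openAll(List<IOSupplier<Closeable>> factories) throws IOException {
  List<Closeable> opened = new ArrayList<>();
  try {
    for (IOSupplier<Closeable> factory : factories) {
      opened.add(factory.get());
    }
  } catch (Throwable t) {
    try {
      IOUtils.close(opened); // best-effort cleanup of the partially opened list
    } catch (Throwable t1) {
      t.addSuppressed(t1); // keep the cleanup failure without masking the root cause
    }
    throw t;
  }
  return opened;
}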