Create or get a PerField.

All PerField instances share the same cache; each one uses its own postingsArray to keep its own data apart. It is like a single object that everyone looks at through their own pair of glasses: each viewer gets its own view of the same object.
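To make the analogy concrete before the real code, here is a minimal, hypothetical sketch (the class and method names below are made up for illustration and are not Lucene's): several per-field views append into one shared buffer, and each view only remembers the offsets of its own entries.

```java
import java.util.ArrayList;
import java.util.List;

/** Hypothetical illustration: one shared buffer, per-field offset lists ("glasses"). */
class SharedBuffer {
  final List<byte[]> data = new ArrayList<>();

  /** Appends a value and returns its position in the shared buffer. */
  int append(byte[] value) {
    data.add(value);
    return data.size() - 1;
  }
}

class PerFieldView {
  private final SharedBuffer shared;                           // the same buffer for every field
  private final List<Integer> myOffsets = new ArrayList<>();   // this field's "postingsArray"

  PerFieldView(SharedBuffer shared) { this.shared = shared; }

  void add(byte[] term) { myOffsets.add(shared.append(term)); }

  /** Each field only interprets the offsets it recorded itself. */
  List<Integer> offsets() { return myOffsets; }
}
```

The real code follows the same shape: the pools live in the shared TermsHash, and each field's PerField/TermsHashPerField only keeps offsets into them.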
```java
private PerField getOrAddField(String name, IndexableFieldType fieldType, boolean invert) {

  // Make sure we have a PerField allocated
  final int hashPos = name.hashCode() & hashMask;
  PerField fp = fieldHash[hashPos];
  while (fp != null && !fp.fieldInfo.name.equals(name)) {
    fp = fp.next;
  }

  if (fp == null) {
    // First time we are seeing this field in this segment

    FieldInfo fi = fieldInfos.getOrAdd(name);
    initIndexOptions(fi, fieldType.indexOptions());
    Map<String, String> attributes = fieldType.getAttributes();
    if (attributes != null) {
      attributes.forEach((k, v) -> fi.putAttribute(k, v));
    }

    fp = new PerField(docWriter.getIndexCreatedVersionMajor(), fi, invert);
    fp.next = fieldHash[hashPos];
    fieldHash[hashPos] = fp;
    totalFieldCount++;

    // At most 50% load factor:
    if (totalFieldCount >= fieldHash.length / 2) {
      rehash();
    }

    if (totalFieldCount > fields.length) {
      PerField[] newFields = new PerField[ArrayUtil.oversize(totalFieldCount, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
      System.arraycopy(fields, 0, newFields, 0, fields.length);
      fields = newFields;
    }

  } else if (invert && fp.invertState == null) {
    initIndexOptions(fp.fieldInfo, fieldType.indexOptions());
    fp.setInvertState();
  }

  return fp;
}
```
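The lookup above is a plain chained hash table whose length is kept a power of two, so `name.hashCode() & hashMask` plays the role of a modulo. A simplified, hypothetical sketch of the same pattern (rehashing at the 50% load factor is omitted):

```java
/** Hypothetical, simplified version of the fieldHash lookup: a power-of-two table
 *  with per-bucket linked lists, where (hash & mask) replaces (hash % length). */
class Node {
  final String name;
  Node next;
  Node(String name, Node next) { this.name = name; this.next = next; }
}

class FieldTable {
  private final Node[] table = new Node[8];   // length must stay a power of two
  private final int mask = table.length - 1;

  Node getOrAdd(String name) {
    int pos = name.hashCode() & mask;         // equals floorMod(hash, length) for power-of-two lengths
    Node n = table[pos];
    while (n != null && !n.name.equals(name)) {
      n = n.next;                             // walk the collision chain
    }
    if (n == null) {                          // field not seen yet: prepend a new node
      n = new Node(name, table[pos]);
      table[pos] = n;
      // the real code also rehashes once the table is half full; omitted here
    }
    return n;
  }
}
```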
```java
public PerField(int indexCreatedVersionMajor, FieldInfo fieldInfo, boolean invert) {
  this.indexCreatedVersionMajor = indexCreatedVersionMajor;
  this.fieldInfo = fieldInfo;
  similarity = docState.similarity;
  if (invert) {
    setInvertState();
  }
}
```
```java
void setInvertState() {
  invertState = new FieldInvertState(indexCreatedVersionMajor, fieldInfo.name, fieldInfo.getIndexOptions());
  termsHashPerField = termsHash.addField(invertState, fieldInfo);
  if (fieldInfo.omitsNorms() == false) {
    assert norms == null;
    // Even if no documents actually succeed in setting a norm, we still write norms for this segment:
    norms = new NormValuesWriter(fieldInfo, docState.docWriter.bytesUsed);
  }
}
```
```java
@Override
public TermsHashPerField addField(FieldInvertState invertState, FieldInfo fieldInfo) {
  return new FreqProxTermsWriterPerField(invertState, this, fieldInfo, nextTermsHash.addField(invertState, fieldInfo));
}
```
```java
public TermsHashPerField(int streamCount, FieldInvertState fieldState, TermsHash termsHash, TermsHashPerField nextPerField, FieldInfo fieldInfo) {
  intPool = termsHash.intPool;
  bytePool = termsHash.bytePool;
  termBytePool = termsHash.termBytePool;
  docState = termsHash.docState;
  this.termsHash = termsHash;
  bytesUsed = termsHash.bytesUsed;
  this.fieldState = fieldState;
  this.streamCount = streamCount;
  numPostingInt = 2 * streamCount;
  this.fieldInfo = fieldInfo;
  this.nextPerField = nextPerField;
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
}
```
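The postingsArray used in add() below is a set of parallel arrays indexed by termID: intStarts and byteStarts record where a term's stream pointers and byte slices live inside the shared pools, and textStarts records where its bytes start in the term pool. A minimal, hypothetical sketch of that layout (only the field names are taken from the real code, everything else is simplified):

```java
/** Hypothetical sketch of the parallel-array layout: one "row" of data per termID,
 *  spread across several int[] arrays instead of one object per term. */
class MiniPostingsArray {
  int[] textStarts;   // termID -> where the term bytes start in the shared term pool
  int[] intStarts;    // termID -> where this term's stream pointers start in the int pool
  int[] byteStarts;   // termID -> where this term's first byte slice starts in the byte pool

  MiniPostingsArray(int size) {
    textStarts = new int[size];
    intStarts = new int[size];
    byteStarts = new int[size];
  }

  /** Record the locations for a newly seen term. */
  void newTerm(int termID, int textStart, int intStart, int byteStart) {
    textStarts[termID] = textStart;
    intStarts[termID] = intStart;
    byteStarts[termID] = byteStart;
  }
}
```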
```java
void add() throws IOException {
  // We are first in the chain so we must "intern" the
  // term text into textStart address
  // Get the text & hash of this term.
  int termID = bytesHash.add(termAtt.getBytesRef());

  //System.out.println("add term=" + termBytesRef.utf8ToString() + " doc=" + docState.docID + " termID=" + termID);

  if (termID >= 0) { // New posting
    bytesHash.byteStart(termID);
    // Init stream slices
    if (numPostingInt + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE) {
      intPool.nextBuffer();
    }

    if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt * ByteBlockPool.FIRST_LEVEL_SIZE) {
      bytePool.nextBuffer();
    }

    intUptos = intPool.buffer;
    intUptoStart = intPool.intUpto;
    intPool.intUpto += streamCount;

    postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;

    for (int i = 0; i < streamCount; i++) {
      final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
      intUptos[intUptoStart + i] = upto + bytePool.byteOffset;
    }
    postingsArray.byteStarts[termID] = intUptos[intUptoStart];

    newTerm(termID);

  } else {
    termID = (-termID) - 1;
    int intStart = postingsArray.intStarts[termID];
    intUptos = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
    intUptoStart = intStart & IntBlockPool.INT_BLOCK_MASK;
    addTerm(termID);
  }

  // The term-vectors PerField has to perform its add as well
  if (doNextCall) {
    nextPerField.add(postingsArray.textStarts[termID]);
  }
}
```
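The `termID >= 0` versus `(-termID) - 1` branch follows directly from the contract of BytesRefHash.add: a non-negative return value is the id of a newly added entry, while a negative return value encodes the id of an entry that already exists. A small standalone demo of that contract, assuming a Lucene 8.x-style org.apache.lucene.util API:

```java
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;

public class BytesRefHashDemo {
  public static void main(String[] args) {
    // The hash stores its term bytes in a ByteBlockPool, just like termBytePool above.
    ByteBlockPool pool = new ByteBlockPool(new ByteBlockPool.DirectAllocator());
    BytesRefHash hash = new BytesRefHash(pool);

    int first = hash.add(new BytesRef("lucene"));   // new term: returns its termID (>= 0)
    int again = hash.add(new BytesRef("lucene"));   // existing term: returns -(termID + 1)

    System.out.println(first);                      // 0
    System.out.println(again);                      // -1
    System.out.println((-again) - 1);               // 0, the same decoding as in add() above
  }
}
```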
```java
public DefaultIndexingChain(DocumentsWriterPerThread docWriter) throws IOException {
  this.docWriter = docWriter;
  this.fieldInfos = docWriter.getFieldInfosBuilder();
  this.docState = docWriter.docState;
  this.bytesUsed = docWriter.bytesUsed;

  final TermsHash termVectorsWriter;
  if (docWriter.getSegmentInfo().getIndexSort() == null) {
    storedFieldsConsumer = new StoredFieldsConsumer(docWriter);
    termVectorsWriter = new TermVectorsConsumer(docWriter);
  } else {
    storedFieldsConsumer = new SortingStoredFieldsConsumer(docWriter);
    termVectorsWriter = new SortingTermVectorsConsumer(docWriter);
  }
  // termVectorsWriter is passed in as the nextTermsHash
  termsHash = new FreqProxTermsWriter(docWriter, termVectorsWriter);
}
```
```java
@Override
public TermsHashPerField addField(FieldInvertState invertState, FieldInfo fieldInfo) {
  return new FreqProxTermsWriterPerField(invertState, this, fieldInfo, nextTermsHash.addField(invertState, fieldInfo));
}
```
Quoted from the reference document:

To sum up, Lucene has clearly gone to great lengths in its cache implementation. I think there are several reasons why it is this complex:

- Saving memory: for example, the three pointers that record offsets into the two buffer pools, the slice-size allocation strategy, delta encoding, and so on.
- Making memory easy to control. Almost all data is managed through the BlockPools, so memory usage is very easy to account for: the FlushPolicy can simply read the current usage and trigger the flush logic (see the sketch after this list).
- The cache does not have to be per field. All fields of the same segment can put their data into one shared cache, while each field keeps its own posting list (that is, its postingsArray); the term literals of all fields share one cache and the hash on top of it, which saves a great deal of space. For a concrete field, checking whether a term exists means first checking whether it exists in the shared term block, and then checking whether the field's posting list has an entry for it.
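For the second point, here is a small demo of how pool allocations can be accounted for centrally, assuming a Lucene 8.x-style org.apache.lucene.util API; the flush policy itself is not shown, this only illustrates the counter such a policy would read:

```java
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.Counter;

public class PoolAccountingDemo {
  public static void main(String[] args) {
    // A single Counter sees every block the pool allocates.
    Counter bytesUsed = Counter.newCounter();
    ByteBlockPool pool =
        new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));

    pool.nextBuffer();                         // allocate the first block
    System.out.println(bytesUsed.get());       // 32768 (ByteBlockPool.BYTE_BLOCK_SIZE)

    pool.nextBuffer();                         // allocate one more block
    System.out.println(bytesUsed.get());       // 65536: the number a flush policy would inspect
  }
}
```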
References