Example 1 with SerializedDocument

Use of org.apache.lucene.codecs.compressing.CompressingStoredFieldsReader.SerializedDocument in project lucene-solr by apache.

From the class CompressingStoredFieldsWriter, the method merge, which bulk-copies compressed chunks between matching segments and falls back to document-by-document merging otherwise:

@Override
public int merge(MergeState mergeState) throws IOException {
    int docCount = 0;
    int numReaders = mergeState.maxDocs.length;
    MatchingReaders matching = new MatchingReaders(mergeState);
    if (mergeState.needsIndexSort) {
        /*
         * If all readers are compressed and they have the same field infos then we can
         * merge the serialized documents directly.
         */
        List<CompressingStoredFieldsMergeSub> subs = new ArrayList<>();
        for (int i = 0; i < mergeState.storedFieldsReaders.length; i++) {
            if (matching.matchingReaders[i] && mergeState.storedFieldsReaders[i] instanceof CompressingStoredFieldsReader) {
                CompressingStoredFieldsReader storedFieldsReader = (CompressingStoredFieldsReader) mergeState.storedFieldsReaders[i];
                storedFieldsReader.checkIntegrity();
                subs.add(new CompressingStoredFieldsMergeSub(storedFieldsReader, mergeState.docMaps[i], mergeState.maxDocs[i]));
            } else {
                return super.merge(mergeState);
            }
        }
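        // 'true' tells DocIDMerger an index sort is in effect, so it interleaves the
        // subs through a priority queue ordered by mapped docID instead of concatenating.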
        final DocIDMerger<CompressingStoredFieldsMergeSub> docIDMerger = DocIDMerger.of(subs, true);
        while (true) {
            CompressingStoredFieldsMergeSub sub = docIDMerger.next();
            if (sub == null) {
                break;
            }
            assert sub.mappedDocID == docCount;
            SerializedDocument doc = sub.reader.document(sub.docID);
            startDocument();
            bufferedDocs.copyBytes(doc.in, doc.length);
            numStoredFieldsInDoc = doc.numStoredFields;
            finishDocument();
            ++docCount;
        }
        finish(mergeState.mergeFieldInfos, docCount);
        return docCount;
    }
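    // No index sort: merge segment by segment, bulk-copying compressed chunks when possible.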
    for (int readerIndex = 0; readerIndex < numReaders; readerIndex++) {
        MergeVisitor visitor = new MergeVisitor(mergeState, readerIndex);
        CompressingStoredFieldsReader matchingFieldsReader = null;
        if (matching.matchingReaders[readerIndex]) {
            final StoredFieldsReader fieldsReader = mergeState.storedFieldsReaders[readerIndex];
            // we can only bulk-copy if the matching reader is also a CompressingStoredFieldsReader
            if (fieldsReader != null && fieldsReader instanceof CompressingStoredFieldsReader) {
                matchingFieldsReader = (CompressingStoredFieldsReader) fieldsReader;
            }
        }
        final int maxDoc = mergeState.maxDocs[readerIndex];
        final Bits liveDocs = mergeState.liveDocs[readerIndex];
        // if it's some other format, or an older version of this format, or safety switch:
        if (matchingFieldsReader == null || matchingFieldsReader.getVersion() != VERSION_CURRENT || BULK_MERGE_ENABLED == false) {
            // naive merge: read each live document field by field via the MergeVisitor and re-write it
            StoredFieldsReader storedFieldsReader = mergeState.storedFieldsReaders[readerIndex];
            if (storedFieldsReader != null) {
                storedFieldsReader.checkIntegrity();
            }
            for (int docID = 0; docID < maxDoc; docID++) {
                if (liveDocs != null && liveDocs.get(docID) == false) {
                    continue;
                }
                startDocument();
                storedFieldsReader.visitDocument(docID, visitor);
                finishDocument();
                ++docCount;
            }
        } else if (matchingFieldsReader.getCompressionMode() == compressionMode && matchingFieldsReader.getChunkSize() == chunkSize && matchingFieldsReader.getPackedIntsVersion() == PackedInts.VERSION_CURRENT && liveDocs == null && !tooDirty(matchingFieldsReader)) {
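            // Bulk byte copy is only safe when the incoming segment was written with the
            // same compression settings, has no deletions, and is not dominated by dirty
            // (force-flushed) chunks; tooDirty() is the heuristic guarding the last case.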
            // if the format is older, it's always handled by the naive merge case above
            assert matchingFieldsReader.getVersion() == VERSION_CURRENT;
            matchingFieldsReader.checkIntegrity();
            // flush any pending chunks
            if (numBufferedDocs > 0) {
                flush();
                // incomplete: we had to force this flush
                numDirtyChunks++;
            }
            // iterate over each chunk. we use the stored fields index to find chunk boundaries,
            // read the docstart + doccount from the chunk header (we write a new header, since doc numbers will change),
            // and just copy the bytes directly.
            IndexInput rawDocs = matchingFieldsReader.getFieldsStream();
            CompressingStoredFieldsIndexReader index = matchingFieldsReader.getIndexReader();
            rawDocs.seek(index.getStartPointer(0));
            int docID = 0;
            while (docID < maxDoc) {
                // read header
                int base = rawDocs.readVInt();
                if (base != docID) {
                    throw new CorruptIndexException("invalid state: base=" + base + ", docID=" + docID, rawDocs);
                }
                int code = rawDocs.readVInt();
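                // the low bit of code flags a "sliced" chunk (one compressed in multiple
                // blocks); the remaining high bits hold the chunk's document count.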
                // write a new index entry and new header for this chunk.
                int bufferedDocs = code >>> 1;
                indexWriter.writeIndex(bufferedDocs, fieldsStream.getFilePointer());
                // rebase the chunk onto the merged segment's docBase
                fieldsStream.writeVInt(docBase);
                fieldsStream.writeVInt(code);
                docID += bufferedDocs;
                docBase += bufferedDocs;
                docCount += bufferedDocs;
                if (docID > maxDoc) {
                    throw new CorruptIndexException("invalid state: base=" + base + ", count=" + bufferedDocs + ", maxDoc=" + maxDoc, rawDocs);
                }
                // copy bytes until the next chunk boundary (or end of chunk data).
                // using the stored fields index for this isn't the most efficient, but fast enough
                // and is a source of redundancy for detecting bad things.
                final long end;
                if (docID == maxDoc) {
                    end = matchingFieldsReader.getMaxPointer();
                } else {
                    end = index.getStartPointer(docID);
                }
                fieldsStream.copyBytes(rawDocs, end - rawDocs.getFilePointer());
            }
            if (rawDocs.getFilePointer() != matchingFieldsReader.getMaxPointer()) {
                throw new CorruptIndexException("invalid state: pos=" + rawDocs.getFilePointer() + ", max=" + matchingFieldsReader.getMaxPointer(), rawDocs);
            }
            // since we bulk merged all chunks, we inherit any dirty ones from this segment.
            numChunks += matchingFieldsReader.getNumChunks();
            numDirtyChunks += matchingFieldsReader.getNumDirtyChunks();
        } else {
            // if the format is older, it's always handled by the naive merge case above
            assert matchingFieldsReader.getVersion() == VERSION_CURRENT;
            matchingFieldsReader.checkIntegrity();
            for (int docID = 0; docID < maxDoc; docID++) {
                if (liveDocs != null && liveDocs.get(docID) == false) {
                    continue;
                }
                SerializedDocument doc = matchingFieldsReader.document(docID);
                startDocument();
                bufferedDocs.copyBytes(doc.in, doc.length);
                numStoredFieldsInDoc = doc.numStoredFields;
                finishDocument();
                ++docCount;
            }
        }
    }
    finish(mergeState.mergeFieldInfos, docCount);
    return docCount;
}
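
The CompressingStoredFieldsMergeSub used in the index-sort branch is a private helper nested in the same writer class. It is not shown on this page; the sketch below is reconstructed from how merge() drives it (docID, mappedDocID, nextDoc), so treat it as an approximation rather than the verbatim source.

// Sketch: one sub per segment; DocIDMerger interleaves them in index-sort order.
private static class CompressingStoredFieldsMergeSub extends DocIDMerger.Sub {

    private final CompressingStoredFieldsReader reader;
    private final int maxDoc;
    int docID = -1;

    CompressingStoredFieldsMergeSub(CompressingStoredFieldsReader reader, MergeState.DocMap docMap, int maxDoc) {
        // the DocMap translates this segment's docIDs into the merged segment's docIDs
        super(docMap);
        this.maxDoc = maxDoc;
        this.reader = reader;
    }

    @Override
    public int nextDoc() {
        // advance through this segment's docIDs; DocIDMerger fills in mappedDocID
        if (docID == maxDoc - 1) {
            return DocIdSetIterator.NO_MORE_DOCS;
        } else {
            return ++docID;
        }
    }
}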
Also used: SerializedDocument (org.apache.lucene.codecs.compressing.CompressingStoredFieldsReader.SerializedDocument), ArrayList (java.util.ArrayList), CorruptIndexException (org.apache.lucene.index.CorruptIndexException), StoredFieldsReader (org.apache.lucene.codecs.StoredFieldsReader), Bits (org.apache.lucene.util.Bits), IndexInput (org.apache.lucene.store.IndexInput)

Aggregations

ArrayList (java.util.ArrayList): 1 usage
StoredFieldsReader (org.apache.lucene.codecs.StoredFieldsReader): 1 usage
SerializedDocument (org.apache.lucene.codecs.compressing.CompressingStoredFieldsReader.SerializedDocument): 1 usage
CorruptIndexException (org.apache.lucene.index.CorruptIndexException): 1 usage
IndexInput (org.apache.lucene.store.IndexInput): 1 usage
Bits (org.apache.lucene.util.Bits): 1 usage