Search in sources:

Example 1 with TrackingDirectoryWrapper

Use of org.apache.lucene.store.TrackingDirectoryWrapper in project lucene-solr by apache.

From class DocumentsWriterPerThread, method sealFlushedSegment.

/**
   * Seals the {@link SegmentInfo} for the new flushed segment and persists
   * the deleted documents {@link MutableBits}.
   */
void sealFlushedSegment(FlushedSegment flushedSegment, Sorter.DocMap sortMap) throws IOException {
    assert flushedSegment != null;
    SegmentCommitInfo newSegment = flushedSegment.segmentInfo;
    IndexWriter.setDiagnostics(newSegment.info, IndexWriter.SOURCE_FLUSH);
    IOContext context = new IOContext(new FlushInfo(newSegment.info.maxDoc(), newSegment.sizeInBytes()));
    boolean success = false;
    try {
        if (indexWriterConfig.getUseCompoundFile()) {
            Set<String> originalFiles = newSegment.info.files();
            // TODO: like addIndexes, we are relying on createCompoundFile to successfully cleanup...
            indexWriter.createCompoundFile(infoStream, new TrackingDirectoryWrapper(directory), newSegment.info, context);
            filesToDelete.addAll(originalFiles);
            newSegment.info.setUseCompoundFile(true);
        }
        // Have codec write SegmentInfo.  Must do this after
        // creating CFS so that 1) .si isn't slurped into CFS,
        // and 2) .si reflects useCompoundFile=true change
        // above:
        codec.segmentInfoFormat().write(directory, newSegment.info, context);
        // slurp the del file into CFS:
        if (flushedSegment.liveDocs != null) {
            final int delCount = flushedSegment.delCount;
            assert delCount > 0;
            if (infoStream.isEnabled("DWPT")) {
                infoStream.message("DWPT", "flush: write " + delCount + " deletes gen=" + flushedSegment.segmentInfo.getDelGen());
            }
            // TODO: we should prune the segment if it's 100%
            // deleted... but merge will also catch it.
            // TODO: in the NRT case it'd be better to hand
            // this del vector over to the
            // shortly-to-be-opened SegmentReader and let it
            // carry the changes; there's no reason to use
            // filesystem as intermediary here.
            SegmentCommitInfo info = flushedSegment.segmentInfo;
            Codec codec = info.info.getCodec();
            final MutableBits bits;
            if (sortMap == null) {
                bits = flushedSegment.liveDocs;
            } else {
                bits = sortLiveDocs(flushedSegment.liveDocs, sortMap);
            }
            codec.liveDocsFormat().writeLiveDocs(bits, directory, info, delCount, context);
            newSegment.setDelCount(delCount);
            newSegment.advanceDelGen();
        }
        success = true;
    } finally {
        if (!success) {
            if (infoStream.isEnabled("DWPT")) {
                infoStream.message("DWPT", "hit exception creating compound file for newly flushed segment " + newSegment.info.name);
            }
        }
    }
}
Also used: Codec(org.apache.lucene.codecs.Codec) MutableBits(org.apache.lucene.util.MutableBits) IOContext(org.apache.lucene.store.IOContext) FlushInfo(org.apache.lucene.store.FlushInfo) TrackingDirectoryWrapper(org.apache.lucene.store.TrackingDirectoryWrapper)
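Before the IndexWriter-internal uses below, a minimal sketch of the wrapper's basic contract may help: files created through the wrapper are recorded by name, and deleting through the wrapper forgets them again. This is a standalone illustration rather than code from lucene-solr; the RAMDirectory delegate and the file name _0.dat are assumptions chosen only for the demo.

import java.util.Set;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.TrackingDirectoryWrapper;

public class TrackingDirectoryWrapperDemo {

    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        TrackingDirectoryWrapper tracking = new TrackingDirectoryWrapper(dir);

        // Any file created through the wrapper is remembered by name.
        try (IndexOutput out = tracking.createOutput("_0.dat", IOContext.DEFAULT)) {
            out.writeInt(42);
        }
        Set<String> created = tracking.getCreatedFiles();
        System.out.println("created: " + created); // created: [_0.dat]

        // Deleting through the wrapper also drops the name from the tracked set.
        tracking.deleteFile("_0.dat");
        System.out.println("after delete: " + tracking.getCreatedFiles()); // after delete: []
    }
}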

Example 2 with TrackingDirectoryWrapper

Use of org.apache.lucene.store.TrackingDirectoryWrapper in project lucene-solr by apache.

From class TestDoc, method merge.

private SegmentCommitInfo merge(Directory dir, SegmentCommitInfo si1, SegmentCommitInfo si2, String merged, boolean useCompoundFile) throws Exception {
    IOContext context = newIOContext(random(), new IOContext(new MergeInfo(-1, -1, false, -1)));
    SegmentReader r1 = new SegmentReader(si1, Version.LATEST.major, context);
    SegmentReader r2 = new SegmentReader(si2, Version.LATEST.major, context);
    final Codec codec = Codec.getDefault();
    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(si1.info.dir);
    final SegmentInfo si = new SegmentInfo(si1.info.dir, Version.LATEST, null, merged, -1, false, codec, Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);
    SegmentMerger merger = new SegmentMerger(Arrays.<CodecReader>asList(r1, r2), si, InfoStream.getDefault(), trackingDir, new FieldInfos.FieldNumbers(), context);
    MergeState mergeState = merger.merge();
    r1.close();
    r2.close();
    si.setFiles(new HashSet<>(trackingDir.getCreatedFiles()));
    if (useCompoundFile) {
        Collection<String> filesToDelete = si.files();
        codec.compoundFormat().write(dir, si, context);
        si.setUseCompoundFile(true);
        for (String name : filesToDelete) {
            si1.info.dir.deleteFile(name);
        }
    }
    return new SegmentCommitInfo(si, 0, -1L, -1L, -1L);
}
Also used: MergeInfo(org.apache.lucene.store.MergeInfo) TrackingDirectoryWrapper(org.apache.lucene.store.TrackingDirectoryWrapper) Codec(org.apache.lucene.codecs.Codec) IOContext(org.apache.lucene.store.IOContext)

Example 3 with TrackingDirectoryWrapper

Use of org.apache.lucene.store.TrackingDirectoryWrapper in project lucene-solr by apache.

From class IndexWriter, method mergeMiddle.

/** Does the actual (time-consuming) work of the merge,
   *  but without holding synchronized lock on IndexWriter
   *  instance */
private int mergeMiddle(MergePolicy.OneMerge merge, MergePolicy mergePolicy) throws IOException {
    merge.checkAborted();
    Directory mergeDirectory = config.getMergeScheduler().wrapForMerge(merge, directory);
    List<SegmentCommitInfo> sourceSegments = merge.segments;
    IOContext context = new IOContext(merge.getStoreMergeInfo());
    final TrackingDirectoryWrapper dirWrapper = new TrackingDirectoryWrapper(mergeDirectory);
    if (infoStream.isEnabled("IW")) {
        infoStream.message("IW", "merging " + segString(merge.segments));
    }
    merge.readers = new ArrayList<>(sourceSegments.size());
    // This is try/finally to make sure merger's readers are
    // closed:
    boolean success = false;
    try {
        int segUpto = 0;
        while (segUpto < sourceSegments.size()) {
            final SegmentCommitInfo info = sourceSegments.get(segUpto);
            // Hold onto the "live" reader; we will use this to
            // commit merged deletes
            final ReadersAndUpdates rld = readerPool.get(info, true);
            // Carefully pull the most recent live docs and reader
            SegmentReader reader;
            final Bits liveDocs;
            final int delCount;
            synchronized (this) {
                // Must sync to ensure BufferedDeletesStream cannot change liveDocs,
                // pendingDeleteCount and field updates while we pull a copy:
                reader = rld.getReaderForMerge(context);
                liveDocs = rld.getReadOnlyLiveDocs();
                delCount = rld.getPendingDeleteCount() + info.getDelCount();
                assert reader != null;
                assert rld.verifyDocCounts();
                if (infoStream.isEnabled("IW")) {
                    if (rld.getPendingDeleteCount() != 0) {
                        infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount() + " pendingDelCount=" + rld.getPendingDeleteCount());
                    } else if (info.getDelCount() != 0) {
                        infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount());
                    } else {
                        infoStream.message("IW", "seg=" + segString(info) + " no deletes");
                    }
                }
            }
            // make a new reader with updated live docs and del count.
            if (reader.numDeletedDocs() != delCount) {
                // beware of zombies
                assert delCount > reader.numDeletedDocs();
                SegmentReader newReader;
                synchronized (this) {
                    // We must also sync on IW here, because another thread could be writing
                    // new DV updates / remove old gen field infos files causing FNFE:
                    newReader = new SegmentReader(info, reader, liveDocs, info.info.maxDoc() - delCount);
                }
                boolean released = false;
                try {
                    rld.release(reader);
                    released = true;
                } finally {
                    if (!released) {
                        newReader.decRef();
                    }
                }
                reader = newReader;
            }
            merge.readers.add(reader);
            assert delCount <= info.info.maxDoc() : "delCount=" + delCount + " info.maxDoc=" + info.info.maxDoc() + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount();
            segUpto++;
        }
        //      System.out.println("[" + Thread.currentThread().getName() + "] IW.mergeMiddle: merging " + merge.getMergeReaders());
        // Let the merge wrap readers
        List<CodecReader> mergeReaders = new ArrayList<>();
        for (SegmentReader reader : merge.readers) {
            CodecReader wrappedReader = merge.wrapForMerge(reader);
            validateMergeReader(wrappedReader);
            mergeReaders.add(wrappedReader);
        }
        final SegmentMerger merger = new SegmentMerger(mergeReaders, merge.info.info, infoStream, dirWrapper, globalFieldNumberMap, context);
        merge.checkAborted();
        merge.mergeStartNS = System.nanoTime();
        // This is where all the work happens:
        if (merger.shouldMerge()) {
            merger.merge();
        }
        MergeState mergeState = merger.mergeState;
        assert mergeState.segmentInfo == merge.info.info;
        merge.info.info.setFiles(new HashSet<>(dirWrapper.getCreatedFiles()));
        if (infoStream.isEnabled("IW")) {
            if (merger.shouldMerge()) {
                String pauseInfo = merge.getMergeProgress().getPauseTimes().entrySet().stream().filter((e) -> e.getValue() > 0).map((e) -> String.format(Locale.ROOT, "%.1f sec %s", e.getValue() / 1000000000., e.getKey().name().toLowerCase(Locale.ROOT))).collect(Collectors.joining(", "));
                if (!pauseInfo.isEmpty()) {
                    pauseInfo = " (" + pauseInfo + ")";
                }
                long t1 = System.nanoTime();
                double sec = (t1 - merge.mergeStartNS) / 1000000000.;
                double segmentMB = (merge.info.sizeInBytes() / 1024. / 1024.);
                infoStream.message("IW", "merge codec=" + codec + " maxDoc=" + merge.info.info.maxDoc() + "; merged segment has " + (mergeState.mergeFieldInfos.hasVectors() ? "vectors" : "no vectors") + "; " + (mergeState.mergeFieldInfos.hasNorms() ? "norms" : "no norms") + "; " + (mergeState.mergeFieldInfos.hasDocValues() ? "docValues" : "no docValues") + "; " + (mergeState.mergeFieldInfos.hasProx() ? "prox" : "no prox") + "; " + (mergeState.mergeFieldInfos.hasProx() ? "freqs" : "no freqs") + "; " + (mergeState.mergeFieldInfos.hasPointValues() ? "points" : "no points") + "; " + String.format(Locale.ROOT, "%.1f sec%s to merge segment [%.2f MB, %.2f MB/sec]", sec, pauseInfo, segmentMB, segmentMB / sec));
            } else {
                infoStream.message("IW", "skip merging fully deleted segments");
            }
        }
        if (merger.shouldMerge() == false) {
            // Merge would produce a 0-doc segment, so we do nothing except commit the merge to remove all the 0-doc segments that we "merged":
            assert merge.info.info.maxDoc() == 0;
            commitMerge(merge, mergeState);
            return 0;
        }
        assert merge.info.info.maxDoc() > 0;
        // Very important to do this before opening the reader
        // because codec must know if prox was written for
        // this segment:
        //System.out.println("merger set hasProx=" + merger.hasProx() + " seg=" + merge.info.name);
        boolean useCompoundFile;
        synchronized (this) {
            // Guard segmentInfos
            useCompoundFile = mergePolicy.useCompoundFile(segmentInfos, merge.info, this);
        }
        if (useCompoundFile) {
            success = false;
            Collection<String> filesToRemove = merge.info.files();
            TrackingDirectoryWrapper trackingCFSDir = new TrackingDirectoryWrapper(mergeDirectory);
            try {
                createCompoundFile(infoStream, trackingCFSDir, merge.info.info, context);
                success = true;
            } catch (Throwable t) {
                synchronized (this) {
                    if (merge.isAborted()) {
                        // Merge was aborted (e.g. by rollback) while we were building the CFS;
                        // fall through to the finally clause below, which removes the non-CFS
                        // merged files:
                        if (infoStream.isEnabled("IW")) {
                            infoStream.message("IW", "hit merge abort exception creating compound file during merge");
                        }
                        return 0;
                    } else {
                        handleMergeException(t, merge);
                    }
                }
            } finally {
                if (success == false) {
                    if (infoStream.isEnabled("IW")) {
                        infoStream.message("IW", "hit exception creating compound file during merge");
                    }
                    // Safe: these files must exist
                    deleteNewFiles(merge.info.files());
                }
            }
            // So that, if we hit exc in deleteNewFiles (next)
            // or in commitMerge (later), we close the
            // per-segment readers in the finally clause below:
            success = false;
            synchronized (this) {
                // delete new non cfs files directly: they were never
                // registered with IFD
                deleteNewFiles(filesToRemove);
                if (merge.isAborted()) {
                    if (infoStream.isEnabled("IW")) {
                        infoStream.message("IW", "abort merge after building CFS");
                    }
                    // Safe: these files must exist
                    deleteNewFiles(merge.info.files());
                    return 0;
                }
            }
            merge.info.info.setUseCompoundFile(true);
        } else {
            // So that, if we hit exc in commitMerge (later),
            // we close the per-segment readers in the finally
            // clause below:
            success = false;
        }
        // Have codec write SegmentInfo.  Must do this after
        // creating CFS so that 1) .si isn't slurped into CFS,
        // and 2) .si reflects useCompoundFile=true change
        // above:
        boolean success2 = false;
        try {
            codec.segmentInfoFormat().write(directory, merge.info.info, context);
            success2 = true;
        } finally {
            if (!success2) {
                // Safe: these files must exist
                deleteNewFiles(merge.info.files());
            }
        }
        if (infoStream.isEnabled("IW")) {
            infoStream.message("IW", String.format(Locale.ROOT, "merged segment size=%.3f MB vs estimate=%.3f MB", merge.info.sizeInBytes() / 1024. / 1024., merge.estimatedMergeBytes / 1024 / 1024.));
        }
        final IndexReaderWarmer mergedSegmentWarmer = config.getMergedSegmentWarmer();
        if (poolReaders && mergedSegmentWarmer != null) {
            final ReadersAndUpdates rld = readerPool.get(merge.info, true);
            final SegmentReader sr = rld.getReader(IOContext.READ);
            try {
                mergedSegmentWarmer.warm(sr);
            } finally {
                synchronized (this) {
                    rld.release(sr);
                    readerPool.release(rld);
                }
            }
        }
        if (!commitMerge(merge, mergeState)) {
            // aborted
            return 0;
        }
        success = true;
    } finally {
        // If anything above hit an exception, close the per-segment merge readers:
        if (success == false) {
            closeMergeReaders(merge, true);
        }
    }
    return merge.info.info.maxDoc();
}
Also used: Query(org.apache.lucene.search.Query) Arrays(java.util.Arrays) MMapDirectory(org.apache.lucene.store.MMapDirectory) Date(java.util.Date) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) OpenMode(org.apache.lucene.index.IndexWriterConfig.OpenMode) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Locale(java.util.Locale) Map(java.util.Map) Directory(org.apache.lucene.store.Directory) LockValidatingDirectoryWrapper(org.apache.lucene.store.LockValidatingDirectoryWrapper) IOContext(org.apache.lucene.store.IOContext) Sort(org.apache.lucene.search.Sort) BytesRef(org.apache.lucene.util.BytesRef) Collection(java.util.Collection) Set(java.util.Set) FieldInfosFormat(org.apache.lucene.codecs.FieldInfosFormat) MatchAllDocsQuery(org.apache.lucene.search.MatchAllDocsQuery) Collectors(java.util.stream.Collectors) List(java.util.List) InfoStream(org.apache.lucene.util.InfoStream) Entry(java.util.Map.Entry) Accountable(org.apache.lucene.util.Accountable) Queue(java.util.Queue) ThreadInterruptedException(org.apache.lucene.util.ThreadInterruptedException) FieldNumbers(org.apache.lucene.index.FieldInfos.FieldNumbers) LockObtainFailedException(org.apache.lucene.store.LockObtainFailedException) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) MergeInfo(org.apache.lucene.store.MergeInfo) BinaryDocValuesUpdate(org.apache.lucene.index.DocValuesUpdate.BinaryDocValuesUpdate) Lock(org.apache.lucene.store.Lock) LinkedList(java.util.LinkedList) FSDirectory(org.apache.lucene.store.FSDirectory) Bits(org.apache.lucene.util.Bits) ArrayUtil(org.apache.lucene.util.ArrayUtil) Iterator(java.util.Iterator) UnicodeUtil(org.apache.lucene.util.UnicodeUtil) Analyzer(org.apache.lucene.analysis.Analyzer) NumericDocValuesUpdate(org.apache.lucene.index.DocValuesUpdate.NumericDocValuesUpdate) IOUtils(org.apache.lucene.util.IOUtils) IOException(java.io.IOException) Version(org.apache.lucene.util.Version) AtomicLong(java.util.concurrent.atomic.AtomicLong) Constants(org.apache.lucene.util.Constants) StringHelper(org.apache.lucene.util.StringHelper) Field(org.apache.lucene.document.Field) Closeable(java.io.Closeable) Codec(org.apache.lucene.codecs.Codec) TrackingDirectoryWrapper(org.apache.lucene.store.TrackingDirectoryWrapper) FlushInfo(org.apache.lucene.store.FlushInfo) Collections(java.util.Collections)
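Note that mergeMiddle layers two independent wrappers over the same merge directory: dirWrapper records every file the SegmentMerger writes (later registered via setFiles), while trackingCFSDir records only the compound-file pass so the pre-CFS files can be dropped afterwards. The sketch below isolates that layering under assumed conditions; RAMDirectory and the _1.* file names are illustrative stand-ins, not the real codec files.

import java.util.Set;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.TrackingDirectoryWrapper;

public class TwoTrackingWrappersDemo {

    private static void createFile(Directory dir, String name) throws Exception {
        try (IndexOutput out = dir.createOutput(name, IOContext.DEFAULT)) {
            out.writeByte((byte) 0);
        }
    }

    public static void main(String[] args) throws Exception {
        Directory mergeDirectory = new RAMDirectory();

        // First wrapper: stands in for dirWrapper, seeing the files the merge itself writes.
        TrackingDirectoryWrapper dirWrapper = new TrackingDirectoryWrapper(mergeDirectory);
        createFile(dirWrapper, "_1.fdt");
        createFile(dirWrapper, "_1.fdx");

        // Second wrapper over the same directory: stands in for trackingCFSDir,
        // seeing only the compound-file pass.
        TrackingDirectoryWrapper trackingCFSDir = new TrackingDirectoryWrapper(mergeDirectory);
        createFile(trackingCFSDir, "_1.cfs");

        Set<String> mergedFiles = dirWrapper.getCreatedFiles();   // e.g. [_1.fdt, _1.fdx]
        Set<String> cfsFiles = trackingCFSDir.getCreatedFiles();  // e.g. [_1.cfs]
        System.out.println(mergedFiles + " / " + cfsFiles);

        // Once the compound file exists, the pre-CFS files can be removed,
        // mirroring deleteNewFiles(filesToRemove) in mergeMiddle.
        for (String name : mergedFiles) {
            mergeDirectory.deleteFile(name);
        }
    }
}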

Example 4 with TrackingDirectoryWrapper

Use of org.apache.lucene.store.TrackingDirectoryWrapper in project lucene-solr by apache.

From class ReadersAndUpdates, method writeFieldUpdates.

// Writes field updates (new _X_N updates files) to the directory
public synchronized void writeFieldUpdates(Directory dir, DocValuesFieldUpdates.Container dvUpdates) throws IOException {
    assert Thread.holdsLock(writer);
    assert dvUpdates.any();
    // Do this so we can delete any created files on
    // exception; this saves all codecs from having to do
    // it:
    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
    final Map<Integer, Set<String>> newDVFiles = new HashMap<>();
    Set<String> fieldInfosFiles = null;
    FieldInfos fieldInfos = null;
    boolean success = false;
    try {
        final Codec codec = info.info.getCodec();
        // reader could be null e.g. for a just merged segment (from
        // IndexWriter.commitMergedDeletes).
        final SegmentReader reader = this.reader == null ? new SegmentReader(info, writer.segmentInfos.getIndexCreatedVersionMajor(), IOContext.READONCE) : this.reader;
        try {
            // clone FieldInfos so that we can update their dvGen separately from
            // the reader's infos and write them to a new fieldInfos_gen file
            FieldInfos.Builder builder = new FieldInfos.Builder(writer.globalFieldNumberMap);
            // clone FI.attributes as well FI.dvGen
            for (FieldInfo fi : reader.getFieldInfos()) {
                FieldInfo clone = builder.add(fi);
                // copy the stuff FieldInfos.Builder doesn't copy
                for (Entry<String, String> e : fi.attributes().entrySet()) {
                    clone.putAttribute(e.getKey(), e.getValue());
                }
                clone.setDocValuesGen(fi.getDocValuesGen());
            }
            // create new fields or update existing ones to have NumericDV type
            for (String f : dvUpdates.numericDVUpdates.keySet()) {
                FieldInfo fieldInfo = builder.getOrAdd(f);
                fieldInfo.setDocValuesType(DocValuesType.NUMERIC);
            }
            // create new fields or update existing ones to have BinaryDV type
            for (String f : dvUpdates.binaryDVUpdates.keySet()) {
                FieldInfo fieldInfo = builder.getOrAdd(f);
                fieldInfo.setDocValuesType(DocValuesType.BINARY);
            }
            fieldInfos = builder.finish();
            final DocValuesFormat docValuesFormat = codec.docValuesFormat();
            //          System.out.println("[" + Thread.currentThread().getName() + "] RLD.writeFieldUpdates: applying numeric updates; seg=" + info + " updates=" + numericFieldUpdates);
            handleNumericDVUpdates(fieldInfos, dvUpdates.numericDVUpdates, trackingDir, docValuesFormat, reader, newDVFiles);
            //        System.out.println("[" + Thread.currentThread().getName() + "] RAU.writeFieldUpdates: applying binary updates; seg=" + info + " updates=" + dvUpdates.binaryDVUpdates);
            handleBinaryDVUpdates(fieldInfos, dvUpdates.binaryDVUpdates, trackingDir, docValuesFormat, reader, newDVFiles);
            //        System.out.println("[" + Thread.currentThread().getName() + "] RAU.writeFieldUpdates: write fieldInfos; seg=" + info);
            fieldInfosFiles = writeFieldInfosGen(fieldInfos, trackingDir, docValuesFormat, codec.fieldInfosFormat());
        } finally {
            if (reader != this.reader) {
                //          System.out.println("[" + Thread.currentThread().getName() + "] RLD.writeLiveDocs: closeReader " + reader);
                reader.close();
            }
        }
        success = true;
    } finally {
        if (!success) {
            // Advance only the nextWriteFieldInfosGen and nextWriteDocValuesGen, so
            // that a 2nd attempt to write will write to a new file
            info.advanceNextWriteFieldInfosGen();
            info.advanceNextWriteDocValuesGen();
            // Delete any partially created file(s):
            for (String fileName : trackingDir.getCreatedFiles()) {
                IOUtils.deleteFilesIgnoringExceptions(dir, fileName);
            }
        }
    }
    // copy all the updates to mergingUpdates, so they can later be applied to the merged segment
    if (isMerging) {
        for (Entry<String, NumericDocValuesFieldUpdates> e : dvUpdates.numericDVUpdates.entrySet()) {
            DocValuesFieldUpdates updates = mergingDVUpdates.get(e.getKey());
            if (updates == null) {
                mergingDVUpdates.put(e.getKey(), e.getValue());
            } else {
                updates.merge(e.getValue());
            }
        }
        for (Entry<String, BinaryDocValuesFieldUpdates> e : dvUpdates.binaryDVUpdates.entrySet()) {
            DocValuesFieldUpdates updates = mergingDVUpdates.get(e.getKey());
            if (updates == null) {
                mergingDVUpdates.put(e.getKey(), e.getValue());
            } else {
                updates.merge(e.getValue());
            }
        }
    }
    // writing field updates succeeded
    assert fieldInfosFiles != null;
    info.setFieldInfosFiles(fieldInfosFiles);
    // Carry over existing doc-values update files for fields that were not updated in this pass:
    assert !newDVFiles.isEmpty();
    for (Entry<Integer, Set<String>> e : info.getDocValuesUpdatesFiles().entrySet()) {
        if (!newDVFiles.containsKey(e.getKey())) {
            newDVFiles.put(e.getKey(), e.getValue());
        }
    }
    info.setDocValuesUpdatesFiles(newDVFiles);
    // wrote new files, should checkpoint()
    writer.checkpoint();
    // if there is a reader open, reopen it to reflect the updates
    if (reader != null) {
        SegmentReader newReader = new SegmentReader(info, reader, liveDocs, info.info.maxDoc() - info.getDelCount() - pendingDeleteCount);
        boolean reopened = false;
        try {
            reader.decRef();
            reader = newReader;
            reopened = true;
        } finally {
            if (!reopened) {
                newReader.decRef();
            }
        }
    }
}
Also used: Set(java.util.Set) HashMap(java.util.HashMap) TrackingDirectoryWrapper(org.apache.lucene.store.TrackingDirectoryWrapper) DocValuesFormat(org.apache.lucene.codecs.DocValuesFormat) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Codec(org.apache.lucene.codecs.Codec)
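The try/finally shape above, write through a tracking wrapper and on failure delete whatever it created, is the rollback idiom this class exists to support. Below is a self-contained sketch of just that idiom; the writeOrRollback method, the RAMDirectory delegate, and the file name are hypothetical and only mirror the cleanup in writeFieldUpdates.

import java.io.IOException;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.TrackingDirectoryWrapper;
import org.apache.lucene.util.IOUtils;

public class WriteOrRollbackDemo {

    // Writes one file through a tracking wrapper; if anything fails,
    // removes every file that was created before the failure.
    static void writeOrRollback(Directory dir, String fileName) throws IOException {
        TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
        boolean success = false;
        try {
            try (IndexOutput out = trackingDir.createOutput(fileName, IOContext.DEFAULT)) {
                out.writeString("payload");
            }
            success = true;
        } finally {
            if (!success) {
                // Same cleanup as writeFieldUpdates: delete any partially created file(s).
                for (String created : trackingDir.getCreatedFiles()) {
                    IOUtils.deleteFilesIgnoringExceptions(dir, created);
                }
            }
        }
    }

    public static void main(String[] args) throws IOException {
        writeOrRollback(new RAMDirectory(), "_2_1.dvd");
    }
}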

Example 5 with TrackingDirectoryWrapper

Use of org.apache.lucene.store.TrackingDirectoryWrapper in project lucene-solr by apache.

From class ReadersAndUpdates, method writeLiveDocs.

// Commit live docs (writes new _X_N.del files) and field updates (writes new
// _X_N updates files) to the directory; returns true if it wrote any file
// and false if there were no new deletes or updates to write:
public synchronized boolean writeLiveDocs(Directory dir) throws IOException {
    assert Thread.holdsLock(writer);
    //System.out.println("rld.writeLiveDocs seg=" + info + " pendingDelCount=" + pendingDeleteCount + " numericUpdates=" + numericUpdates);
    if (pendingDeleteCount == 0) {
        return false;
    }
    // We have new deletes
    assert liveDocs.length() == info.info.maxDoc();
    // Do this so we can delete any created files on
    // exception; this saves all codecs from having to do
    // it:
    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
    // We can write directly to the actual name (vs to a
    // .tmp & renaming it) because the file is not live
    // until segments file is written:
    boolean success = false;
    try {
        Codec codec = info.info.getCodec();
        codec.liveDocsFormat().writeLiveDocs((MutableBits) liveDocs, trackingDir, info, pendingDeleteCount, IOContext.DEFAULT);
        success = true;
    } finally {
        if (!success) {
            // Advance only the nextWriteDelGen so that a 2nd
            // attempt to write will write to a new file
            info.advanceNextWriteDelGen();
            // Delete any partially created file(s):
            for (String fileName : trackingDir.getCreatedFiles()) {
                IOUtils.deleteFilesIgnoringExceptions(dir, fileName);
            }
        }
    }
    // If we hit an exc in the line above (eg disk full)
    // then info's delGen remains pointing to the previous
    // (successfully written) del docs:
    info.advanceDelGen();
    info.setDelCount(info.getDelCount() + pendingDeleteCount);
    pendingDeleteCount = 0;
    return true;
}
Also used: Codec(org.apache.lucene.codecs.Codec) TrackingDirectoryWrapper(org.apache.lucene.store.TrackingDirectoryWrapper)
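TrackingDirectoryWrapper is a FilterDirectory, so it adds bookkeeping without changing how the underlying directory behaves; writeLiveDocs above, for instance, deletes the partially created files against the original dir it already holds rather than through the wrapper. A minimal sketch, assuming a RAMDirectory delegate:

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.TrackingDirectoryWrapper;

public class UnwrapTrackingDirectoryDemo {

    public static void main(String[] args) throws Exception {
        Directory raw = new RAMDirectory();
        TrackingDirectoryWrapper tracking = new TrackingDirectoryWrapper(raw);

        // FilterDirectory.unwrap peels off wrapper layers and returns the delegate.
        Directory delegate = FilterDirectory.unwrap(tracking);
        System.out.println(delegate == raw); // true

        tracking.close();
    }
}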

Aggregations

TrackingDirectoryWrapper (org.apache.lucene.store.TrackingDirectoryWrapper) 10
IOContext (org.apache.lucene.store.IOContext) 7
Codec (org.apache.lucene.codecs.Codec) 5
FlushInfo (org.apache.lucene.store.FlushInfo) 5
IOException (java.io.IOException) 3
MergeInfo (org.apache.lucene.store.MergeInfo) 3
ArrayList (java.util.ArrayList) 2
HashMap (java.util.HashMap) 2
Set (java.util.Set) 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 2
DocValuesConsumer (org.apache.lucene.codecs.DocValuesConsumer) 2
Sort (org.apache.lucene.search.Sort) 2
Closeable (java.io.Closeable) 1
Arrays (java.util.Arrays) 1
Collection (java.util.Collection) 1
Collections (java.util.Collections) 1
Date (java.util.Date) 1
HashSet (java.util.HashSet) 1
Iterator (java.util.Iterator) 1
LinkedList (java.util.LinkedList) 1