Search in sources :

Example 1 with MergeTrigger

Usage of org.apache.lucene.index.MergeTrigger in the OpenSearch project by opensearch-project.

From the class EvilInternalEngineTests, the method testOutOfMemoryErrorWhileMergingIsRethrownAndIsUncaught:

/**
 * Verifies that an {@link OutOfMemoryError} thrown while merging is rethrown rather than
 * swallowed, so it reaches the thread's default uncaught-exception handler. In production this
 * is what allows a fatal error during a background merge to tear the node down.
 *
 * <p>The test installs a capturing default uncaught-exception handler, wires a merge policy and
 * merge scheduler that (once the test publishes a segment list) schedule exactly those segments
 * for merging and then throw an {@link OutOfMemoryError} from the merge itself, and finally
 * asserts that this error surfaced uncaught on some thread.
 */
public void testOutOfMemoryErrorWhileMergingIsRethrownAndIsUncaught() throws IOException, InterruptedException {
    // close the engine created by the test fixture; a fresh one with custom merge wiring is built below
    engine.close();
    // captures whatever throwable reaches the default uncaught-exception handler
    final AtomicReference<Throwable> maybeFatal = new AtomicReference<>();
    // released by the handler so the test can wait for the (asynchronous) uncaught error
    final CountDownLatch latch = new CountDownLatch(1);
    // remember the JVM-wide handler so it can be restored in the finally block
    final Thread.UncaughtExceptionHandler uncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler();
    try {
        /*
         * We want to test that the out of memory error thrown from the merge goes uncaught; this gives us confidence that an out of
         * memory error thrown while merging will lead to the node being torn down.
         */
        Thread.setDefaultUncaughtExceptionHandler((t, e) -> {
            maybeFatal.set(e);
            latch.countDown();
        });
        // once the test publishes segments here, the merge policy/scheduler below offer them for merging;
        // the scheduler consumes them exactly once via getAndSet(null)
        final AtomicReference<List<SegmentCommitInfo>> segmentsReference = new AtomicReference<>();
        final FilterMergePolicy mergePolicy = new FilterMergePolicy(newMergePolicy()) {

            @Override
            public MergeSpecification findForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentCommitInfo, Boolean> segmentsToMerge, MergeContext mergeContext) throws IOException {
                // if the test has published segments, force a merge over exactly those segments
                final List<SegmentCommitInfo> segments = segmentsReference.get();
                if (segments != null) {
                    final MergeSpecification spec = new MergeSpecification();
                    spec.add(new OneMerge(segments));
                    return spec;
                }
                // otherwise defer to the wrapped (randomized) merge policy
                return super.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, mergeContext);
            }

            @Override
            public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, MergeContext mergeContext) throws IOException {
                // same behavior as findForcedMerges: prefer the test-published segments when present
                final List<SegmentCommitInfo> segments = segmentsReference.get();
                if (segments != null) {
                    final MergeSpecification spec = new MergeSpecification();
                    spec.add(new OneMerge(segments));
                    return spec;
                }
                return super.findMerges(mergeTrigger, segmentInfos, mergeContext);
            }
        };
        try (Engine e = createEngine(defaultSettings, store, primaryTranslogDir, mergePolicy, (directory, iwc) -> {
            final MergeScheduler mergeScheduler = iwc.getMergeScheduler();
            assertNotNull(mergeScheduler);
            // wrap the scheduler so that the merge it executes throws OutOfMemoryError
            iwc.setMergeScheduler(new FilterMergeScheduler(mergeScheduler) {

                @Override
                public void merge(MergeSource mergeSource, MergeTrigger trigger) throws IOException {
                    final FilterMergeSource wrappedMergeSource = new FilterMergeSource(mergeSource) {

                        @Override
                        public MergePolicy.OneMerge getNextMerge() {
                            // synchronized to serialize hand-off of the published segments with
                            // concurrent getNextMerge calls from the merge scheduler's threads
                            synchronized (mergeSource) {
                                /*
                                 * This will be called when we flush when we will not be ready to return the segments.
                                 * After the segments are on disk, we can only return them from here once or the merge
                                 * scheduler will be stuck in a loop repeatedly peeling off the same segments to schedule
                                 * for merging.
                                 */
                                if (segmentsReference.get() == null) {
                                    return super.getNextMerge();
                                } else {
                                    // one-shot: getAndSet(null) guarantees the segments are handed out only once
                                    final List<SegmentCommitInfo> segments = segmentsReference.getAndSet(null);
                                    return new MergePolicy.OneMerge(segments);
                                }
                            }
                        }

                        @Override
                        public void merge(MergePolicy.OneMerge merge) {
                            // simulate a fatal error inside the actual merge work
                            throw new OutOfMemoryError("640K ought to be enough for anybody");
                        }
                    };
                    super.merge(wrappedMergeSource, trigger);
                }
            });
            return new IndexWriter(directory, iwc);
        }, null, null)) {
            // force segments to exist on disk
            final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null);
            e.index(indexForDoc(doc1));
            e.flush();
            // snapshot the committed segments and publish them to the merge policy/scheduler above
            final List<SegmentCommitInfo> segments = StreamSupport.stream(e.getLastCommittedSegmentInfos().spliterator(), false).collect(Collectors.toList());
            segmentsReference.set(segments);
            // trigger a background merge that will be managed by the concurrent merge scheduler
            e.forceMerge(randomBoolean(), 0, false, false, false, UUIDs.randomBase64UUID());
            /*
             * Merging happens in the background on a merge thread, and the maybeDie handler is invoked on yet another thread; we have
             * to wait for these events to finish.
             */
            latch.await();
            // the OutOfMemoryError must have gone uncaught, carrying the exact message thrown above
            assertNotNull(maybeFatal.get());
            assertThat(maybeFatal.get(), instanceOf(OutOfMemoryError.class));
            assertThat(maybeFatal.get(), hasToString(containsString("640K ought to be enough for anybody")));
        }
    } finally {
        // always restore the JVM-wide handler so other tests are unaffected
        Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler);
    }
}
Also used : SegmentInfos(org.apache.lucene.index.SegmentInfos) SegmentCommitInfo(org.apache.lucene.index.SegmentCommitInfo) MergeTrigger(org.apache.lucene.index.MergeTrigger) ParsedDocument(org.opensearch.index.mapper.ParsedDocument) FilterMergePolicy(org.apache.lucene.index.FilterMergePolicy) MergePolicy(org.apache.lucene.index.MergePolicy) List(java.util.List) MergeScheduler(org.apache.lucene.index.MergeScheduler) AtomicReference(java.util.concurrent.atomic.AtomicReference) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) FilterMergePolicy(org.apache.lucene.index.FilterMergePolicy) IndexWriter(org.apache.lucene.index.IndexWriter) Map(java.util.Map)

Aggregations

IOException (java.io.IOException)1 List (java.util.List)1 Map (java.util.Map)1 CountDownLatch (java.util.concurrent.CountDownLatch)1 AtomicReference (java.util.concurrent.atomic.AtomicReference)1 FilterMergePolicy (org.apache.lucene.index.FilterMergePolicy)1 IndexWriter (org.apache.lucene.index.IndexWriter)1 MergePolicy (org.apache.lucene.index.MergePolicy)1 MergeScheduler (org.apache.lucene.index.MergeScheduler)1 MergeTrigger (org.apache.lucene.index.MergeTrigger)1 SegmentCommitInfo (org.apache.lucene.index.SegmentCommitInfo)1 SegmentInfos (org.apache.lucene.index.SegmentInfos)1 ParsedDocument (org.opensearch.index.mapper.ParsedDocument)1