Use of org.apache.lucene.index.FilterMergePolicy in the project OpenSearch by opensearch-project.
From the class EvilInternalEngineTests, the method testOutOfMemoryErrorWhileMergingIsRethrownAndIsUncaught, which verifies that an OutOfMemoryError thrown from a background merge escapes to the default uncaught exception handler rather than being swallowed.
public void testOutOfMemoryErrorWhileMergingIsRethrownAndIsUncaught() throws IOException, InterruptedException {
    engine.close();
    final AtomicReference<Throwable> maybeFatal = new AtomicReference<>();
    final CountDownLatch latch = new CountDownLatch(1);
    final Thread.UncaughtExceptionHandler uncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler();
    try {
        /*
         * We want to test that the out of memory error thrown from the merge goes uncaught; this gives us confidence
         * that an out of memory error thrown while merging will lead to the node being torn down.
         */
        Thread.setDefaultUncaughtExceptionHandler((t, e) -> {
            maybeFatal.set(e);
            latch.countDown();
        });
        final AtomicReference<List<SegmentCommitInfo>> segmentsReference = new AtomicReference<>();
        final FilterMergePolicy mergePolicy = new FilterMergePolicy(newMergePolicy()) {
            @Override
            public MergeSpecification findForcedMerges(
                SegmentInfos segmentInfos,
                int maxSegmentCount,
                Map<SegmentCommitInfo, Boolean> segmentsToMerge,
                MergeContext mergeContext
            ) throws IOException {
                final List<SegmentCommitInfo> segments = segmentsReference.get();
                if (segments != null) {
                    final MergeSpecification spec = new MergeSpecification();
                    spec.add(new OneMerge(segments));
                    return spec;
                }
                return super.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, mergeContext);
            }

            @Override
            public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, MergeContext mergeContext)
                throws IOException {
                final List<SegmentCommitInfo> segments = segmentsReference.get();
                if (segments != null) {
                    final MergeSpecification spec = new MergeSpecification();
                    spec.add(new OneMerge(segments));
                    return spec;
                }
                return super.findMerges(mergeTrigger, segmentInfos, mergeContext);
            }
        };
        try (Engine e = createEngine(defaultSettings, store, primaryTranslogDir, mergePolicy, (directory, iwc) -> {
            final MergeScheduler mergeScheduler = iwc.getMergeScheduler();
            assertNotNull(mergeScheduler);
            iwc.setMergeScheduler(new FilterMergeScheduler(mergeScheduler) {
                @Override
                public void merge(MergeSource mergeSource, MergeTrigger trigger) throws IOException {
                    final FilterMergeSource wrappedMergeSource = new FilterMergeSource(mergeSource) {
                        @Override
                        public MergePolicy.OneMerge getNextMerge() {
                            synchronized (mergeSource) {
                                /*
                                 * This will be called when we flush, at which point we will not yet be ready to return
                                 * the segments. After the segments are on disk, we can only return them from here once,
                                 * or the merge scheduler will be stuck in a loop repeatedly peeling off the same
                                 * segments to schedule for merging.
                                 */
                                if (segmentsReference.get() == null) {
                                    return super.getNextMerge();
                                } else {
                                    final List<SegmentCommitInfo> segments = segmentsReference.getAndSet(null);
                                    return new MergePolicy.OneMerge(segments);
                                }
                            }
                        }

                        @Override
                        public void merge(MergePolicy.OneMerge merge) {
                            throw new OutOfMemoryError("640K ought to be enough for anybody");
                        }
                    };
                    super.merge(wrappedMergeSource, trigger);
                }
            });
            return new IndexWriter(directory, iwc);
        }, null, null)) {
            // force segments to exist on disk
            final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null);
            e.index(indexForDoc(doc1));
            e.flush();
            final List<SegmentCommitInfo> segments = StreamSupport.stream(e.getLastCommittedSegmentInfos().spliterator(), false)
                .collect(Collectors.toList());
            segmentsReference.set(segments);
            // trigger a background merge that will be managed by the concurrent merge scheduler
            e.forceMerge(randomBoolean(), 0, false, false, false, UUIDs.randomBase64UUID());
            /*
             * Merging happens in the background on a merge thread, and the maybeDie handler is invoked on yet another
             * thread; we have to wait for these events to finish.
             */
            latch.await();
            assertNotNull(maybeFatal.get());
            assertThat(maybeFatal.get(), instanceOf(OutOfMemoryError.class));
            assertThat(maybeFatal.get(), hasToString(containsString("640K ought to be enough for anybody")));
        }
    } finally {
        Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler);
    }
}
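The test hinges on a plain JVM mechanism: a Throwable that escapes a thread's run method, including an Error thrown on Lucene's merge thread, is routed to the thread's uncaught exception handler, falling back to the process-wide default handler. A minimal, self-contained sketch of that mechanism follows; the class name and the "simulated" message are illustrative, not part of the OpenSearch test.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

public class UncaughtErrorSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicReference<Throwable> seen = new AtomicReference<>();
        final CountDownLatch latch = new CountDownLatch(1);
        // Install a default handler, as the test above does, so we can observe the error.
        Thread.setDefaultUncaughtExceptionHandler((thread, error) -> {
            seen.set(error);
            latch.countDown();
        });
        // An Error thrown on a background thread that nothing catches reaches the default handler.
        new Thread(() -> {
            throw new OutOfMemoryError("simulated");
        }).start();
        latch.await();
        System.out.println("uncaught: " + seen.get()); // prints the simulated OutOfMemoryError
    }
}

This is why the test only needs to install a handler and count down a latch: once the wrapped MergeSource throws, nothing in the stack of the merge thread catches the Error, so it must surface in the handler for the assertions to pass.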
Use of org.apache.lucene.index.FilterMergePolicy in the project OpenSearch by opensearch-project.
From the class FsRepositoryTests, the method deleteRandomDoc, which deletes a single random document while overriding keepFullyDeletedSegment so that a segment whose documents are all deleted still stays on disk.
private void deleteRandomDoc(Directory directory) throws IOException {
    try (IndexWriter writer = new IndexWriter(
        directory,
        newIndexWriterConfig(random(), new MockAnalyzer(random()))
            .setCodec(TestUtil.getDefaultCodec())
            .setMergePolicy(new FilterMergePolicy(NoMergePolicy.INSTANCE) {
                @Override
                public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) {
                    return true;
                }
            })
    )) {
        final int numDocs = writer.getDocStats().numDocs;
        writer.deleteDocuments(new Term("id", "" + randomIntBetween(0, writer.getDocStats().numDocs - 1)));
        writer.commit();
        assertEquals(writer.getDocStats().numDocs, numDocs - 1);
    }
}
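Both snippets use FilterMergePolicy the same way: it forwards every MergePolicy method to the wrapped instance, so an anonymous subclass overrides only the one behavior under test (canned merge specifications in the first snippet, keepFullyDeletedSegment in the second). A hypothetical sketch of that delegation pattern follows; CountingMergePolicy and findMergesCalls are illustrative names, not from OpenSearch.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.FilterMergePolicy;
import org.apache.lucene.index.MergeTrigger;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.TieredMergePolicy;

/** Counts how often the IndexWriter asks for merges, delegating the actual decision. */
public final class CountingMergePolicy extends FilterMergePolicy {
    public final AtomicLong findMergesCalls = new AtomicLong();

    public CountingMergePolicy() {
        super(new TieredMergePolicy()); // any concrete policy can be wrapped here
    }

    @Override
    public MergeSpecification findMerges(MergeTrigger mergeTrigger, SegmentInfos segmentInfos, MergeContext mergeContext)
        throws IOException {
        findMergesCalls.incrementAndGet(); // observe the call, then let the wrapped policy decide
        return super.findMerges(mergeTrigger, segmentInfos, mergeContext);
    }
}

Used via something like newIndexWriterConfig().setMergePolicy(new CountingMergePolicy()), a test could then assert on findMergesCalls without re-implementing any merge-selection logic, which is exactly the leverage the two OpenSearch tests above get from the class.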