Use of org.apache.lucene.index.TieredMergePolicy in the project jackrabbit-oak by Apache: class IndexDefinition, method createMergePolicy.
/**
 * Resolves the Lucene {@code MergePolicy} to use for this index.
 *
 * <p>The {@code oak.lucene.cmmp} system property acts as a global override that
 * forces the commit-mitigating policy regardless of the index definition. Otherwise
 * the policy is picked from the optional {@code MERGE_POLICY_NAME} property of the
 * definition (matched case-insensitively); an absent or unrecognized name falls
 * back to Lucene's default {@code TieredMergePolicy}.
 *
 * @return the merge policy to install on the index writer; never {@code null}
 */
private MergePolicy createMergePolicy() {
    // Global JVM-level override takes precedence over the per-index setting.
    if (System.getProperty("oak.lucene.cmmp") != null) {
        return new CommitMitigatingTieredMergePolicy();
    }
    String policyName = getOptionalValue(definition, LuceneIndexConstants.MERGE_POLICY_NAME, null);
    if (policyName != null) {
        if ("no".equalsIgnoreCase(policyName)) {
            return NoMergePolicy.COMPOUND_FILES;
        }
        if ("mitigated".equalsIgnoreCase(policyName)) {
            return new CommitMitigatingTieredMergePolicy();
        }
        if ("tiered".equalsIgnoreCase(policyName) || "default".equalsIgnoreCase(policyName)) {
            return new TieredMergePolicy();
        }
        if ("logbyte".equalsIgnoreCase(policyName)) {
            return new LogByteSizeMergePolicy();
        }
        if ("logdoc".equalsIgnoreCase(policyName)) {
            return new LogDocMergePolicy();
        }
    }
    // No name configured, or the name did not match any known policy.
    return new TieredMergePolicy();
}
Use of org.apache.lucene.index.TieredMergePolicy in the project crate by Crate: class InternalEngineTests, method testSegmentsWithMergeFlag.
// Verifies that ordinary flushes never leave segments tagged with a merge id,
// and that a forced merge advances the commit generation as expected.
@Test
public void testSegmentsWithMergeFlag() throws Exception {
try (Store store = createStore();
Engine engine = createEngine(defaultSettings, store, createTempDir(), new TieredMergePolicy())) {
// Index one document and flush: exactly one segment should exist.
ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null);
Engine.Index index = indexForDoc(doc);
engine.index(index);
engine.flush();
assertThat(engine.segments(false).size(), equalTo(1));
// A second document and flush adds a second segment; no merge id yet.
index = indexForDoc(testParsedDocument("2", null, testDocument(), B_1, null));
engine.index(index);
engine.flush();
List<Segment> segments = engine.segments(false);
assertThat(segments.size(), equalTo(2));
for (Segment segment : segments) {
assertThat(segment.getMergeId(), nullValue());
}
// Third document, third segment - still no segment carries a merge id.
index = indexForDoc(testParsedDocument("3", null, testDocument(), B_1, null));
engine.index(index);
engine.flush();
segments = engine.segments(false);
assertThat(segments.size(), equalTo(3));
for (Segment segment : segments) {
assertThat(segment.getMergeId(), nullValue());
}
// Re-index doc "1" (an update) so the forced merge below has work to do.
index = indexForDoc(doc);
engine.index(index);
engine.flush();
final long gen1 = store.readLastCommittedSegmentsInfo().getGeneration();
// now, optimize and wait for merges, see that we have no merge flag
engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID());
for (Segment segment : engine.segments(false)) {
assertThat(segment.getMergeId(), nullValue());
}
// we could have multiple underlying merges, so the generation may increase more than once
assertTrue(store.readLastCommittedSegmentsInfo().getGeneration() > gen1);
final boolean flush = randomBoolean();
final long gen2 = store.readLastCommittedSegmentsInfo().getGeneration();
engine.forceMerge(flush, 1, false, false, false, UUIDs.randomBase64UUID());
for (Segment segment : engine.segments(false)) {
assertThat(segment.getMergeId(), nullValue());
}
if (flush) {
// we should have had just 1 merge, so last generation should be exact
assertEquals(gen2, store.readLastCommittedSegmentsInfo().getLastGeneration());
}
}
}
Use of org.apache.lucene.index.TieredMergePolicy in the project elasticsearch by Elastic: class InternalEngineTests, method testSegmentsWithMergeFlag.
/**
 * Checks that plain flushes never leave segments tagged with a merge id, and
 * that a forced merge makes the last committed segment generation advance.
 */
public void testSegmentsWithMergeFlag() throws Exception {
    try (Store store = createStore();
         Engine engine = createEngine(defaultSettings, store, createTempDir(), new TieredMergePolicy())) {
        // First document + flush: exactly one segment, no merge involved.
        ParsedDocument doc = testParsedDocument("1", "test", null, testDocument(), B_1, null);
        engine.index(indexForDoc(doc));
        engine.flush();
        assertThat(engine.segments(false).size(), equalTo(1));

        // Second document + flush: two segments, neither carrying a merge id.
        engine.index(indexForDoc(testParsedDocument("2", "test", null, testDocument(), B_1, null)));
        engine.flush();
        List<Segment> afterSecondFlush = engine.segments(false);
        assertThat(afterSecondFlush.size(), equalTo(2));
        afterSecondFlush.forEach(segment -> assertThat(segment.getMergeId(), nullValue()));

        // Third document + flush: three segments, still no merge ids.
        engine.index(indexForDoc(testParsedDocument("3", "test", null, testDocument(), B_1, null)));
        engine.flush();
        List<Segment> afterThirdFlush = engine.segments(false);
        assertThat(afterThirdFlush.size(), equalTo(3));
        afterThirdFlush.forEach(segment -> assertThat(segment.getMergeId(), nullValue()));

        // Re-index doc "1" (an update) so the forced merge below has real work.
        engine.index(indexForDoc(doc));
        engine.flush();
        final long generationBeforeMerge = store.readLastCommittedSegmentsInfo().getGeneration();
        // Optimize and wait for merges; afterwards no segment may expose a merge id.
        engine.forceMerge(true);
        engine.segments(false).forEach(segment -> assertThat(segment.getMergeId(), nullValue()));
        // Several underlying merges may have run, so the generation can advance more than once.
        assertTrue(store.readLastCommittedSegmentsInfo().getGeneration() > generationBeforeMerge);

        final boolean flush = randomBoolean();
        final long generationBeforeSecondMerge = store.readLastCommittedSegmentsInfo().getGeneration();
        engine.forceMerge(flush);
        engine.segments(false).forEach(segment -> assertThat(segment.getMergeId(), nullValue()));
        if (flush) {
            // At most one merge could have happened, so the last generation matches exactly.
            assertEquals(generationBeforeSecondMerge, store.readLastCommittedSegmentsInfo().getLastGeneration());
        }
    }
}
Use of org.apache.lucene.index.TieredMergePolicy in the project lucene-solr by Apache: class TestMergePolicyConfig, method testTieredMergePolicyConfig.
/**
 * Verifies that a TieredMergePolicy configured via solrconfig is wired into the
 * IndexWriterConfig with the expected settings, and that its noCFSRatio only
 * affects segments that were actually produced by a merge.
 */
public void testTieredMergePolicyConfig() throws Exception {
    final boolean expectCompound = Boolean.parseBoolean(System.getProperty("useCompoundFile"));
    initCore("solrconfig-tieredmergepolicyfactory.xml", "schema-minimal.xml");

    IndexWriterConfig writerConfig = solrConfig.indexConfig.toIndexWriterConfig(h.getCore());
    assertEquals(expectCompound, writerConfig.getUseCompoundFile());

    TieredMergePolicy policy = assertAndCast(TieredMergePolicy.class, writerConfig.getMergePolicy());
    // Derived from the legacy <mergeFactor> setting.
    assertEquals(7, policy.getMaxMergeAtOnce());
    // Policy-specific setters from the factory configuration.
    assertEquals(19, policy.getMaxMergeAtOnceExplicit());
    assertEquals(0.1D, policy.getNoCFSRatio(), 0.0D);
    // segmentsPerTier was overridden independently of maxMergeAtOnce
    // (both are split out of the single mergeFactor value).
    assertEquals(9D, policy.getSegmentsPerTier(), 0.001);

    assertCommitSomeNewDocs();
    // A lone segment is 100% of the index (above the 0.6 threshold), but the
    // compound ratio is irrelevant here because this segment was never merged.
    assertCompoundSegments(h.getCore(), expectCompound);

    assertCommitSomeNewDocs();
    assertNumSegments(h.getCore(), 2);
    assertCompoundSegments(h.getCore(), expectCompound);

    assertU(optimize());
    assertNumSegments(h.getCore(), 1);
    // A merge has now been forced, so the policy's noCFSRatio applies and the
    // resulting segment must not use the compound format.
    assertCompoundSegments(h.getCore(), false);
}
Use of org.apache.lucene.index.TieredMergePolicy in the project lucene-solr by Apache: class TestMergePolicyConfig, method testSetNoCFSMergePolicyConfig.
/**
 * Verifies that the merge-policy factory honours the useCompoundFile flag
 * (supplied through a system property) and configures the expected
 * noCFSRatio on the resulting TieredMergePolicy.
 */
public void testSetNoCFSMergePolicyConfig() throws Exception {
    final boolean useCompoundFile = random().nextBoolean();
    System.setProperty("testSetNoCFSMergePolicyConfig.useCompoundFile", String.valueOf(useCompoundFile));
    try {
        initCore("solrconfig-mergepolicyfactory-nocfs.xml", "schema-minimal.xml");
        IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore());
        assertEquals(useCompoundFile, iwc.getUseCompoundFile());
        TieredMergePolicy tieredMP = assertAndCast(TieredMergePolicy.class, iwc.getMergePolicy());
        assertEquals(0.5D, tieredMP.getNoCFSRatio(), 0.0D);
    } finally {
        // clearProperty is the idiomatic counterpart of setProperty; unlike
        // System.getProperties().remove(...) it goes through the same
        // SecurityManager checks as the setter.
        System.clearProperty("testSetNoCFSMergePolicyConfig.useCompoundFile");
    }
}
Aggregations